Example usage for java.util LinkedList size

List of usage examples for java.util LinkedList size

Introduction

This page collects usage examples for java.util.LinkedList size(), gathered from open-source projects.

Prototype

public int size()
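
A minimal, self-contained sketch showing how the return value tracks additions and removals:

import java.util.LinkedList;

public class LinkedListSizeDemo {
    public static void main(String[] args) {
        LinkedList<String> list = new LinkedList<>();
        System.out.println(list.size()); // 0: a fresh list is empty
        list.add("a");
        list.add("b");
        System.out.println(list.size()); // 2
        list.removeFirst();
        System.out.println(list.size()); // 1
    }
}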


Usage

From source file:ddf.catalog.transformer.input.pdf.GeoPdfParser.java

/**
 * Generates a WKT compliant String from a PDF Document if it contains GeoPDF information.
 * Currently, only WGS84 Projections are supported (GEOGRAPHIC GeoPDF ProjectionType).
 *
 * @param pdfDocument - The PDF document
 * @return the WKT String, or null if the document contains no GeoPDF information
 * @throws IOException
 */
public String getWktFromPDF(PDDocument pdfDocument) throws IOException {
    ToDoubleVisitor toDoubleVisitor = new ToDoubleVisitor();
    LinkedList<String> polygons = new LinkedList<>();

    for (PDPage pdPage : pdfDocument.getPages()) {
        COSDictionary cosObject = pdPage.getCOSObject();

        COSBase lgiDictObject = cosObject.getObjectFromPath(LGIDICT);

        // Handle Multiple Map Frames
        if (lgiDictObject instanceof COSArray) {
            for (int i = 0; i < ((COSArray) lgiDictObject).size(); i++) {
                COSDictionary lgidict = (COSDictionary) cosObject.getObjectFromPath(LGIDICT + "/[" + i + "]");

                COSDictionary projectionArray = (COSDictionary) lgidict.getDictionaryObject(PROJECTION);
                if (projectionArray != null) {
                    String projectionType = ((COSString) projectionArray.getItem(PROJECTION_TYPE)).getString();
                    if (GEOGRAPHIC.equals(projectionType)) {
                        COSArray neatlineArray = (COSArray) cosObject
                                .getObjectFromPath(LGIDICT + "/[" + i + "]/" + NEATLINE);
                        String wktString = getWktFromNeatLine(lgidict, neatlineArray, toDoubleVisitor);
                        polygons.add(wktString);
                    } else {
                        LOGGER.debug("Unsupported projection type {}.  Map Frame will be skipped.",
                                projectionType);
                    }
                } else {
                    LOGGER.debug("No projection array found on the map frame.  Map Frame will be skipped.");
                }
            }
            // Handle One Map Frame
        } else if (lgiDictObject instanceof COSDictionary) {
            COSDictionary lgidict = (COSDictionary) lgiDictObject;
            COSDictionary projectionArray = (COSDictionary) lgidict.getDictionaryObject(PROJECTION);
            if (projectionArray != null) {
                String projectionType = ((COSString) projectionArray.getItem(PROJECTION_TYPE)).getString();
                if (GEOGRAPHIC.equals(projectionType)) {
                    COSArray neatlineArray = (COSArray) cosObject.getObjectFromPath(LGIDICT + "/" + NEATLINE);
                    if (neatlineArray == null) {
                        neatlineArray = generateNeatLineFromPDFDimensions(pdPage);

                    }
                    polygons.add(getWktFromNeatLine(lgidict, neatlineArray, toDoubleVisitor));

                } else {
                    LOGGER.debug("Unsupported projection type {}.  Map Frame will be skipped.", projectionType);
                }
            } else {
                LOGGER.debug("No projection array found on the map frame.  Map Frame will be skipped.");
            }
        }
    }

    if (polygons.size() == 0) {
        LOGGER.debug(
                "No GeoPDF information found on PDF during transformation.  Metacard location will not be set.");
        return null;
    }

    if (polygons.size() == 1) {
        return POLYGON + polygons.get(0) + "))";
    } else {
        return polygons.stream().map(polygon -> "((" + polygon + "))")
                .collect(Collectors.joining(",", MULTIPOLYGON, ")"));
    }
}
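
A hypothetical caller for the method above. PDDocument.load comes from Apache PDFBox; the no-argument GeoPdfParser constructor and the file name are assumptions made for illustration:

import java.io.File;
import java.io.IOException;
import org.apache.pdfbox.pdmodel.PDDocument;

public class GeoPdfDemo {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the PDF once the WKT has been extracted
        try (PDDocument document = PDDocument.load(new File("map.pdf"))) {
            String wkt = new GeoPdfParser().getWktFromPDF(document); // assumed constructor
            System.out.println(wkt != null ? wkt : "No GeoPDF information found");
        }
    }
}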

From source file:com.github.vanroy.springdata.jest.JestElasticsearchTemplateTests.java

@Test
public void shouldReturnObjectsForGivenIdsUsingMultiGetWithFields() {
    // given
    List<IndexQuery> indexQueries;
    // first document
    String documentId = randomNumeric(5);
    SampleEntity sampleEntity1 = SampleEntity.builder().id(documentId).message("some message").type("type1")
            .version(System.currentTimeMillis()).build();

    // second document
    String documentId2 = randomNumeric(5);
    SampleEntity sampleEntity2 = SampleEntity.builder().id(documentId2).message("some message").type("type2")
            .version(System.currentTimeMillis()).build();

    indexQueries = getIndexQueries(Arrays.asList(sampleEntity1, sampleEntity2));

    elasticsearchTemplate.bulkIndex(indexQueries);
    elasticsearchTemplate.refresh(SampleEntity.class);

    // when
    SearchQuery query = new NativeSearchQueryBuilder().withIds(Arrays.asList(documentId, documentId2))
            .withFields("message", "type").build();
    LinkedList<SampleEntity> sampleEntities = elasticsearchTemplate.multiGet(query, SampleEntity.class,
            new JestMultiGetResultMapper() {
                @Override
                public <T> LinkedList<T> mapResults(MultiDocumentResult responses, Class<T> clazz) {
                    LinkedList<T> list = new LinkedList<>();
                    for (MultiDocumentResult.MultiDocumentResultItem response : responses.getItems()) {
                        SampleEntity entity = new SampleEntity();
                        entity.setId(response.getId());

                        //TODO: Need to map Fields
                        //               entity.setMessage((String) response.getField("message").getValue());
                        //               entity.setType((String) response.getField("type").getValue());
                        list.add((T) entity);
                    }
                    return list;
                }
            });
    // then
    assertThat(sampleEntities.size(), is(equalTo(2)));
}

From source file:com.rackspacecloud.client.cloudfiles.FilesClient.java

/**
 * Lists the containers in the account, up to the given limit and optionally
 * starting after the given marker.
 *
 * @param limit
 *            the maximum number of containers to return (ignored unless positive)
 * @param marker
 *            only return containers whose names sort after this value; may be null
 *
 * @return the containers found
 * @throws IOException
 *             if there is an IO error communicating with the server
 * @throws HttpException
 *             if there is an HTTP protocol error
 * @throws FilesException
 *             if the server returns an unexpected response
 * @throws FilesAuthorizationException
 *             if the client is not logged in
 */
public List<FilesContainer> listContainers(int limit, String marker)
        throws IOException, HttpException, FilesException {
    if (!this.isLoggedin()) {
        throw new FilesAuthorizationException("You must be logged in", null, null);
    }
    HttpGet method = null;
    try {
        LinkedList<NameValuePair> parameters = new LinkedList<NameValuePair>();

        if (limit > 0) {
            parameters.add(new BasicNameValuePair("limit", String.valueOf(limit)));
        }
        if (marker != null) {
            parameters.add(new BasicNameValuePair("marker", marker));
        }

        String uri = parameters.size() > 0 ? makeURI(storageURL, parameters) : storageURL;
        method = new HttpGet(uri);
        method.getParams().setIntParameter("http.socket.timeout", connectionTimeOut);
        method.setHeader(FilesConstants.X_AUTH_TOKEN, authToken);
        FilesResponse response = new FilesResponse(client.execute(method));

        if (response.getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
            method.abort();
            if (login()) {
                method = new HttpGet(uri);
                method.getParams().setIntParameter("http.socket.timeout", connectionTimeOut);
                method.setHeader(FilesConstants.X_AUTH_TOKEN, authToken);
                response = new FilesResponse(client.execute(method));
            } else {
                throw new FilesAuthorizationException("Re-login failed", response.getResponseHeaders(),
                        response.getStatusLine());
            }
        }

        if (response.getStatusCode() == HttpStatus.SC_OK) {
            // logger.warn(method.getResponseCharSet());
            StrTokenizer tokenize = new StrTokenizer(response.getResponseBodyAsString());
            tokenize.setDelimiterString("\n");
            String[] containers = tokenize.getTokenArray();
            ArrayList<FilesContainer> containerList = new ArrayList<FilesContainer>();
            for (String container : containers) {
                containerList.add(new FilesContainer(container, this));
            }
            return containerList;
        } else if (response.getStatusCode() == HttpStatus.SC_NO_CONTENT) {
            return new ArrayList<FilesContainer>();
        } else if (response.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
            throw new FilesNotFoundException("Account was not found", response.getResponseHeaders(),
                    response.getStatusLine());
        } else {
            throw new FilesException("Unexpected resposne from server", response.getResponseHeaders(),
                    response.getStatusLine());
        }
    } catch (Exception ex) {
        ex.printStackTrace();
        throw new FilesException("Unexpected error, probably parsing Server XML", ex);
    } finally {
        if (method != null)
            method.abort();
    }
}
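
A sketch of paginating through every container with the method above, assuming a logged-in FilesClient named client and that FilesContainer exposes a getName() accessor:

String marker = null;
List<FilesContainer> page;
do {
    page = client.listContainers(100, marker); // fetch up to 100 containers per call
    for (FilesContainer container : page) {
        System.out.println(container.getName());
    }
    if (!page.isEmpty()) {
        marker = page.get(page.size() - 1).getName(); // continue after the last name seen
    }
} while (page.size() == 100); // a short page means the listing is exhausted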

From source file:jp.zippyzip.impl.GeneratorServiceImpl.java

public void preZips() {

    Date timestamp = getLzhDao().getZipInfo().getTimestamp();
    LinkedList<Pref> prefs = getPrefs();
    LinkedList<City> cities = getCities();

    try {

        for (Pref pref : prefs) {

            TreeMap<String, TreeSet<String>> zips = new TreeMap<String, TreeSet<String>>();

            for (City city : cities) {

                if (!city.getCode().startsWith(pref.getCode())) {
                    continue;
                }

                ParentChild data = getParentChildDao().get(city.getCode());

                if (data != null) {

                    for (String json : data.getChildren()) {

                        String zip = new JSONObject(json).optString("code", "");

                        if (!zips.containsKey(zip)) {
                            zips.put(zip, new TreeSet<String>());
                        }

                        zips.get(zip).add(city.getCode());
                    }
                }

                data = getParentChildDao().get(city.getCode() + "c");

                if (data != null) {

                    for (String json : data.getChildren()) {

                        String zip = new JSONObject(json).optString("code", "");

                        if (!zips.containsKey(zip)) {
                            zips.put(zip, new TreeSet<String>());
                        }

                        zips.get(zip).add(city.getCode() + "c");
                    }
                }
            }

            StringBuilder rec = new StringBuilder("[");
            LinkedList<String> list = new LinkedList<String>();

            for (String zip : zips.keySet()) {

                for (String key : zips.get(zip)) {

                    rec.append(new JSONStringer().object().key("zip").value(zip).key("key").value(key)
                            .endObject().toString());

                    if (rec.length() > 400) {
                        rec.append("]");
                        list.add(rec.toString());
                        rec = new StringBuilder("[");
                    } else {
                        rec.append(",");
                    }
                }
            }

            if (rec.length() > 1) {
                rec.append("]");
                list.add(rec.toString());
            }

            getParentChildDao()
                    .store(new ParentChild("pre" + pref.getCode(), timestamp, new LinkedList<String>(), list));
            log.info(pref.getCode() + ":" + list.size());
        }

    } catch (JSONException e) {
        log.log(Level.WARNING, "", e);
    }

    return;
}

From source file:com.glaf.core.config.Configuration.java

private Resource loadResource(Properties properties, Resource wrapper, boolean quiet) {
    String name = UNKNOWN_RESOURCE;
    try {
        Object resource = wrapper.getResource();
        name = wrapper.getName();

        DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
        // ignore all comments inside the xml file
        docBuilderFactory.setIgnoringComments(true);

        // allow includes in the xml file
        docBuilderFactory.setNamespaceAware(true);
        try {
            docBuilderFactory.setXIncludeAware(true);
        } catch (UnsupportedOperationException e) {
            LOG.error("Failed to set setXIncludeAware(true) for parser " + docBuilderFactory + ":" + e, e);
        }
        DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
        Document doc = null;
        Element root = null;
        boolean returnCachedProperties = false;

        if (resource instanceof URL) { // a URL resource
            doc = parse(builder, (URL) resource);
        } else if (resource instanceof String) { // a CLASSPATH resource
            URL url = getResource((String) resource);
            doc = parse(builder, url);
        } else if (resource instanceof InputStream) {
            doc = parse(builder, (InputStream) resource, null);
            returnCachedProperties = true;
        } else if (resource instanceof Properties) {
            overlay(properties, (Properties) resource);
        } else if (resource instanceof Element) {
            root = (Element) resource;
        }

        if (root == null) {
            if (doc == null) {
                if (quiet) {
                    return null;
                }
                throw new RuntimeException(resource + " not found");
            }
            root = doc.getDocumentElement();
        }
        Properties toAddTo = properties;
        if (returnCachedProperties) {
            toAddTo = new Properties();
        }
        if (!"configuration".equals(root.getTagName()))
            LOG.fatal("bad conf file: top-level element not <configuration>");
        NodeList props = root.getChildNodes();

        for (int i = 0; i < props.getLength(); i++) {
            Node propNode = props.item(i);
            if (!(propNode instanceof Element))
                continue;
            Element prop = (Element) propNode;
            if ("configuration".equals(prop.getTagName())) {
                loadResource(toAddTo, new Resource(prop, name), quiet);
                continue;
            }
            if (!"property".equals(prop.getTagName()))
                LOG.warn("bad conf file: element not <property>");
            NodeList fields = prop.getChildNodes();
            String attr = null;
            String value = null;
            boolean finalParameter = false;
            LinkedList<String> source = new LinkedList<String>();
            for (int j = 0; j < fields.getLength(); j++) {
                Node fieldNode = fields.item(j);
                if (!(fieldNode instanceof Element))
                    continue;
                Element field = (Element) fieldNode;
                if ("name".equals(field.getTagName()) && field.hasChildNodes())
                    attr = StringInterner.weakIntern(((Text) field.getFirstChild()).getData().trim());
                if ("value".equals(field.getTagName()) && field.hasChildNodes())
                    value = StringInterner.weakIntern(((Text) field.getFirstChild()).getData());
                if ("final".equals(field.getTagName()) && field.hasChildNodes())
                    finalParameter = "true".equals(((Text) field.getFirstChild()).getData());
                if ("source".equals(field.getTagName()) && field.hasChildNodes())
                    source.add(StringInterner.weakIntern(((Text) field.getFirstChild()).getData()));
            }
            source.add(name);

            // Ignore this parameter if it has already been marked as
            // 'final'
            if (attr != null) {
                loadProperty(toAddTo, name, attr, value, finalParameter,
                        source.toArray(new String[source.size()]));
            }
        }

        if (returnCachedProperties) {
            overlay(properties, toAddTo);
            return new Resource(toAddTo, name);
        }
        return null;
    } catch (IOException e) {
        LOG.fatal("error parsing conf " + name, e);
        throw new RuntimeException(e);
    } catch (DOMException e) {
        LOG.fatal("error parsing conf " + name, e);
        throw new RuntimeException(e);
    } catch (SAXException e) {
        LOG.fatal("error parsing conf " + name, e);
        throw new RuntimeException(e);
    } catch (ParserConfigurationException e) {
        LOG.fatal("error parsing conf " + name, e);
        throw new RuntimeException(e);
    }
}
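
For reference, the tag checks above imply a Hadoop-style resource layout. A minimal sketch of a file this method would accept (the key, value, and file name are placeholders):

<configuration>
  <property>
    <name>example.key</name>
    <value>example value</value>
    <final>true</final>
    <source>example-site.xml</source>
  </property>
</configuration>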

From source file:org.epics.archiverappliance.retrieval.DataRetrievalServlet.java

private void doGetMultiPV(HttpServletRequest req, HttpServletResponse resp)
        throws ServletException, IOException {

    PoorMansProfiler pmansProfiler = new PoorMansProfiler();

    // Gets the list of PVs specified by the `pv` parameter
    // String arrays might be inefficient for retrieval. In any case, they are sorted, which is essential later on.
    List<String> pvNames = Arrays.asList(req.getParameterValues("pv"));

    // Ensuring that the AA has finished starting up before requests are accepted.
    if (configService.getStartupState() != STARTUP_SEQUENCE.STARTUP_COMPLETE) {
        String msg = "Cannot process data retrieval requests for specified PVs ("
                + StringUtils.join(pvNames, ", ") + ") until the appliance has completely started up.";
        logger.error(msg);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, msg);
        return;
    }

    // Getting various fields from arguments
    String startTimeStr = req.getParameter("from");
    String endTimeStr = req.getParameter("to");
    boolean useReduced = false;
    String useReducedStr = req.getParameter("usereduced");
    if (useReducedStr != null && !useReducedStr.equals("")) {
        try {
            useReduced = Boolean.parseBoolean(useReducedStr);
        } catch (Exception ex) {
            logger.error("Exception parsing usereduced", ex);
            useReduced = false;
        }
    }

    // Getting MIME type
    String extension = req.getPathInfo().split("\\.")[1];
    logger.info("Mime is " + extension);

    if (!extension.equals("json") && !extension.equals("raw") && !extension.equals("jplot")
            && !extension.equals("qw")) {
        String msg = "Mime type " + extension + " is not supported. Please use \"json\", \"jplot\" or \"raw\".";
        resp.setHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
        return;
    }

    boolean useChunkedEncoding = true;
    String doNotChunkStr = req.getParameter("donotchunk");
    if (doNotChunkStr != null && !doNotChunkStr.equals("false")) {
        logger.info("Turning off HTTP chunked encoding");
        useChunkedEncoding = false;
    }

    boolean fetchLatestMetadata = false;
    String fetchLatestMetadataStr = req.getParameter("fetchLatestMetadata");
    if (fetchLatestMetadataStr != null && fetchLatestMetadataStr.equals("true")) {
        logger.info("Adding a call to the engine to fetch the latest metadata");
        fetchLatestMetadata = true;
    }

    // For data retrieval we need PV info. However, for PVs that have long since retired, we may not want to keep PVTypeInfos in the system.
    // So, we support a template PV that lays out the data sources.
    // During retrieval, you can pass in the PV as a template and we'll clone this and make a temporary copy.
    String retiredPVTemplate = req.getParameter("retiredPVTemplate");

    // Go through the given PVs and return a bad request error if any of them are null.
    int nullPVs = 0;
    for (String pvName : pvNames) {
        if (pvName == null) {
            nullPVs++;
        }
        if (nullPVs > 0) {
            logger.warn("Some PVs are null in the request.");
            resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }
    }

    if (pvNames.toString().matches("^.*" + ARCH_APPL_PING_PV + ".*$")) {
        logger.debug("Processing ping PV - this is used to validate the connection with the client.");
        processPingPV(req, resp);
        return;
    }

    for (int i = 0; i < pvNames.size(); i++) {
        String pvName = pvNames.get(i);
        if (pvName.endsWith(".VAL")) {
            // Strip the trailing .VAL and write the result back into the list.
            pvNames.set(i, pvName.substring(0, pvName.length() - 4));
            logger.info("Removing .VAL from pvName for request giving " + pvNames.get(i));
        }
    }

    // ISO datetimes are of the form "2011-02-02T08:00:00.000Z"
    Timestamp end = TimeUtils.plusHours(TimeUtils.now(), 1);
    if (endTimeStr != null) {
        try {
            end = TimeUtils.convertFromISO8601String(endTimeStr);
        } catch (IllegalArgumentException ex) {
            try {
                end = TimeUtils.convertFromDateTimeStringWithOffset(endTimeStr);
            } catch (IllegalArgumentException ex2) {
                String msg = "Cannot parse time " + endTimeStr;
                logger.warn(msg, ex2);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
        }
    }

    // We get one day by default
    Timestamp start = TimeUtils.minusDays(end, 1);
    if (startTimeStr != null) {
        try {
            start = TimeUtils.convertFromISO8601String(startTimeStr);
        } catch (IllegalArgumentException ex) {
            try {
                start = TimeUtils.convertFromDateTimeStringWithOffset(startTimeStr);
            } catch (IllegalArgumentException ex2) {
                String msg = "Cannot parse time " + startTimeStr;
                logger.warn(msg, ex2);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
        }
    }

    if (end.before(start)) {
        String msg = "For request, end " + end.toString() + " is before start " + start.toString() + " for pvs "
                + StringUtils.join(pvNames, ", ");
        logger.error(msg);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
        return;
    }

    LinkedList<TimeSpan> requestTimes = new LinkedList<TimeSpan>();

    // We can specify a list of time stamp pairs using the optional timeranges parameter
    String timeRangesStr = req.getParameter("timeranges");
    if (timeRangesStr != null) {
        boolean continueWithRequest = parseTimeRanges(resp, "[" + StringUtils.join(pvNames, ", ") + "]",
                requestTimes, timeRangesStr);
        if (!continueWithRequest) {
            // The time ranges could not be parsed properly, so we abort the request.
            String msg = "The specified time ranges could not be processed appropriately. Aborting.";
            logger.info(msg);
            resp.setHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
            return;
        }

        // Override the start and the end so that the mergededup consumer works correctly.
        start = requestTimes.getFirst().getStartTime();
        end = requestTimes.getLast().getEndTime();

    } else {
        requestTimes.add(new TimeSpan(start, end));
    }

    assert (requestTimes.size() > 0);

    // Get a post processor for each PV specified in pvNames
    // If PV in the form <pp>(<pv>), process it
    String postProcessorUserArg = req.getParameter("pp");
    List<String> postProcessorUserArgs = new ArrayList<>(pvNames.size());
    List<PostProcessor> postProcessors = new ArrayList<>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        postProcessorUserArgs.add(postProcessorUserArg);

        if (pvNames.get(i).contains("(")) {
            if (!pvNames.get(i).contains(")")) {
                String msg = "Unbalanced paren " + pvNames.get(i);
                logger.error(msg);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
            String[] components = pvNames.get(i).split("[(,)]");
            postProcessorUserArg = components[0];
            postProcessorUserArgs.set(i, postProcessorUserArg);
            pvNames.set(i, components[1]);
            if (components.length > 2) {
                for (int j = 2; j < components.length; j++) {
                    postProcessorUserArgs.set(i, postProcessorUserArgs.get(i) + "_" + components[j]);
                }
            }
            logger.info("After parsing the function call syntax pvName is " + pvNames.get(i)
                    + " and postProcessorUserArg is " + postProcessorUserArg);
        }
        postProcessors.add(PostProcessors.findPostProcessor(postProcessorUserArg));
    }

    List<PVTypeInfo> typeInfos = new ArrayList<PVTypeInfo>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        typeInfos.add(PVNames.determineAppropriatePVTypeInfo(pvNames.get(i), configService));
    }
    pmansProfiler.mark("After PVTypeInfo");

    for (int i = 0; i < pvNames.size(); i++)
        if (typeInfos.get(i) == null && RetrievalState.includeExternalServers(req)) {
            logger.debug(
                    "Checking to see if pv " + pvNames.get(i) + " is served by a external Archiver Server");
            typeInfos.set(i,
                    checkIfPVisServedByExternalServer(pvNames.get(i), start, req, resp, useChunkedEncoding));
        }

    for (int i = 0; i < pvNames.size(); i++) {
        if (typeInfos.get(i) == null) {
            // TODO Only needed if we're forwarding the request to another server.
            if (resp.isCommitted()) {
                logger.debug("Proxied the data thru an external server for PV " + pvNames.get(i));
                return;
            }

            if (retiredPVTemplate != null) {
                PVTypeInfo templateTypeInfo = PVNames.determineAppropriatePVTypeInfo(retiredPVTemplate,
                        configService);
                if (templateTypeInfo != null) {
                    typeInfos.set(i, new PVTypeInfo(pvNames.get(i), templateTypeInfo));
                    typeInfos.get(i).setPaused(true);
                    typeInfos.get(i).setApplianceIdentity(configService.getMyApplianceInfo().getIdentity());
                    // Somehow tell the code downstream that this is a fake typeInfo.
                    typeInfos.get(i).setSamplingMethod(SamplingMethod.DONT_ARCHIVE);
                    logger.debug("Using a template PV for " + pvNames.get(i)
                            + " Need to determine the actual DBR type.");
                    setActualDBRTypeFromData(pvNames.get(i), typeInfos.get(i), configService);
                }
            }
        }

        if (typeInfos.get(i) == null) {
            String msg = "Unable to find typeinfo for pv " + pvNames.get(i);
            logger.error(msg);
            resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_NOT_FOUND, msg);
            return;
        }

        if (postProcessors.get(i) == null) {
            if (useReduced) {
                String defaultPPClassName = configService.getInstallationProperties().getProperty(
                        "org.epics.archiverappliance.retrieval.DefaultUseReducedPostProcessor",
                        FirstSamplePP.class.getName());
                logger.debug("Using the default usereduced preprocessor " + defaultPPClassName);
                try {
                    postProcessors.set(i, (PostProcessor) Class.forName(defaultPPClassName).newInstance());
                } catch (Exception ex) {
                    logger.error("Exception constructing new instance of post processor " + defaultPPClassName,
                            ex);
                    postProcessors.set(i, null);
                }
            }
        }

        if (postProcessors.get(i) == null) {
            logger.debug("Using the default raw preprocessor");
            postProcessors.set(i, new DefaultRawPostProcessor());
        }
    }

    // Get the appliances for each of the PVs
    List<ApplianceInfo> applianceForPVs = new ArrayList<ApplianceInfo>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        applianceForPVs.add(configService.getApplianceForPV(pvNames.get(i)));
        if (applianceForPVs.get(i) == null) {
            // TypeInfo cannot be null here...
            assert (typeInfos.get(i) != null);
            applianceForPVs.set(i, configService.getAppliance(typeInfos.get(i).getApplianceIdentity()));
        }
    }

    /*
     * Retrieving the external appliances if the current appliance has not got the PV assigned to it, and
     * storing the associated information of the PVs in that appliance.
     */
    Map<String, ArrayList<PVInfoForClusterRetrieval>> applianceToPVs = new HashMap<String, ArrayList<PVInfoForClusterRetrieval>>();
    for (int i = 0; i < pvNames.size(); i++) {
        if (!applianceForPVs.get(i).equals(configService.getMyApplianceInfo())) {

            ArrayList<PVInfoForClusterRetrieval> appliancePVs = applianceToPVs
                    .get(applianceForPVs.get(i).getRetrievalURL());
            appliancePVs = (appliancePVs == null) ? new ArrayList<>() : appliancePVs;
            PVInfoForClusterRetrieval pvInfoForRetrieval = new PVInfoForClusterRetrieval(pvNames.get(i),
                    typeInfos.get(i), postProcessors.get(i), applianceForPVs.get(i));
            appliancePVs.add(pvInfoForRetrieval);
            applianceToPVs.put(applianceForPVs.get(i).getRetrievalURL(), appliancePVs);
        }
    }

    List<List<Future<EventStream>>> listOfEventStreamFuturesLists = new ArrayList<List<Future<EventStream>>>();
    Set<String> retrievalURLs = applianceToPVs.keySet();
    if (retrievalURLs.size() > 0) {
        // Redirect each batch of PVs to the appropriate appliance to be retrieved.
        for (String retrievalURL : retrievalURLs) {
            // Get the list of PVs registered for this appliance
            ArrayList<PVInfoForClusterRetrieval> pvInfos = applianceToPVs.get(retrievalURL);
            try {
                List<List<Future<EventStream>>> resultFromForeignAppliances = retrieveEventStreamFromForeignAppliance(
                        req, resp, pvInfos, requestTimes, useChunkedEncoding,
                        retrievalURL + "/../data/getDataForPVs.raw", start, end);
                listOfEventStreamFuturesLists.addAll(resultFromForeignAppliances);
            } catch (Exception ex) {
                logger.error("Failed to retrieve " + StringUtils.join(pvNames, ", ") + " from " + retrievalURL
                        + ".", ex);
                return;
            }
        }
    }

    pmansProfiler.mark("After Appliance Info");

    // Setting post processor for PVs, taking into account whether there is a field in the PV name
    List<String> pvNamesFromRequests = new ArrayList<String>(pvNames.size());
    for (int i = 0; i < pvNames.size(); i++) {
        String pvName = pvNames.get(i);
        pvNamesFromRequests.add(pvName);
        PVTypeInfo typeInfo = typeInfos.get(i);
        postProcessorUserArg = postProcessorUserArgs.get(i);

        // If a field is specified in a PV name, it will create a post processor for that
        String fieldName = PVNames.getFieldName(pvName);
        if (fieldName != null && !fieldName.equals("") && !pvName.equals(typeInfo.getPvName())) {
            logger.debug("We reset the pvName " + pvName + " to one from the typeinfo " + typeInfo.getPvName()
                    + " as that determines the name of the stream. " + "Also using ExtraFieldsPostProcessor.");
            pvNames.set(i, typeInfo.getPvName());
            postProcessors.set(i, new ExtraFieldsPostProcessor(fieldName));
        }

        try {
            // Postprocessors get their mandatory arguments from the request.
            // If user does not pass in the expected request, throw an exception.
            postProcessors.get(i).initialize(postProcessorUserArg, pvName);
        } catch (Exception ex) {
            String msg = "Postprocessor threw an exception during initialization for " + pvName;
            logger.error(msg, ex);
            resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_NOT_FOUND, msg);
            return;
        }
    }

    /*
     * MergeDedupConsumer is what writes PB data in its respective format to the HTTP response.
     * The response, after the MergeDedupConsumer is created, contains the following:
     * 
     * 1) The content type for the response.
     * 2) Any additional headers for the particular MIME response.
     * 
     * Additionally, the MergeDedupConsumer instance holds a reference to the output stream
     * that is used to write to the HTTP response. It is stored under the name `os`.
     */
    MergeDedupConsumer mergeDedupCountingConsumer;
    try {
        mergeDedupCountingConsumer = createMergeDedupConsumer(resp, extension, useChunkedEncoding);
    } catch (ServletException se) {
        String msg = "Exception when retrieving data " + "-->" + se.toString();
        logger.error(msg, se);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, msg);
        return;
    }

    /* 
     * BasicContext contains the PV name and the expected return type. Used to access PB files.
     * RetrievalExecutorResult contains a thread service class and the time spans. Presumably, the
     * thread service is what retrieves the data, and the BasicContext is the context in which it 
     * works.
     */
    List<HashMap<String, String>> engineMetadatas = new ArrayList<HashMap<String, String>>();
    try {
        List<BasicContext> retrievalContexts = new ArrayList<BasicContext>(pvNames.size());
        List<RetrievalExecutorResult> executorResults = new ArrayList<RetrievalExecutorResult>(pvNames.size());
        for (int i = 0; i < pvNames.size(); i++) {
            if (fetchLatestMetadata) {
                // Make a call to the engine to fetch the latest metadata.
                engineMetadatas.add(fetchLatestMedataFromEngine(pvNames.get(i), applianceForPVs.get(i)));
            }
            retrievalContexts.add(new BasicContext(typeInfos.get(i).getDBRType(), pvNamesFromRequests.get(i)));
            executorResults.add(determineExecutorForPostProcessing(pvNames.get(i), typeInfos.get(i),
                    requestTimes, req, postProcessors.get(i)));
        }

        /*
         * There are as many Future objects in the eventStreamFutures List as there are periods over 
         * which to fetch data. Retrieval of data happen here in parallel.
         */
        List<LinkedList<Future<RetrievalResult>>> listOfRetrievalResultFuturesLists = new ArrayList<LinkedList<Future<RetrievalResult>>>();
        for (int i = 0; i < pvNames.size(); i++) {
            listOfRetrievalResultFuturesLists.add(resolveAllDataSources(pvNames.get(i), typeInfos.get(i),
                    postProcessors.get(i), applianceForPVs.get(i), retrievalContexts.get(i),
                    executorResults.get(i), req, resp));
        }
        pmansProfiler.mark("After data source resolution");

        for (int i = 0; i < pvNames.size(); i++) {
            // Data is retrieved here
            List<Future<EventStream>> eventStreamFutures = getEventStreamFuturesFromRetrievalResults(
                    executorResults.get(i), listOfRetrievalResultFuturesLists.get(i));
            listOfEventStreamFuturesLists.add(eventStreamFutures);
        }

    } catch (Exception ex) {
        if (ex != null && ex.toString() != null && ex.toString().contains("ClientAbortException")) {
            // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
            logger.debug("Exception when retrieving data ", ex);
        } else {
            logger.error("Exception when retrieving data " + "-->" + ex.toString(), ex);
        }
    }

    long s1 = System.currentTimeMillis();
    String currentlyProcessingPV = null;

    /*
     * The following try bracket goes through each of the streams in the list of event stream futures.
     * 
     * It is intended that the process goes through one PV at a time.
     */
    try {
        for (int i = 0; i < pvNames.size(); i++) {
            List<Future<EventStream>> eventStreamFutures = listOfEventStreamFuturesLists.get(i);
            String pvName = pvNames.get(i);
            PVTypeInfo typeInfo = typeInfos.get(i);
            HashMap<String, String> engineMetadata = fetchLatestMetadata ? engineMetadatas.get(i) : null;
            PostProcessor postProcessor = postProcessors.get(i);

            logger.debug("Done with the RetrievalResults; moving onto the individual event stream "
                    + "from each source for " + StringUtils.join(pvNames, ", "));
            pmansProfiler.mark("After retrieval results");
            for (Future<EventStream> future : eventStreamFutures) {
                EventStreamDesc sourceDesc = null;

                // Gets the result of a data retrieval
                try (EventStream eventStream = future.get()) {
                    sourceDesc = null; // Reset it for each loop iteration.
                    sourceDesc = eventStream.getDescription();
                    if (sourceDesc == null) {
                        logger.warn("Skipping event stream without a desc for pv " + pvName);
                        continue;
                    }

                    logger.debug("Processing event stream for pv " + pvName + " from source "
                            + ((eventStream.getDescription() != null) ? eventStream.getDescription().getSource()
                                    : " unknown"));

                    try {
                        mergeTypeInfo(typeInfo, sourceDesc, engineMetadata);
                    } catch (MismatchedDBRTypeException mex) {
                        logger.error(mex.getMessage(), mex);
                        continue;
                    }

                    if (currentlyProcessingPV == null || !currentlyProcessingPV.equals(pvName)) {
                        logger.debug("Switching to new PV " + pvName + " In some mime responses we insert "
                                + "special headers at the beginning of the response. Calling the hook for "
                                + "that");
                        currentlyProcessingPV = pvName;
                        /*
                         * Goes through the PB data stream over a period of time. The relevant MIME response
                         * actually deal with the processing of the PV. `start` and `end` refer to the very
                         * beginning and very end of the time period being retrieved over, regardless of
                         * whether it is divided up or not.
                         */
                        mergeDedupCountingConsumer.processingPV(currentlyProcessingPV, start, end,
                                (eventStream != null) ? sourceDesc : null);
                    }

                    try {
                        // If the postProcessor does not have a consolidated event stream, we send each eventstream across as we encounter it.
                        // Else we send the consolidatedEventStream down below.
                        if (!(postProcessor instanceof PostProcessorWithConsolidatedEventStream)) {
                            /*
                             * The eventStream object contains all the data over the current period.
                             */
                            mergeDedupCountingConsumer.consumeEventStream(eventStream);
                            resp.flushBuffer();
                        }
                    } catch (Exception ex) {
                        if (ex != null && ex.toString() != null
                                && ex.toString().contains("ClientAbortException")) {
                            // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
                            logger.debug(
                                    "Exception when consuming and flushing data from " + sourceDesc.getSource(),
                                    ex);
                        } else {
                            logger.error("Exception when consuming and flushing data from "
                                    + sourceDesc.getSource() + "-->" + ex.toString(), ex);
                        }
                    }
                    pmansProfiler.mark("After event stream " + eventStream.getDescription().getSource());
                } catch (Exception ex) {
                    if (ex != null && ex.toString() != null && ex.toString().contains("ClientAbortException")) {
                        // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
                        logger.debug("Exception when consuming and flushing data from "
                                + (sourceDesc != null ? sourceDesc.getSource() : "N/A"), ex);
                    } else {
                        logger.error("Exception when consuming and flushing data from "
                                + (sourceDesc != null ? sourceDesc.getSource() : "N/A") + "-->" + ex.toString(),
                                ex);
                    }
                }
            }

            // TODO Go through data from other appliances here

            if (postProcessor instanceof PostProcessorWithConsolidatedEventStream) {
                try (EventStream eventStream = ((PostProcessorWithConsolidatedEventStream) postProcessor)
                        .getConsolidatedEventStream()) {
                    EventStreamDesc sourceDesc = eventStream.getDescription();
                    if (sourceDesc == null) {
                        logger.error("Skipping event stream without a desc for pv " + pvName
                                + " and post processor " + postProcessor.getExtension());
                    } else {
                        mergeDedupCountingConsumer.consumeEventStream(eventStream);
                        resp.flushBuffer();
                    }
                }
            }

            // If the postProcessor needs to send final data across, give it a chance now...
            if (postProcessor instanceof AfterAllStreams) {
                EventStream finalEventStream = ((AfterAllStreams) postProcessor).anyFinalData();
                if (finalEventStream != null) {
                    mergeDedupCountingConsumer.consumeEventStream(finalEventStream);
                    resp.flushBuffer();
                }
            }

            pmansProfiler.mark("After writing all eventstreams to response");
        }
    } catch (Exception ex) {
        if (ex != null && ex.toString() != null && ex.toString().contains("ClientAbortException")) {
            // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
            logger.debug("Exception when retrieving data ", ex);
        } else {
            logger.error("Exception when retrieving data " + "-->" + ex.toString(), ex);
        }
    }

    long s2 = System.currentTimeMillis();
    logger.info("For the complete request, found a total of " + mergeDedupCountingConsumer.totalEventsForAllPVs
            + " in " + (s2 - s1) + "(ms)" + " skipping " + mergeDedupCountingConsumer.skippedEventsForAllPVs
            + " events" + " deduping involved " + mergeDedupCountingConsumer.comparedEventsForAllPVs
            + " compares.");

    pmansProfiler.mark("After all closes and flushing all buffers");

    // Until we determine all the conditions under which we log this, we log sparingly.
    if (pmansProfiler.totalTimeMS() > 5000) {
        logger.error("Retrieval time for " + StringUtils.join(pvNames, ", ") + " from " + startTimeStr + " to "
                + endTimeStr + ": " + pmansProfiler.toString());
    }

    mergeDedupCountingConsumer.close();
}
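
Pulling the parameter handling together, a hypothetical request against this servlet (the host, port, and mount path are assumptions; the parameter names and the ISO-8601 time format come from the code above):

http://appliance.example.org:17668/retrieval/data/getDataForPVs.json?pv=TEST:PV:1&pv=TEST:PV:2&from=2011-02-02T08:00:00.000Z&to=2011-02-02T09:00:00.000Z&usereduced=true&fetchLatestMetadata=true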

From source file:elh.eus.absa.Features.java

/**
 * Given a window, check whether the ngrams inside it (all of them) are present in the
 * feature set, and if so, update the feature vector accordingly.
 *
 * @param ngrams LinkedList<String> : the ngram window to check
 * @param fVector double[] : feature vector for the corresponding instance
 * @param prefix String : possible prefix used to differentiate ngram groups in the attribute set.
 * @param tokens int : number of tokens in the sentence (in case we want to add not a frequency value
 * but a normalized value)
 * @param empty boolean : if true, the window is at the end of the sentence and must be emptied
 *
 */
private void checkNgramFeatures(LinkedList<String> ngrams, double[] fVector, String prefix, int tokens,
        boolean empty) {
    //System.err.println("features::checkNgramFeatures ->"+Arrays.asList(ngrams).toString());

    // if empty is active means that we are checking the end of the sentence and 
    // the ngram list must be emptied 
    if (empty) {
        while (!ngrams.isEmpty()) {
            String ng = featureFromArray(ngrams, prefix);
            //add occurrence to feature vector (the function checks if the given ngram feature exists).
            addNumericToFeatureVector(ng, fVector, tokens); //tokNum

            ngrams.removeFirst();
        }
    }
    // if empty is false search for all ngrams in the window
    else {
        // add ngrams to the feature list
        for (int i = 0; i < ngrams.size(); i++) {
            String ng = featureFromArray(ngrams.subList(0, i + 1), prefix);
            // add occurrence to feature vector (the function checks if the given ngram feature exists).
            addNumericToFeatureVector(ng, fVector, tokens);//tokNum
        }
    }
}

From source file:com.datatorrent.stram.StreamingContainerManager.java

/**
 * Compute checkpoints required for a given operator instance to be recovered.
 * This is done by looking at checkpoints available for downstream dependencies first,
 * and then selecting the most recent available checkpoint that is smaller than downstream.
 *
 * @param operator Operator instance for which to find recovery checkpoint
 * @param ctx      Context into which to collect traversal info
 */
public void updateRecoveryCheckpoints(PTOperator operator, UpdateCheckpointsContext ctx) {
    if (operator.getRecoveryCheckpoint().windowId < ctx.committedWindowId.longValue()) {
        ctx.committedWindowId.setValue(operator.getRecoveryCheckpoint().windowId);
    }

    if (operator.getState() == PTOperator.State.ACTIVE && (ctx.currentTms
            - operator.stats.lastWindowIdChangeTms) > operator.stats.windowProcessingTimeoutMillis) {
        // if the checkpoint is ahead, then it is not blocked but waiting for activation (state-less recovery, at-most-once)
        if (ctx.committedWindowId.longValue() >= operator.getRecoveryCheckpoint().windowId) {
            LOG.debug("Marking operator {} blocked committed window {}, recovery window {}", operator,
                    Codec.getStringWindowId(ctx.committedWindowId.longValue()),
                    Codec.getStringWindowId(operator.getRecoveryCheckpoint().windowId));
            ctx.blocked.add(operator);
        }
    }

    // the most recent checkpoint eligible for recovery based on downstream state
    Checkpoint maxCheckpoint = Checkpoint.INITIAL_CHECKPOINT;

    Set<OperatorMeta> checkpointGroup = ctx.checkpointGroups.get(operator.getOperatorMeta());
    if (checkpointGroup == null) {
        checkpointGroup = Collections.singleton(operator.getOperatorMeta());
    }
    // find intersection of checkpoints that group can collectively move to
    TreeSet<Checkpoint> commonCheckpoints = new TreeSet<>(new Checkpoint.CheckpointComparator());
    synchronized (operator.checkpoints) {
        commonCheckpoints.addAll(operator.checkpoints);
    }
    Set<PTOperator> groupOpers = new HashSet<>(checkpointGroup.size());
    boolean pendingDeploy = operator.getState() == PTOperator.State.PENDING_DEPLOY;
    if (checkpointGroup.size() > 1) {
        for (OperatorMeta om : checkpointGroup) {
            Collection<PTOperator> operators = plan.getAllOperators(om);
            for (PTOperator groupOper : operators) {
                synchronized (groupOper.checkpoints) {
                    commonCheckpoints.retainAll(groupOper.checkpoints);
                }
                // visit all downstream operators of the group
                ctx.visited.add(groupOper);
                groupOpers.add(groupOper);
                pendingDeploy |= groupOper.getState() == PTOperator.State.PENDING_DEPLOY;
            }
        }
        // highest common checkpoint
        if (!commonCheckpoints.isEmpty()) {
            maxCheckpoint = commonCheckpoints.last();
        }
    } else {
        // without logical grouping, treat partitions as independent
        // this is especially important for parallel partitioning
        ctx.visited.add(operator);
        groupOpers.add(operator);
        maxCheckpoint = operator.getRecentCheckpoint();
        if (ctx.recovery && maxCheckpoint.windowId == Stateless.WINDOW_ID && operator.isOperatorStateLess()) {
            long currentWindowId = WindowGenerator.getWindowId(ctx.currentTms, this.vars.windowStartMillis,
                    this.getLogicalPlan().getValue(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS));
            maxCheckpoint = new Checkpoint(currentWindowId, 0, 0);
        }
    }

    // DFS downstream operators
    for (PTOperator groupOper : groupOpers) {
        for (PTOperator.PTOutput out : groupOper.getOutputs()) {
            for (PTOperator.PTInput sink : out.sinks) {
                PTOperator sinkOperator = sink.target;
                if (groupOpers.contains(sinkOperator)) {
                    continue; // downstream operator within group
                }
                if (!ctx.visited.contains(sinkOperator)) {
                    // downstream traversal
                    updateRecoveryCheckpoints(sinkOperator, ctx);
                }
                // recovery window id cannot move backwards
                // when dynamically adding new operators
                if (sinkOperator.getRecoveryCheckpoint().windowId >= operator
                        .getRecoveryCheckpoint().windowId) {
                    maxCheckpoint = Checkpoint.min(maxCheckpoint, sinkOperator.getRecoveryCheckpoint());
                }

                if (ctx.blocked.contains(sinkOperator)) {
                    if (sinkOperator.stats.getCurrentWindowId() == operator.stats.getCurrentWindowId()) {
                        // downstream operator is blocked by this operator
                        ctx.blocked.remove(sinkOperator);
                    }
                }
            }
        }
    }

    // find the common checkpoint that is <= downstream recovery checkpoint
    if (!commonCheckpoints.contains(maxCheckpoint)) {
        if (!commonCheckpoints.isEmpty()) {
            maxCheckpoint = Objects.firstNonNull(commonCheckpoints.floor(maxCheckpoint), maxCheckpoint);
        }
    }

    for (PTOperator groupOper : groupOpers) {
        // checkpoint frozen during deployment
        if (!pendingDeploy || ctx.recovery) {
            // remove previous checkpoints
            Checkpoint c1 = Checkpoint.INITIAL_CHECKPOINT;
            LinkedList<Checkpoint> checkpoints = groupOper.checkpoints;
            synchronized (checkpoints) {
                if (!checkpoints.isEmpty() && (checkpoints.getFirst()).windowId <= maxCheckpoint.windowId) {
                    c1 = checkpoints.getFirst();
                    Checkpoint c2;
                    while (checkpoints.size() > 1
                            && ((c2 = checkpoints.get(1)).windowId) <= maxCheckpoint.windowId) {
                        checkpoints.removeFirst();
                        //LOG.debug("Checkpoint to delete: operator={} windowId={}", operator.getName(), c1);
                        this.purgeCheckpoints.add(new Pair<PTOperator, Long>(groupOper, c1.windowId));
                        c1 = c2;
                    }
                } else {
                    if (ctx.recovery && checkpoints.isEmpty() && groupOper.isOperatorStateLess()) {
                        LOG.debug("Adding checkpoint for stateless operator {} {}", groupOper,
                                Codec.getStringWindowId(maxCheckpoint.windowId));
                        c1 = groupOper.addCheckpoint(maxCheckpoint.windowId, this.vars.windowStartMillis);
                    }
                }
            }
            //LOG.debug("Operator {} checkpoints: commit {} recent {}", new Object[] {operator.getName(), c1, operator.checkpoints});
            groupOper.setRecoveryCheckpoint(c1);
        } else {
            LOG.debug("Skipping checkpoint update {} during {}", groupOper, groupOper.getState());
        }
    }

}

From source file:jp.zippyzip.impl.GeneratorServiceImpl.java

public void updateBuilding() {

    Collection<String> keys = getParentChildDao().getKeys();
    LinkedList<String> list = new LinkedList<String>();
    String preName = "";
    String preYomi = "";

    for (String key : keys) {

        if (key.length() != 5) {
            continue;
        }

        ParentChild data = getParentChildDao().get(key);

        for (String json : data.getChildren()) {

            if (!json.startsWith("{")) {
                continue;
            }

            Zip zip = Zip.fromJson(json);

            if (zip.getAdd2() == null) {
                continue;
            }
            if ((zip.getNote() != null) && zip.getNote().equals("???")) {
                preName = zip.getAdd1();
                preYomi = zip.getAdd1Yomi();
            }
            if (!zip.getAdd2().equals("?")) {
                continue;
            }

            if (zip.getAdd1().startsWith(preName)) {
                zip.setAdd1(preName + " " + zip.getAdd1().substring(preName.length()));
            }
            if (zip.getAdd1Yomi().startsWith(preYomi)) {
                zip.setAdd1Yomi(preYomi + " " + zip.getAdd1Yomi().substring(preYomi.length()));
            }

            list.add(zip.toJson());
        }
    }

    log.info("count:" + list.size());
    getParentChildDao().store(new ParentChild("building", new Date(), new LinkedList<String>(), list));
}

From source file:com.ikanow.aleph2.enrichment.utils.services.TestJsScriptEngineService.java

public void test_end2end(final String js_name) throws IOException {
    final ObjectMapper mapper = BeanTemplateUtils.configureMapper(Optional.empty());

    final String user_script = Resources.toString(Resources.getResource(js_name), Charsets.UTF_8);

    final JsScriptEngineService service_under_test = new JsScriptEngineService();

    final DataBucketBean bucket = Mockito.mock(DataBucketBean.class);
    //final IEnrichmentModuleContext context = Mockito.mock(IEnrichmentModuleContext.class);

    final LinkedList<ObjectNode> emitted = new LinkedList<>();
    final LinkedList<JsonNode> grouped = new LinkedList<>();
    final LinkedList<JsonNode> externally_emitted = new LinkedList<>();

    final IEnrichmentModuleContext context = Mockito.mock(IEnrichmentModuleContext.class, new Answer<Void>() {
        @SuppressWarnings("unchecked")
        public Void answer(InvocationOnMock invocation) {
            try {
                Object[] args = invocation.getArguments();
                assertTrue("Unexpected call to context object during test: " + invocation.getMethod().getName(),
                        invocation.getMethod().getName().equals("emitMutableObject")
                                || invocation.getMethod().getName().equals("externalEmit")
                                || invocation.getMethod().getName().equals("getLogger"));
                if (invocation.getMethod().getName().equals("emitMutableObject")) {
                    final Optional<JsonNode> grouping = (Optional<JsonNode>) args[3];
                    if (grouping.isPresent()) {
                        grouped.add(grouping.get());
                    }
                    emitted.add((ObjectNode) args[1]);
                } else if (invocation.getMethod().getName().equals("externalEmit")) {
                    final DataBucketBean to = (DataBucketBean) args[0];
                    final Either<JsonNode, Map<String, Object>> out = (Either<JsonNode, Map<String, Object>>) args[1];
                    externally_emitted.add(((ObjectNode) out.left().value()).put("bucket", to.full_name()));
                }
            } catch (Exception e) {
                fail(e.getMessage());
            }
            return null;
        }
    });

    final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
            .with(EnrichmentControlMetadataBean::config,
                    new LinkedHashMap<String, Object>(ImmutableMap.<String, Object>builder()
                            .put("script", user_script)
                            .put("config", ImmutableMap.<String, Object>builder().put("test", "config").build())
                            .put("imports", Arrays.asList("underscore-min.js")).build()))
            .done().get();

    service_under_test.onStageInitialize(context, bucket, control,
            Tuples._2T(ProcessingStage.batch, ProcessingStage.grouping),
            Optional.of(Arrays.asList("test1", "test2")));

    final List<Tuple2<Long, IBatchRecord>> batch = Arrays
            .asList(new BatchRecord(mapper.readTree("{\"test\":\"1\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"2\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"3\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"4\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"5\"}")))
            .stream().<Tuple2<Long, IBatchRecord>>map(br -> Tuples._2T(0L, br)).collect(Collectors.toList());

    service_under_test.onObjectBatch(batch.stream(), Optional.of(5),
            Optional.of(mapper.readTree("{\"key\":\"static\"}")));
    assertEquals(20, emitted.size());
    emitted.stream().forEach(on -> {
        if (on.has("len"))
            assertEquals(5, on.get("len").asInt());
        else if (on.has("grouping_key"))
            assertEquals("{\"key\":\"static\"}", on.get("grouping_key").toString());
        else if (on.has("prev")) {
            assertEquals("batch", on.get("prev").asText());
            assertEquals("grouping", on.get("next").asText());
            assertEquals("{\"test\":\"config\"}", on.get("config").toString());
            assertEquals(2, on.get("groups").size());
            //DEBUG
            //System.out.println(on.toString());
        } else {
            fail("missing field" + on.toString());
        }
    });

    assertEquals(5, grouped.size());
    assertTrue(grouped.stream().map(j -> j.toString()).allMatch(s -> s.equals("{\"key\":\"static\"}")));
    assertEquals(5, externally_emitted.size());

    // Finally, check cloning

    final IEnrichmentBatchModule service_under_test_2 = service_under_test.cloneForNewGrouping();

    final List<Tuple2<Long, IBatchRecord>> batch2 = Arrays
            .asList(new BatchRecord(mapper.readTree("{\"test\":\"1\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"2\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"3\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"4\"}")),
                    new BatchRecord(mapper.readTree("{\"test\":\"5\"}")))
            .stream().<Tuple2<Long, IBatchRecord>>map(br -> Tuples._2T(0L, br)).collect(Collectors.toList());

    emitted.clear();
    assertEquals(0, emitted.size());
    service_under_test_2.onObjectBatch(batch2.stream(), Optional.empty(), Optional.empty());
    assertEquals(20, emitted.size());
    emitted.stream().forEach(on -> {
        //DEBUG
        //System.out.println(on.toString());

        assertFalse("Wrong format: " + on.toString(), on.has("len"));
        assertFalse("Wrong format: " + on.toString(), on.has("grouping_key"));
        if (on.has("prev")) {
            assertEquals("batch", on.get("prev").asText());
            assertEquals("grouping", on.get("next").asText());
            assertEquals("{\"test\":\"config\"}", on.get("config").toString());
            assertEquals(2, on.get("groups").size());
        }
    });

}