Example usage for java.util ArrayList toString

List of usage examples for java.util ArrayList toString

Introduction

On this page you can find example usages of java.util.ArrayList.toString(), collected from open-source projects.

Prototype

public String toString() 

Document

Returns a string representation of the object. For ArrayList, the implementation inherited from AbstractCollection returns the elements in iteration order, enclosed in square brackets and separated by ", " (comma and space).
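
For a quick illustration, here is a minimal, self-contained sketch of what this method returns for an ArrayList (the class and variable names are ours, not from the examples below):

import java.util.ArrayList;
import java.util.List;

public class ToStringDemo {
    public static void main(String[] args) {
        List<String> names = new ArrayList<>();
        names.add("alpha");
        names.add("beta");
        names.add("gamma");
        // ArrayList inherits AbstractCollection.toString(): elements in
        // iteration order, in square brackets, separated by ", ".
        System.out.println(names);                   // [alpha, beta, gamma]
        System.out.println(new ArrayList<String>()); // []
    }
}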

Usage

From source file: at.ac.tuwien.dsg.cloud.salsa.engine.smartdeployment.main.SmartDeploymentService.java

private String enrich_CAMF_CSAR_Process(String csarTmp, String serviceName) {
    String extractedFolder = csarTmp + ".extracted";
    String toscaFile = extractedFolder + "/Definitions/Application.tosca";
    String scriptDir = extractedFolder + "/Scripts/";
    try {
        // extract CSAR
        CSARParser.extractCsar(new File(csarTmp), extractedFolder);

        // enrich with QUELLE
        String toscaXML = FileUtils.readFileToString(new File(toscaFile));
        EngineLogger.logger.debug("Read TOSCA string done. First 100 characters: {}",
                toscaXML.substring(0, Math.min(100, toscaXML.length())));
        EngineLogger.logger.debug("Now trying to enrich with QUELLE....");
        //enrichCAMFToscaWithQuelle(toscaXML, serviceName, new String[]{EnrichFunctions.QuelleCloudServiceRecommendation.toString(), EnrichFunctions.SalsaInfoCompletion.toString()});
        SmartDeploymentService sds = new SmartDeploymentService();
        String result = sds.enrichCAMFToscaWithQuelle(toscaXML, serviceName,
                new String[] { EnrichFunctions.QuelleCloudServiceRecommendation.toString() });
        EngineLogger.logger.debug("After enrich with QUELLE, the result is: {}", result);
        // write back to right place
        FileUtils.writeStringToFile(new File(toscaFile), result);

        // read software requirement in TOSCA for each node, put in a map + artifact
        // a map between node ID and full requirement in Tag
        Map<String, String> allRequirements = new HashMap<>();
        TDefinitions def = ToscaXmlProcess.readToscaFile(toscaFile);
        for (TNodeTemplate node : ToscaStructureQuery.getNodeTemplateList(def)) {
            EngineLogger.logger.debug("Checking node: {}", node.getId());
            String policiesStr = "";
            if (node.getPolicies() != null) {
                EngineLogger.logger.debug("Found policies of node: " + node.getId() + "/" + node.getName());
                List<TPolicy> policies = node.getPolicies().getPolicy();
                for (TPolicy p : policies) {
                    if (p.getPolicyType().getLocalPart().equals("Requirement")
                            && p.getPolicyType().getPrefix().equals("SmartDeployment")) {
                        if (p.getName().startsWith("CONSTRAINT")) {
                            // TODO: parse SYBL policies
                        } else {
                            policiesStr += p.getName().trim();
                            if (!p.getName().trim().endsWith(";")) {
                                policiesStr += ";";
                                EngineLogger.logger.debug("policiesStr = {}", policiesStr);
                            }
                        }
                    }
                }
            }
            EngineLogger.logger.debug("Collected policies for node {}: {}", node.getId(), policiesStr);
            allRequirements.put(node.getId(), policiesStr);
        }
        EngineLogger.logger.debug("In total, we got the following requirements: " + allRequirements.toString());

        // Load dependency graph knowledge base
        String dependencyDataFile = SmartDeploymentService.class.getResource("/data/salsa.dependencygraph.xml")
                .getFile();
        SalsaStackDependenciesGraph depGraph = SalsaStackDependenciesGraph
                .fromXML(FileUtils.readFileToString(new File(dependencyDataFile)));

        // ENRICH SCRIPT
        // extract all the requirement, put into the hashmap
        for (Map.Entry<String, String> entry : allRequirements.entrySet()) {
            EngineLogger.logger.debug("Analyzing node: {}. Full policies string is: *** {} ***", entry.getKey(),
                    entry.getValue());

            // extract CARL Strings
            CharStream stream = new ANTLRInputStream(entry.getValue());
            CARLLexer lexer = new CARLLexer(stream);
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            CARLParser parser = new CARLParser(tokens);
            RequirementsContext requirementsContext = parser.requirements();

            ParseTreeWalker walker = new ParseTreeWalker(); // create standard walker
            CARLProgramListener extractor = new CARLProgramListener(parser);
            walker.walk(extractor, requirementsContext); // initiate walk of tree with listener    
            org.eclipse.camf.carl.model.Requirements requirements = extractor.getRequirements();

            HashMap<String, String> allReqsOfNode = new HashMap<>();
            ArrayList<String> checkList = new ArrayList<>();
            // e.g. os=Ubuntu; os:ver=12.04; sw=jre:1.7
            // flatten all requirements of the node into the map
            for (IRequirement req : requirements.getRequirements()) {
                EngineLogger.logger.debug("Irequirement: " + req.toString());
                if (req.getCategory().equals(RequirementCategory.SOFTWARE)) {
                    SoftwareRequirement swr = (SoftwareRequirement) req;
                    allReqsOfNode.put("sw", removeQuote(swr.getName()));
                    allReqsOfNode.put(removeQuote(swr.getName()) + ":ver", swr.getVersion().getVersion());
                    checkList.add(swr.getName());
                } else {
                    if (req.getCategory().equals(RequirementCategory.OPERATING_SYSTEM)) { // the system part is generated by quelle
                        OSRequirement osReq = (OSRequirement) req;
                        if (osReq.getName() != null) {
                            allReqsOfNode.put("os", removeQuote(osReq.getName()));
                        }
                        if (osReq.getVersion() != null) {
                            allReqsOfNode.put("os:ver", osReq.getVersion().getVersion());
                        }

                    }
                }
            }
            // find all the deployment scripts for the "sw" requirements
            LinkedList<String> listOfScripts = new LinkedList<>();
            EngineLogger.logger.debug("The node {} will be enriched based on the requirements: {}",
                    entry.getKey(), checkList.toString());
            for (String swReq : checkList) {
                EngineLogger.logger.debug("Searching deployment script for software req: {}", swReq);
                SalsaStackDependenciesGraph theNode = depGraph.findNodeByName(swReq);
                EngineLogger.logger.debug("Node found: {}", theNode.getName());
                EngineLogger.logger.debug("All requirements: {}", allReqsOfNode.toString());

                LinkedList<String> tmp = theNode.searchDeploymentScriptTemplate(allReqsOfNode);
                if (tmp != null) {
                    listOfScripts.addAll(tmp);
                }
            }
            EngineLogger.logger.debug(listOfScripts.toString());

            // create a script to solve all dependencies first
            String nodeID = entry.getKey();
            String theDependencyScript = "#!/bin/bash \n\n######## Generated by the Decision Module to solve the software dependencies ######## \n\n";
            for (String appendScript : listOfScripts) {
                String theAppend = SmartDeploymentService.class.getResource("/scriptRepo/" + appendScript)
                        .getFile();
                String stringToAppend = FileUtils.readFileToString(new File(theAppend));
                theDependencyScript += stringToAppend + "\n";
            }
            theDependencyScript += "######## End of generated script ########";
            String tmpScriptFile = scriptDir + "/" + nodeID + ".salsatmp";

            // read the original script and remove the shebang (#!/bin/bash or #!/bin/sh) if present
            String originalScriptFile = null;
            TNodeTemplate node = ToscaStructureQuery.getNodetemplateById(nodeID, def);
            EngineLogger.logger.debug("Getting artifact template of node: {}", node.getId());
            for (TDeploymentArtifact art : node.getDeploymentArtifacts().getDeploymentArtifact()) {
                EngineLogger.logger.debug("Checking art.Name: {}, type: {}", art.getName(),
                        art.getArtifactType().getLocalPart());
                if (art.getArtifactType().getLocalPart().equals("ScriptArtifactPropertiesType")) {
                    String artTemplateID = art.getArtifactRef().getLocalPart();
                    TArtifactTemplate artTemplate = ToscaStructureQuery.getArtifactTemplateById(artTemplateID,
                            def);
                    if (artTemplate != null) {
                        originalScriptFile = artTemplate.getArtifactReferences().getArtifactReference().get(0)
                                .getReference();
                        originalScriptFile = extractedFolder + "/" + originalScriptFile;
                    }
                }
            }
            if (originalScriptFile != null) {
                String originalScript = FileUtils.readFileToString(new File(originalScriptFile));
                originalScript = originalScript.replace("#!/bin/bash", "");
                originalScript = originalScript.replace("#!/bin/sh", "");
                theDependencyScript += originalScript;
                FileUtils.writeStringToFile(new File(tmpScriptFile), theDependencyScript);
                EngineLogger.logger.debug("originalScript: {}, moveto: {}", originalScriptFile,
                        originalScriptFile + ".original");
                FileUtils.moveFile(FileUtils.getFile(originalScriptFile),
                        FileUtils.getFile(originalScriptFile + ".original"));
                FileUtils.moveFile(FileUtils.getFile(tmpScriptFile), FileUtils.getFile(originalScriptFile));
            } else {
                // TODO: there is no original script; add a new artifact template and attach the tmp script to it
            }

        } // end for each node in allRequirements analysis

        // repack the CSAR
        FileUtils.deleteQuietly(FileUtils.getFile(csarTmp));
        File directory = new File(extractedFolder);
        File[] fList = directory.listFiles();

        //CSARParser.buildCSAR(fList, csarTmp);
        String builtCSAR = SalsaConfiguration.getToscaTemplateStorage() + "/" + serviceName + ".csar";
        CSARParser.buildCSAR(extractedFolder, builtCSAR);

    } catch (IOException ex) {
        EngineLogger.logger.error("Error when enriching CSAR: " + csarTmp, ex);
        return "Error";
    } catch (JAXBException ex) {
        EngineLogger.logger.error("Cannot parse the Tosca definition in CSAR file: " + toscaFile, ex);
        return "Error";
    }

    // return the link to the CSAR
    String csarURLReturn = SalsaConfiguration.getSalsaCenterEndpoint() + "/rest/smart/CAMFTosca/enrich/CSAR/"
            + serviceName;
    EngineLogger.logger.info("Enrich CSAR done. URL to download is: {}", csarURLReturn);
    return csarURLReturn;
}
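
In the method above, toString() serves debug logging: checkList is an ArrayList<String>, so its toString() gives a readable one-line summary of the collected requirements. A minimal sketch of the same logging pattern (the list contents are illustrative, not taken from the project):

import java.util.ArrayList;
import java.util.List;

public class DebugLogDemo {
    public static void main(String[] args) {
        List<String> checkList = new ArrayList<>();
        checkList.add("apache");
        checkList.add("mysql");
        // String concatenation invokes toString() implicitly, so the
        // explicit calls in the method above are optional.
        System.out.println("Enriching based on the requirements: " + checkList);
        // prints: Enriching based on the requirements: [apache, mysql]
    }
}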

From source file: ch.icclab.cyclops.resource.impl.GenerateResource.java

/**
 * Generate the CDR of all the users for the selected meters
 * <p>
 * Pseudo Code
 * 1. Get the list of selected meters
 * 2. Query the UDR service to get the usage information under these meters for all the users for a time period
 * 3. Get the rate for the same meters for a same time period
 * 4. Combine the rate and usage to get the charge value
 * 5. Save it in the db
 *
 * @return boolean
 */
private boolean generateCdr() throws IOException, JSONException {
    //TODO: split it into smaller methods
    logger.trace("BEGIN boolean generateCdr() throws IOException, JSONException");
    Object usage;
    double charge;
    String from, to;
    String[] time;
    int indexUserId, indexUsage;
    ArrayList usageListArr, usageArr;
    ArrayList columnArr;
    UDRServiceClient udrClient = new UDRServiceClient();
    RateResource rateResource = new RateResource();
    ArrayList<ArrayList<Object>> objArr = new ArrayList<ArrayList<Object>>();
    Double rate;
    TSDBData tsdbData;
    boolean result;
    String userid;
    ArrayList<ResourceUsage> resourceUsageArray;
    ArrayList<Object> objArrNode;
    HashMap tags;
    POJOUtil pojoUtil = new POJOUtil();

    DateTimeUtil dateTimeUtil = new DateTimeUtil();
    time = dateTimeUtil.getRange();

    from = time[1];
    to = time[0];
    logger.trace("DATA boolean generateCdr() throws IOException, JSONException: enabledResourceList"
            + enabledResourceList);
    for (int i = 0; i < enabledResourceList.size(); i++) {
        tsdbData = rateResource.getResourceRate(enabledResourceList.get(i).toString(), from, to);
        rate = calculateRate(tsdbData);
        resourceUsageArray = udrClient.getResourceUsageData(enabledResourceList.get(i).toString(), from, to);
        logger.trace("DATA boolean generateCdr() throws IOException, JSONException: resourceUsageStr"
                + resourceUsageArray);
        for (ResourceUsage resource : resourceUsageArray) {
            columnArr = resource.getColumn();
            logger.trace("DATA boolean generateCdr()...: columnArr=" + Arrays.toString(columnArr.toArray()));
            usageListArr = resource.getUsage();
            tags = resource.getTags();
            logger.trace("DATA boolean generateCdr()...: tags=" + tags);
            //indexUserId = columnArr.indexOf("userid");
            userid = tags.get("userid").toString();
            //userid = userid.substring(0, userid.length());
            logger.trace("DATA boolean generateCdr()...: userid=" + userid);
            logger.trace("DATA boolean generateCdr()...: usageListArr=" + usageListArr.toString());
            indexUsage = columnArr.indexOf("mean");
            // The below if condition differentiates between the gauge and cumulative meters of openstack
            if (indexUsage < 0) {
                indexUsage = columnArr.indexOf("usage"); // "usage" when the SQL does not aggregate with SUM, otherwise "sum"
            }
            // Iterate through the usage arraylist to extract the userid and usage.
            // Multiply the usage by the resource's rate and save the result into an arraylist
            for (int j = 0; j < usageListArr.size(); j++) {
                usageArr = (ArrayList) usageListArr.get(j);
                logger.trace("DATA boolean generateCdr()...: indexUsage=" + indexUsage);
                usage = usageArr.get(indexUsage);
                // Calculate the charge for a resource per user
                double d = Double.parseDouble(usage.toString());
                charge = (d * rate);
                String resources = enabledResourceList.get(i).toString();
                logger.trace("DATA boolean generateCdr()...: objArr=" + Arrays.toString(objArr.toArray()));
                objArr = pojoUtil.populateList(usageListArr, objArr, resources, userid, usage, charge);
                /*for (int k = 0; k < usageListArr.size(); k++) {//resourceUsageStr.get(usage).size()
                objArrNode = new ArrayList<Object>();
                //userid = (String) usageArr.get(indexUserId);
                        
                objArrNode.add(resources);
                objArrNode.add(userid);
                objArrNode.add(usage);
                objArrNode.add(charge);
                objArr.add(objArrNode);
                }*/
            }
        }

    }
    // Save the charge array into the database
    result = savePrice(objArr);
    logger.trace("END boolean generateCdr() throws IOException, JSONException");
    return result;
}
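
Note that objArr above is an ArrayList of ArrayLists; ArrayList.toString() calls toString() on each element, so nested lists render as nested brackets in the trace output. A small self-contained sketch (the values are made up):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class NestedListDemo {
    public static void main(String[] args) {
        List<List<Object>> objArr = new ArrayList<>();
        objArr.add(new ArrayList<>(Arrays.asList("cpu", "user1", 3.5, 0.7)));
        objArr.add(new ArrayList<>(Arrays.asList("ram", "user2", 1.0, 0.2)));
        // Nested toString() calls produce nested brackets.
        System.out.println(objArr); // [[cpu, user1, 3.5, 0.7], [ram, user2, 1.0, 0.2]]
    }
}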

From source file: org.neo4j.gis.spatial.osm.OSMImporter.java

public void importFile(OSMWriter<?> osmWriter, String dataset, boolean allPoints, Charset charset)
        throws IOException, XMLStreamException {
    System.out.println("Importing with osm-writer: " + osmWriter);
    osmWriter.getOrCreateOSMDataset(layerName);
    osm_dataset = osmWriter.getDatasetId();

    long startTime = System.currentTimeMillis();
    long[] times = new long[] { 0L, 0L, 0L, 0L };
    javax.xml.stream.XMLInputFactory factory = javax.xml.stream.XMLInputFactory.newInstance();
    CountedFileReader reader = new CountedFileReader(dataset, charset);
    javax.xml.stream.XMLStreamReader parser = factory.createXMLStreamReader(reader);
    int countXMLTags = 0;
    beginProgressMonitor(100);
    setLogContext(dataset);
    boolean startedWays = false;
    boolean startedRelations = false;
    try {
        ArrayList<String> currentXMLTags = new ArrayList<String>();
        int depth = 0;
        Map<String, Object> wayProperties = null;
        ArrayList<Long> wayNodes = new ArrayList<Long>();
        Map<String, Object> relationProperties = null;
        ArrayList<Map<String, Object>> relationMembers = new ArrayList<Map<String, Object>>();
        LinkedHashMap<String, Object> currentNodeTags = new LinkedHashMap<String, Object>();
        while (true) {
            updateProgressMonitor(reader.getPercentRead());
            incrLogContext();
            int event = parser.next();
            if (event == javax.xml.stream.XMLStreamConstants.END_DOCUMENT) {
                break;
            }
            switch (event) {
            case javax.xml.stream.XMLStreamConstants.START_ELEMENT:
                currentXMLTags.add(depth, parser.getLocalName());
                String tagPath = currentXMLTags.toString();
                if (tagPath.equals("[osm]")) {
                    osmWriter.setDatasetProperties(extractProperties(parser));
                } else if (tagPath.equals("[osm, bounds]")) {
                    osmWriter.addOSMBBox(extractProperties("bbox", parser));
                } else if (tagPath.equals("[osm, node]")) {
                    // <node id="269682538" lat="56.0420950"
                    // lon="12.9693483" user="sanna" uid="31450"
                    // visible="true" version="1" changeset="133823"
                    // timestamp="2008-06-11T12:36:28Z"/>
                    osmWriter.createOSMNode(extractProperties("node", parser));
                } else if (tagPath.equals("[osm, way]")) {
                    // <way id="27359054" user="spull" uid="61533"
                    // visible="true" version="8" changeset="4707351"
                    // timestamp="2010-05-15T15:39:57Z">
                    if (!startedWays) {
                        startedWays = true;
                        times[0] = System.currentTimeMillis();
                        osmWriter.optimize();
                        times[1] = System.currentTimeMillis();
                    }
                    wayProperties = extractProperties("way", parser);
                    wayNodes.clear();
                } else if (tagPath.equals("[osm, way, nd]")) {
                    Map<String, Object> properties = extractProperties(parser);
                    wayNodes.add(Long.parseLong(properties.get("ref").toString()));
                } else if (tagPath.endsWith("tag]")) {
                    Map<String, Object> properties = extractProperties(parser);
                    currentNodeTags.put(properties.get("k").toString(), properties.get("v").toString());
                } else if (tagPath.equals("[osm, relation]")) {
                    // <relation id="77965" user="Grillo" uid="13957"
                    // visible="true" version="24" changeset="5465617"
                    // timestamp="2010-08-11T19:25:46Z">
                    if (!startedRelations) {
                        startedRelations = true;
                        times[2] = System.currentTimeMillis();
                        osmWriter.optimize();
                        times[3] = System.currentTimeMillis();
                    }
                    relationProperties = extractProperties("relation", parser);
                    relationMembers.clear();
                } else if (tagPath.equals("[osm, relation, member]")) {
                    relationMembers.add(extractProperties(parser));
                }
                if (startedRelations) {
                    if (countXMLTags < 10) {
                        log("Starting tag at depth " + depth + ": " + currentXMLTags.get(depth) + " - "
                                + currentXMLTags.toString());
                        for (int i = 0; i < parser.getAttributeCount(); i++) {
                            log("\t" + currentXMLTags.toString() + ": " + parser.getAttributeLocalName(i) + "["
                                    + parser.getAttributeNamespace(i) + "," + parser.getAttributePrefix(i) + ","
                                    + parser.getAttributeType(i) + "," + "] = " + parser.getAttributeValue(i));
                        }
                    }
                    countXMLTags++;
                }
                depth++;
                break;
            case javax.xml.stream.XMLStreamConstants.END_ELEMENT:
                if (currentXMLTags.toString().equals("[osm, node]")) {
                    osmWriter.addOSMNodeTags(allPoints, currentNodeTags);
                } else if (currentXMLTags.toString().equals("[osm, way]")) {
                    osmWriter.createOSMWay(wayProperties, wayNodes, currentNodeTags);
                } else if (currentXMLTags.toString().equals("[osm, relation]")) {
                    osmWriter.createOSMRelation(relationProperties, relationMembers, currentNodeTags);
                }
                depth--;
                currentXMLTags.remove(depth);
                // log("Ending tag at depth "+depth+": "+currentTags.get(depth));
                break;
            default:
                break;
            }
        }
    } finally {
        endProgressMonitor();
        parser.close();
        osmWriter.finish();
        this.osm_dataset = osmWriter.getDatasetId();
    }
    describeTimes(startTime, times);
    osmWriter.describeMissing();
    osmWriter.describeLoaded();

    long stopTime = System.currentTimeMillis();
    log("info | Elapsed time in seconds: " + (1.0 * (stopTime - startTime) / 1000.0));
    stats.dumpGeomStats();
    stats.printTagStats();
}
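
A notable idiom in importFile(): the current XML path is matched by comparing currentXMLTags.toString() against literals such as "[osm, node]". This works because ArrayList inherits AbstractCollection.toString(), whose bracket-and-comma format is specified and stable. A minimal sketch of the idiom (the tag names are illustrative):

import java.util.ArrayList;
import java.util.List;

public class TagPathDemo {
    public static void main(String[] args) {
        List<String> currentXMLTags = new ArrayList<>();
        currentXMLTags.add("osm");
        currentXMLTags.add("node");
        // toString() yields "[osm, node]", so the path can be compared
        // against string literals exactly as the importer does.
        System.out.println(currentXMLTags.toString().equals("[osm, node]")); // true
    }
}

Relying on toString() keeps the dispatch compact, though joining the tags with an explicit delimiter (e.g. String.join("/", currentXMLTags)) would make the format an explicit contract rather than an inherited one.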

From source file: gima.neo4j.testsuite.osmcheck.OSMImporter.java

public void importFile(OSMWriter<?> osmWriter, String dataset, boolean allPoints, Charset charset)
        throws IOException, XMLStreamException {
    System.out.println("Importing with osm-writer: " + osmWriter);
    osmWriter.getOrCreateOSMDataset(layerName);
    osm_dataset = osmWriter.getDatasetId();

    long startTime = System.currentTimeMillis();
    long[] times = new long[] { 0L, 0L, 0L, 0L };
    javax.xml.stream.XMLInputFactory factory = javax.xml.stream.XMLInputFactory.newInstance();
    CountedFileReader reader = new CountedFileReader(dataset, charset);
    javax.xml.stream.XMLStreamReader parser = factory.createXMLStreamReader(reader);
    int countXMLTags = 0;
    beginProgressMonitor(100);
    setLogContext(dataset);
    boolean startedWays = false;
    boolean startedRelations = false;
    try {
        ArrayList<String> currentXMLTags = new ArrayList<String>();
        int depth = 0;
        Map<String, Object> wayProperties = null;
        ArrayList<Long> wayNodes = new ArrayList<Long>();
        Map<String, Object> relationProperties = null;
        ArrayList<Map<String, Object>> relationMembers = new ArrayList<Map<String, Object>>();
        LinkedHashMap<String, Object> currentNodeTags = new LinkedHashMap<String, Object>();
        while (true) {
            updateProgressMonitor(reader.getPercentRead());
            incrLogContext();
            int event = parser.next();
            if (event == javax.xml.stream.XMLStreamConstants.END_DOCUMENT) {
                break;
            }
            switch (event) {
            case javax.xml.stream.XMLStreamConstants.START_ELEMENT:
                currentXMLTags.add(depth, parser.getLocalName());
                String tagPath = currentXMLTags.toString();
                if (tagPath.equals("[osm]")) {
                    osmWriter.setDatasetProperties(extractProperties(parser));
                } else if (tagPath.equals("[osm, bounds]")) {
                    osmWriter.addOSMBBox(extractProperties("bbox", parser));
                } else if (tagPath.equals("[osm, node]")) {
                    // <node id="269682538" lat="56.0420950" lon="12.9693483" user="sanna" uid="31450" visible="true" version="1" changeset="133823" timestamp="2008-06-11T12:36:28Z"/>
                    osmWriter.createOSMNode(extractProperties("node", parser));
                } else if (tagPath.equals("[osm, way]")) {
                    // <way id="27359054" user="spull" uid="61533" visible="true" version="8" changeset="4707351" timestamp="2010-05-15T15:39:57Z">
                    if (!startedWays) {
                        startedWays = true;
                        times[0] = System.currentTimeMillis();
                        osmWriter.optimize();
                        times[1] = System.currentTimeMillis();
                    }
                    wayProperties = extractProperties("way", parser);
                    wayNodes.clear();
                } else if (tagPath.equals("[osm, way, nd]")) {
                    Map<String, Object> properties = extractProperties(parser);
                    wayNodes.add(Long.parseLong(properties.get("ref").toString()));
                } else if (tagPath.endsWith("tag]")) {
                    Map<String, Object> properties = extractProperties(parser);
                    currentNodeTags.put(properties.get("k").toString(), properties.get("v").toString());
                } else if (tagPath.equals("[osm, relation]")) {
                    // <relation id="77965" user="Grillo" uid="13957" visible="true" version="24" changeset="5465617" timestamp="2010-08-11T19:25:46Z">
                    if (!startedRelations) {
                        startedRelations = true;
                        times[2] = System.currentTimeMillis();
                        osmWriter.optimize();
                        times[3] = System.currentTimeMillis();
                    }
                    relationProperties = extractProperties("relation", parser);
                    relationMembers.clear();
                } else if (tagPath.equals("[osm, relation, member]")) {
                    relationMembers.add(extractProperties(parser));
                }
                if (startedRelations) {
                    if (countXMLTags < 10) {
                        log("Starting tag at depth " + depth + ": " + currentXMLTags.get(depth) + " - "
                                + currentXMLTags.toString());
                        for (int i = 0; i < parser.getAttributeCount(); i++) {
                            log("\t" + currentXMLTags.toString() + ": " + parser.getAttributeLocalName(i) + "["
                                    + parser.getAttributeNamespace(i) + "," + parser.getAttributePrefix(i) + ","
                                    + parser.getAttributeType(i) + "," + "] = " + parser.getAttributeValue(i));
                        }
                    }
                    countXMLTags++;
                }
                depth++;
                break;
            case javax.xml.stream.XMLStreamConstants.END_ELEMENT:
                if (currentXMLTags.toString().equals("[osm, node]")) {
                    osmWriter.addOSMNodeTags(allPoints, currentNodeTags);
                } else if (currentXMLTags.toString().equals("[osm, way]")) {
                    osmWriter.createOSMWay(wayProperties, wayNodes, currentNodeTags);
                } else if (currentXMLTags.toString().equals("[osm, relation]")) {
                    osmWriter.createOSMRelation(relationProperties, relationMembers, currentNodeTags);
                }
                depth--;
                currentXMLTags.remove(depth);
                // log("Ending tag at depth "+depth+": "+currentTags.get(depth));
                break;
            default:
                break;
            }
        }
    } finally {
        endProgressMonitor();
        parser.close();
        osmWriter.finish();
        this.osm_dataset = osmWriter.getDatasetId();
    }
    describeTimes(startTime, times);
    osmWriter.describeMissing();
    osmWriter.describeLoaded();

    long stopTime = System.currentTimeMillis();
    log("info | Elapsed time in seconds: " + (1.0 * (stopTime - startTime) / 1000.0));
    stats.dumpGeomStats();
    stats.printTagStats();
}

From source file: org.apache.accumulo.examples.wikisearch.logic.AbstractQueryLogic.java

public Results runQuery(Connector connector, List<String> authorizations, String query, Date beginDate,
        Date endDate, Set<String> types) {

    if (StringUtils.isEmpty(query)) {
        throw new IllegalArgumentException(
                "NULL QueryNode reference passed to " + this.getClass().getSimpleName());
    }

    Set<Range> ranges = new HashSet<Range>();
    Set<String> typeFilter = types;
    String[] array = authorizations.toArray(new String[0]);
    Authorizations auths = new Authorizations(array);
    Results results = new Results();

    // Get the query string
    String queryString = query;

    StopWatch abstractQueryLogic = new StopWatch();
    StopWatch optimizedQuery = new StopWatch();
    StopWatch queryGlobalIndex = new StopWatch();
    StopWatch optimizedEventQuery = new StopWatch();
    StopWatch fullScanQuery = new StopWatch();
    StopWatch processResults = new StopWatch();

    abstractQueryLogic.start();

    StopWatch parseQuery = new StopWatch();
    parseQuery.start();

    QueryParser parser;
    try {
        if (log.isDebugEnabled()) {
            log.debug("ShardQueryLogic calling QueryParser.execute");
        }
        parser = new QueryParser();
        parser.execute(queryString);
    } catch (org.apache.commons.jexl2.parser.ParseException e1) {
        throw new IllegalArgumentException("Error parsing query", e1);
    }
    int hash = parser.getHashValue();
    parseQuery.stop();
    if (log.isDebugEnabled()) {
        log.debug(hash + " Query: " + queryString);
    }

    Set<String> fields = new HashSet<String>();
    for (String f : parser.getQueryIdentifiers()) {
        fields.add(f);
    }
    if (log.isDebugEnabled()) {
        log.debug("getQueryIdentifiers: " + parser.getQueryIdentifiers().toString());
    }
    // Remove any negated fields from the fields list; we don't want to look up negated fields
    // in the index.
    fields.removeAll(parser.getNegatedTermsForOptimizer());

    if (log.isDebugEnabled()) {
        log.debug("fields after removing negated terms: " + fields.toString());
    }
    // Get the mapping of field name to QueryTerm object from the query. The QueryTerm object
    // contains the operator, whether it's negated, and the literal to test against.
    Multimap<String, QueryTerm> terms = parser.getQueryTerms();

    // Find out which terms are indexed
    // TODO: Should we cache indexed terms or does that not make sense since we are always
    // loading data.
    StopWatch queryMetadata = new StopWatch();
    queryMetadata.start();
    Map<String, Multimap<String, Class<? extends Normalizer>>> metadataResults;
    try {
        metadataResults = findIndexedTerms(connector, auths, fields, typeFilter);
    } catch (Exception e1) {
        throw new RuntimeException("Error in metadata lookup", e1);
    }

    // Create a map of indexed term to set of normalizers for it
    Multimap<String, Normalizer> indexedTerms = HashMultimap.create();
    for (Entry<String, Multimap<String, Class<? extends Normalizer>>> entry : metadataResults.entrySet()) {
        // Get the normalizer from the normalizer cache
        for (Class<? extends Normalizer> clazz : entry.getValue().values()) {
            indexedTerms.put(entry.getKey(), normalizerCacheMap.get(clazz));
        }
    }
    queryMetadata.stop();
    if (log.isDebugEnabled()) {
        log.debug(hash + " Indexed Terms: " + indexedTerms.toString());
    }

    Set<String> orTerms = parser.getOrTermsForOptimizer();

    // Iterate over the query terms to get the operators specified in the query.
    ArrayList<String> unevaluatedExpressions = new ArrayList<String>();
    boolean unsupportedOperatorSpecified = false;
    for (Entry<String, QueryTerm> entry : terms.entries()) {
        if (null == entry.getValue()) {
            continue;
        }

        if (null != this.unevaluatedFields && this.unevaluatedFields.contains(entry.getKey().trim())) {
            unevaluatedExpressions.add(entry.getKey().trim() + " " + entry.getValue().getOperator() + " "
                    + entry.getValue().getValue());
        }

        int operator = JexlOperatorConstants.getJJTNodeType(entry.getValue().getOperator());
        if (!(operator == ParserTreeConstants.JJTEQNODE || operator == ParserTreeConstants.JJTNENODE
                || operator == ParserTreeConstants.JJTLENODE || operator == ParserTreeConstants.JJTLTNODE
                || operator == ParserTreeConstants.JJTGENODE || operator == ParserTreeConstants.JJTGTNODE
                || operator == ParserTreeConstants.JJTERNODE)) {
            unsupportedOperatorSpecified = true;
            break;
        }
    }
    if (null != unevaluatedExpressions)
        unevaluatedExpressions.trimToSize();
    if (log.isDebugEnabled()) {
        log.debug(hash + " unsupportedOperators: " + unsupportedOperatorSpecified + " indexedTerms: "
                + indexedTerms.toString() + " orTerms: " + orTerms.toString() + " unevaluatedExpressions: "
                + unevaluatedExpressions.toString());
    }

    // We can use the intersecting iterator over the field index as an optimization under the
    // following conditions
    //
    // 1. No unsupported operators in the query.
    // 2. No 'or' operators and at least one term indexed
    // or
    // 1. No unsupported operators in the query.
    // 2. and all terms indexed
    // or
    // 1. All or'd terms are indexed. NOTE: this will potentially skip some queries and push to a full table scan.
    // We should look into finding a better way to handle whether we do an optimized query or not.
    boolean optimizationSucceeded = false;
    boolean orsAllIndexed = false;
    if (orTerms.isEmpty()) {
        orsAllIndexed = false;
    } else {
        orsAllIndexed = indexedTerms.keySet().containsAll(orTerms);
    }

    if (log.isDebugEnabled()) {
        log.debug("orsAllIndexed: " + orsAllIndexed);
    }

    if (!unsupportedOperatorSpecified && (((null == orTerms || orTerms.isEmpty()) && indexedTerms.size() > 0)
            || (fields.size() > 0 && indexedTerms.size() == fields.size()) || orsAllIndexed)) {
        optimizedQuery.start();
        // Set up intersecting iterator over field index.

        // Get information from the global index for the indexed terms. The results object will contain the term
        // mapped to an object that contains the total count, and partitions where this term is located.

        // TODO: Should we cache indexed term information or does that not make sense since we are always loading data
        queryGlobalIndex.start();
        IndexRanges termIndexInfo;
        try {
            // If fields is null or zero, then it's probably the case that the user entered a value
            // to search for with no fields. Check for the value in index.
            if (fields.isEmpty()) {
                termIndexInfo = this.getTermIndexInformation(connector, auths, queryString, typeFilter);
                if (null != termIndexInfo && termIndexInfo.getRanges().isEmpty()) {
                    // Then we didn't find anything in the index for this query. This may happen for an indexed term that has wildcards
                    // in unhandled locations.
                    // Break out of here by throwing a named exception and do full scan
                    throw new DoNotPerformOptimizedQueryException();
                }
                // We need to rewrite the query string here so that it's valid.
                if (termIndexInfo instanceof UnionIndexRanges) {
                    UnionIndexRanges union = (UnionIndexRanges) termIndexInfo;
                    StringBuilder buf = new StringBuilder();
                    String sep = "";
                    for (String fieldName : union.getFieldNamesAndValues().keySet()) {
                        buf.append(sep).append(fieldName).append(" == ");
                        if (!(queryString.startsWith("'") && queryString.endsWith("'"))) {
                            buf.append("'").append(queryString).append("'");
                        } else {
                            buf.append(queryString);
                        }
                        sep = " or ";
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("Rewrote query for non-fielded single term query: " + queryString + " to "
                                + buf.toString());
                    }
                    queryString = buf.toString();
                } else {
                    throw new RuntimeException("Unexpected IndexRanges implementation");
                }
            } else {
                RangeCalculator calc = this.getTermIndexInformation(connector, auths, indexedTerms, terms,
                        this.getIndexTableName(), this.getReverseIndexTableName(), queryString,
                        this.queryThreads, typeFilter);
                if (null == calc.getResult() || calc.getResult().isEmpty()) {
                    // Then we didn't find anything in the index for this query. This may happen for an indexed term that has wildcards
                    // in unhandled locations.
                    // Break out of here by throwing a named exception and do full scan
                    throw new DoNotPerformOptimizedQueryException();
                }
                termIndexInfo = new UnionIndexRanges();
                termIndexInfo.setIndexValuesToOriginalValues(calc.getIndexValues());
                termIndexInfo.setFieldNamesAndValues(calc.getIndexEntries());
                termIndexInfo.getTermCardinality().putAll(calc.getTermCardinalities());
                for (Range r : calc.getResult()) {
                    // foo is a placeholder and is ignored.
                    termIndexInfo.add("foo", r);
                }
            }
        } catch (TableNotFoundException e) {
            log.error(this.getIndexTableName() + " not found", e);
            throw new RuntimeException(this.getIndexTableName() + " not found", e);
        } catch (org.apache.commons.jexl2.parser.ParseException e) {
            throw new RuntimeException("Error determining ranges for query: " + queryString, e);
        } catch (DoNotPerformOptimizedQueryException e) {
            log.info("Indexed fields not found in index, performing full scan");
            termIndexInfo = null;
        }
        queryGlobalIndex.stop();

        // Determine if we should proceed with optimized query based on results from the global index
        boolean proceed = false;
        if (null == termIndexInfo || termIndexInfo.getFieldNamesAndValues().values().size() == 0) {
            proceed = false;
        } else if (null != orTerms && orTerms.size() > 0
                && (termIndexInfo.getFieldNamesAndValues().values().size() == indexedTerms.size())) {
            proceed = true;
        } else if (termIndexInfo.getFieldNamesAndValues().values().size() > 0) {
            proceed = true;
        } else if (orsAllIndexed) {
            proceed = true;
        } else {
            proceed = false;
        }
        if (log.isDebugEnabled()) {
            log.debug("Proceed with optimized query: " + proceed);
            if (null != termIndexInfo)
                log.debug("termIndexInfo.getTermsFound().size(): "
                        + termIndexInfo.getFieldNamesAndValues().values().size() + " indexedTerms.size: "
                        + indexedTerms.size() + " fields.size: " + fields.size());
        }
        if (proceed) {

            if (log.isDebugEnabled()) {
                log.debug(hash + " Performing optimized query");
            }
            // Use the scan ranges from the GlobalIndexRanges object as the ranges for the batch scanner
            ranges = termIndexInfo.getRanges();
            if (log.isDebugEnabled()) {
                log.info(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
            }

            // Create BatchScanner, set the ranges, and setup the iterators.
            optimizedEventQuery.start();
            BatchScanner bs = null;
            try {
                bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
                bs.setRanges(ranges);
                IteratorSetting si = new IteratorSetting(21, "eval", OptimizedQueryIterator.class);

                if (log.isDebugEnabled()) {
                    log.debug("Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
                }
                // Set the query option
                si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
                // Set the Indexed Terms List option. This is the field name and normalized field value pair separated
                // by a comma.
                StringBuilder buf = new StringBuilder();
                String sep = "";
                for (Entry<String, String> entry : termIndexInfo.getFieldNamesAndValues().entries()) {
                    buf.append(sep);
                    buf.append(entry.getKey());
                    buf.append(":");
                    buf.append(termIndexInfo.getIndexValuesToOriginalValues().get(entry.getValue()));
                    buf.append(":");
                    buf.append(entry.getValue());
                    if (sep.equals("")) {
                        sep = ";";
                    }
                }
                if (log.isDebugEnabled()) {
                    log.debug("Setting scan option: " + FieldIndexQueryReWriter.INDEXED_TERMS_LIST + " to "
                            + buf.toString());
                }
                FieldIndexQueryReWriter rewriter = new FieldIndexQueryReWriter();
                String q = "";
                try {
                    q = queryString;
                    q = rewriter.applyCaseSensitivity(q, true, false);// Set upper/lower case for fieldname/fieldvalue
                    Map<String, String> opts = new HashMap<String, String>();
                    opts.put(FieldIndexQueryReWriter.INDEXED_TERMS_LIST, buf.toString());
                    q = rewriter.removeNonIndexedTermsAndInvalidRanges(q, opts);
                    q = rewriter.applyNormalizedTerms(q, opts);
                    if (log.isDebugEnabled()) {
                        log.debug("runServerQuery, FieldIndex Query: " + q);
                    }
                } catch (org.apache.commons.jexl2.parser.ParseException ex) {
                    log.error("Could not parse query, Jexl ParseException: " + ex);
                } catch (Exception ex) {
                    log.error("Problem rewriting query, Exception: " + ex.getMessage());
                }
                si.addOption(BooleanLogicIterator.FIELD_INDEX_QUERY, q);

                // Set the term cardinality option
                sep = "";
                buf.delete(0, buf.length());
                for (Entry<String, Long> entry : termIndexInfo.getTermCardinality().entrySet()) {
                    buf.append(sep);
                    buf.append(entry.getKey());
                    buf.append(":");
                    buf.append(entry.getValue());
                    sep = ",";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting scan option: " + BooleanLogicIterator.TERM_CARDINALITIES + " to "
                            + buf.toString());
                si.addOption(BooleanLogicIterator.TERM_CARDINALITIES, buf.toString());
                if (this.useReadAheadIterator) {
                    if (log.isDebugEnabled()) {
                        log.debug("Enabling read ahead iterator with queue size: " + this.readAheadQueueSize
                                + " and timeout: " + this.readAheadTimeOut);
                    }
                    si.addOption(ReadAheadIterator.QUEUE_SIZE, this.readAheadQueueSize);
                    si.addOption(ReadAheadIterator.TIMEOUT, this.readAheadTimeOut);

                }

                if (null != unevaluatedExpressions) {
                    StringBuilder unevaluatedExpressionList = new StringBuilder();
                    String sep2 = "";
                    for (String exp : unevaluatedExpressions) {
                        unevaluatedExpressionList.append(sep2).append(exp);
                        sep2 = ",";
                    }
                    if (log.isDebugEnabled())
                        log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
                                + unevaluatedExpressionList.toString());
                    si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS,
                            unevaluatedExpressionList.toString());
                }

                bs.addScanIterator(si);

                processResults.start();
                processResults.suspend();
                long count = 0;
                for (Entry<Key, Value> entry : bs) {
                    count++;
                    // The key that is returned by the EvaluatingIterator is not the same key that is in
                    // the table. The value that is returned by the EvaluatingIterator is a kryo
                    // serialized EventFields object.
                    processResults.resume();
                    Document d = this.createDocument(entry.getKey(), entry.getValue());
                    results.getResults().add(d);
                    processResults.suspend();
                }
                log.info(count + " matching entries found in optimized query.");
                optimizationSucceeded = true;
                processResults.stop();
            } catch (TableNotFoundException e) {
                log.error(this.getTableName() + " not found", e);
                throw new RuntimeException(this.getTableName() + " not found", e);
            } finally {
                if (bs != null) {
                    bs.close();
                }
            }
            optimizedEventQuery.stop();
        }
        optimizedQuery.stop();
    }

    // WE should look into finding a better way to handle whether we do an optimized query or not.
    // We are not setting up an else condition here because we may have aborted the logic early in the if statement.
    if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0)
            && (indexedTerms.size() != fields.size()) && !orsAllIndexed)) {
        // if (!optimizationSucceeded || ((null != orTerms && orTerms.size() > 0) && (indexedTerms.size() != fields.size()))) {
        fullScanQuery.start();
        if (log.isDebugEnabled()) {
            log.debug(hash + " Performing full scan query");
        }

        // Set up a full scan using the date ranges from the query
        // Create BatchScanner, set the ranges, and setup the iterators.
        BatchScanner bs = null;
        try {
            // The ranges are the start and end dates
            Collection<Range> r = getFullScanRange(beginDate, endDate, terms);
            ranges.addAll(r);

            if (log.isDebugEnabled()) {
                log.debug(hash + " Ranges: count: " + ranges.size() + ", " + ranges.toString());
            }

            bs = connector.createBatchScanner(this.getTableName(), auths, queryThreads);
            bs.setRanges(ranges);
            IteratorSetting si = new IteratorSetting(22, "eval", EvaluatingIterator.class);
            // Create datatype regex if needed
            if (null != typeFilter) {
                StringBuilder buf = new StringBuilder();
                String s = "";
                for (String type : typeFilter) {
                    buf.append(s).append(type).append(".*");
                    s = "|";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting colf regex iterator to: " + buf.toString());
                IteratorSetting ri = new IteratorSetting(21, "typeFilter", RegExFilter.class);
                RegExFilter.setRegexs(ri, null, buf.toString(), null, null, false);
                bs.addScanIterator(ri);
            }
            if (log.isDebugEnabled()) {
                log.debug("Setting scan option: " + EvaluatingIterator.QUERY_OPTION + " to " + queryString);
            }
            si.addOption(EvaluatingIterator.QUERY_OPTION, queryString);
            if (null != unevaluatedExpressions) {
                StringBuilder unevaluatedExpressionList = new StringBuilder();
                String sep2 = "";
                for (String exp : unevaluatedExpressions) {
                    unevaluatedExpressionList.append(sep2).append(exp);
                    sep2 = ",";
                }
                if (log.isDebugEnabled())
                    log.debug("Setting scan option: " + EvaluatingIterator.UNEVALUTED_EXPRESSIONS + " to "
                            + unevaluatedExpressionList.toString());
                si.addOption(EvaluatingIterator.UNEVALUTED_EXPRESSIONS, unevaluatedExpressionList.toString());
            }
            bs.addScanIterator(si);
            long count = 0;
            processResults.start();
            processResults.suspend();
            for (Entry<Key, Value> entry : bs) {
                count++;
                // The key that is returned by the EvaluatingIterator is not the same key that is in
                // the partition table. The value that is returned by the EvaluatingIterator is a kryo
                // serialized EventFields object.
                processResults.resume();
                Document d = this.createDocument(entry.getKey(), entry.getValue());
                results.getResults().add(d);
                processResults.suspend();
            }
            processResults.stop();
            log.info(count + " matching entries found in full scan query.");
        } catch (TableNotFoundException e) {
            log.error(this.getTableName() + " not found", e);
        } finally {
            if (bs != null) {
                bs.close();
            }
        }
        fullScanQuery.stop();
    }

    log.info("AbstractQueryLogic: " + queryString + " " + timeString(abstractQueryLogic.getTime()));
    log.info("  1) parse query " + timeString(parseQuery.getTime()));
    log.info("  2) query metadata " + timeString(queryMetadata.getTime()));
    log.info("  3) full scan query " + timeString(fullScanQuery.getTime()));
    log.info("  4) optimized query " + timeString(optimizedQuery.getTime()));
    log.info("      4a) query global index " + timeString(queryGlobalIndex.getTime()));
    log.info("  5) process results " + timeString(processResults.getTime()));
    log.info(hash + " Query completed.");

    return results;
}

From source file: com.yaniv.online.MainActivity.java

public int[] takeFromLastDrop() {
    Log.d(TAG, "takeFromLastDrop() called ");
    String[] splitString = takeCardEditText.getText().toString().split(",");
    int[] split = tokensStringToInt(splitString);

    Log.d(TAG, "takeFromLastDrop() - splitString[]: " + Arrays.toString(splitString));
    Log.d(TAG, "primaryDeck=" + primaryDeck.toString());
    ArrayList<Card> lastDrop = primaryDeck.peek();
    Log.d(TAG, "lastDrop=" + lastDrop.toString());

    Log.d(TAG, "takeFromLastDrop() - splitInt[]: " + Arrays.toString(split));
    if (split == invalidDrop || split.length != 1) {
        Log.d(TAG,
                "invalidInput - Enter 0 to take the card from the Primary Deck or 1 from the Card Deck, Try Again");
        Toast.makeText(this, "You entered an illegal parameter, try again.", Toast.LENGTH_LONG).show();
        return invalidDrop;
    }

    // Taking a card: there are three possible cases for the last drop.
    if (lastDropType == 1) { // case one card: the last drop holds a single card
        if (split[0] < 0 || split[0] > lastDrop.size() - 1) {
            return invalidDrop;
        }
    } else if (lastDropType == 2) { // case equal cards
        if (split[0] < 0 || split[0] > lastDrop.size() - 1) {
            return invalidDrop;
        }
    } else { // case order: only the first or the last card may be taken
        if (split[0] != 0 && split[0] != lastDrop.size() - 1) {
            return invalidDrop;
        }
    }
    return split;
}
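
Here lastDrop.toString() is only as readable as Card's own toString(): ArrayList.toString() delegates to each element's toString(), so custom element classes should override it for useful log output. A hypothetical sketch (the Card class below is illustrative, not the project's):

import java.util.ArrayList;
import java.util.List;

public class CardListDemo {
    // Hypothetical stand-in for the project's Card class.
    static class Card {
        final String rank, suit;
        Card(String rank, String suit) { this.rank = rank; this.suit = suit; }
        @Override
        public String toString() { return rank + " of " + suit; }
    }

    public static void main(String[] args) {
        List<Card> lastDrop = new ArrayList<>();
        lastDrop.add(new Card("7", "Hearts"));
        lastDrop.add(new Card("7", "Spades"));
        // Without the override, the list would print class names and hash codes.
        System.out.println(lastDrop); // [7 of Hearts, 7 of Spades]
    }
}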

From source file: com.wolvereness.bluebutton.crash.ExceptionMessage.java

public Report(final String text) {
    final ArrayList<String> lines;
    { // _LINES_
        int i;
        if ((i = text.indexOf('\r')) != -1) {
            if (text.startsWith("\r\n", i)) { // StartsWith prevents OOB exception
                lines = Splitter.splitAll(text, "\r\n");
            } else {
                lines = Splitter.splitAll(text, '\r');
            }
        } else {
            lines = Splitter.splitAll(text, '\n');
        }
    } // _LINES_

    final ImmutableMap.Builder<Nodes, Node> nodes = ImmutableMap.builder();
    final ListIterator<String> it = lines.listIterator(lines.size() - 1);

    { // _DETAILS_
        int count = 0; // Number of lines with no "- " for sub-listing
        while (true) {
            String entry = it.previous();
            if (entry.startsWith(START)) {
                if (entry.startsWith(WORLD, 2)) {
                    continue; // Process this at end
                }

                final int colon = entry.indexOf(':');
                Nodes type;
                try {
                    type = Nodes.valueOf(entry.substring(2, colon).toUpperCase().replace(' ', '_'));
                } catch (final IllegalArgumentException ex) {
                    // valueOf throws (rather than returning null) for unknown names
                    type = Nodes._NA;
                }
                final List<String> subList = lines.subList(it.nextIndex(), it.nextIndex() + count);
                final Node node = type.makeNode(entry.substring(colon + 1), subList);
                nodes.put(type, node);

                count = 0; // Reset count, as it is used for sub-listing
            } else if (entry.equals(DETAILS_START)) {
                {
                    final ArrayList<String> worlds = new ArrayList<String>();
                    while (it.hasNext()) {
                        entry = it.next();
                        if (entry.startsWith(WORLD_START)) {
                            worlds.add(entry);
                        }
                    }
                    nodes.put(Nodes.WORLD, Nodes.WORLD.makeNode(null, worlds));
                }

                while (!it.previous().equals(DETAILS_START)) {
                }
                if (!it.previous().equals("")) // NOTE_0- blank line preceding details check, see NOTE_0- below
                    throw new IllegalStateException("Expected blank line in " + lines.toString());
                while (!it.previous().startsWith(DESCRIPTION_START)) {
                }
                it.next(); // Description_start
                it.next(); // Blank line
                break; // We're done in the loop
            } else {
                count++;
            }
        }
    } // _DETAILS_

    { // _STACK_
        final LinkedList<ExceptionMessage> exceptions = new LinkedList<ExceptionMessage>();
        final List<StackTraceElement> stacks = new ArrayList<StackTraceElement>();
        final List<String> description = new ArrayList<String>();
        description.add(it.next()); // Initialize; first line is always first exception
        for (String entry = it.next(); !entry.equals(DETAILS_START); entry = it.next()) {
            // Read in all the exception information.
            // Apocalypse if the formatting changes...
            if (entry.startsWith(STACK_START)) {
                // Normal stack element
                stacks.add(toStackTraceElement(entry));
            } else if (entry.startsWith(STACK_MORE_START)) {
                // "... n more" final line
                final ExceptionMessage previous = exceptions.getLast();
                final List<StackTraceElement> previousTrace = previous.getStackTrace();
                entry = entry.substring(STACK_MORE_START.length(), entry.length() - STACK_MORE_END.length());
                final int stackCount = Integer.parseInt(entry);
                stacks.addAll(previousTrace.subList(previousTrace.size() - stackCount, previousTrace.size()));
                exceptions.add(new ExceptionMessage(description, stacks));

                // Reset our counters
                description.clear();
                stacks.clear();
            } else if (entry.startsWith(CAUSED_START)) {
                // Finish old exception
                if (description.size() != 0) {
                    exceptions.add(new ExceptionMessage(description, stacks));
                    description.clear();
                    stacks.clear();
                }

                // New exception
                description.add(entry.substring(CAUSED_START.length()));
            } else {
                // Random description information
                description.add(entry);
            }
        }
        description.remove(description.size() - 1); // NOTE_0- There will be a blank line here, see NOTE_0- above
        if (description.size() != 0) {
            exceptions.add(new ExceptionMessage(description, stacks));
        }

        this.exceptions = ImmutableList.copyOf(exceptions);
    } // _STACK_

    while (!it.previous().startsWith(DESCRIPTION_START)) {
    } // This puts us on the line before the "description:"
    it.next(); // Push iterator for description_start to hit twice

    this.description = it.previous().substring(DESCRIPTION_START.length());

    { // _TIMESTAMP_
        final String timeStamp = it.previous().substring(TIME_START.length());
        Date time = null;
        try {
            time = (Date) DateFormatUtils.ISO_DATETIME_TIME_ZONE_FORMAT.parseObject(timeStamp);
        } catch (final ParseException ex) {
            try {
                time = new SimpleDateFormat().parse(timeStamp);
            } catch (final ParseException e) {
                // Fall through; time stays null and defaults to the epoch below.
            }
        }
        this.time = time == null ? new Date(0L) : time;
    } // _TIMESTAMP_

    it.previous(); // Blank line after joke
    this.fun = it.previous();

    this.nodes = nodes.build();
}

From source file:com.yaniv.online.MainActivity.java

@Override
public void onRealTimeMessageReceived(RealTimeMessage rtm) {

    byte[] buf = rtm.getMessageData();
    String sender = rtm.getSenderParticipantId();
    Log.d(TAG, "[onRealTimeMessageReceived] - Got message from Participant " + sender + ",msg: "
            + Arrays.toString(buf));

    // Another player finished his turn
    if (buf[0] == 20) {

        turn = buf[1];
        Card newCard;
        TextView lastPlayerPick = (TextView) (findViewById(R.id.lastPlayerPick));
        if (buf[2] == 5) {
            // if the last player took from the deck, remove the first card
            lastPlayerPick.setText("Last player picked from the deck");
            newCard = cardDeck.jp.remove(0);
        } else {
            lastPlayerPick.setText("Last player picked: " + primaryDeck.peek().get(buf[2]).toString());
            ArrayList<Card> lastDrop = primaryDeck.peek();
            newCard = lastDrop.get(buf[2]);
        }

        lastDropType = buf[3];

        ArrayList<Card> droppedCards = fromGson(buf, 5, buf.length, DATA_TYPE_CARDS);
        Log.d(TAG, "Player dropped cards: " + droppedCards.toString());
        primaryDeck.push(droppedCards);
        Log.d(TAG, "Player cards before: " + mParticipantCards.get(sender));

        Vector<Card> playerCards = mParticipantCards.get(sender);
        for (Card c : droppedCards) {
            Log.d(TAG, "Deleting card: " + c.toString());
            boolean contains = playerCards.contains(c);
            boolean b = playerCards.remove(c);
            Log.d(TAG, "Delete worked: " + b + " card exist? " + contains);
        }
        playerCards.add(newCard);
        Log.d(TAG, "Player cards changed: " + playerCards);
        mParticipantCards.put(sender, playerCards);
        Log.d(TAG, "Player cards after: " + mParticipantCards.get(sender));

        updateParticipantUI(sender);
        updatePrimaryDeckUI();

    }
    // Game has started: the owner sent the card deck; each participant loads it into cardDeck (and shuffles if told to).
    else if ((int) buf[0] == 0) {
        // Checking shuffle
        if ((int) buf[1] == 1) {
            displayToastForLimitTime("Shuffling Card Deck...", 3000);
        }
        cardDeck = fromGson(buf, 5, buf.length, DATA_TYPE_CARD_DECK);
        if (cardDeck != null) {
            Log.d(TAG, "[onRealTimeMessageReceived] - cardDeck " + cardDeck.jp);
        }
    }
    // Owner created the cards for all participants; each one saves its own hand in myCards.
    else if ((int) buf[0] == 1) {
        // Initialize the highscores array if null
        if (highscores == null) {
            highscores = new int[mParticipants.size()];
        }
        mParticipantCards = fromGson(buf, 5, buf.length, DATA_TYPE_M_PARTICIPANT_CARDS);
        myCards = mParticipantCards.containsKey(mMyId) ? mParticipantCards.get(mMyId) : null;
        calculateSum();
        setPlayerPositonUI();
        Log.d(TAG, "[onRealTimeMessageReceived] -mycards after" + myCards);
        updateTurnUi();
        updateParticipantsNamesAndUI();
    }
    // A participant played a turn
    else if ((int) buf[0] == 2) {

        mParticipantCards.put(sender, (Vector<Card>) fromGson(buf, 5, buf.length, DATA_TYPE_MY_CARDS));
        Log.d(TAG, "[onRealTimeMessageReceived] -participant " + sender + " finished his turn, his new cards: "
                + mParticipantCards.get(sender));
        updateParticipantUI(sender);

    }
    // Take from: 0 = primary pile, 1 = deck (and update on every screen).
    // The cards taken are attached only when taking from the primary pile (0).
    else if ((int) buf[0] == 5) {
        primaryDeck = fromGson(buf, 5, buf.length, DATA_TYPE_PRIMARY_DECK);
        Log.d(TAG, "[onRealTimeMessageReceived] - primaryDeck " + primaryDeck);
        if (primaryDeck != null) {
            updatePrimaryDeckUI();
        }
    }
    // When a player declares Yaniv
    else if ((int) buf[0] == 7) {
        yanivCalled(buf);
    }

    else if ((int) buf[0] == 8) {
        readyToPlayPlayers.put(sender, true);
        checkReadyList();
    }

    else if ((int) buf[0] == 10) {

    }

    // Regular messages to change the turn.
    else {

        turn = buf[3];
        updateTurnUi();
        Log.d(TAG, "[onRealTimeMessageReceived] - regular message ");

        if (buf[1] == 'F' || buf[1] == 'U') {
            turn = buf[3];
            lastDropType = (int) buf[4];
        }
    }
}

From source file:org.hyperic.hq.product.SNMPMeasurementPlugin.java

private int getIndex(String indexName, String indexValue, SNMPSession session)
        throws MetricUnreachableException, MetricNotFoundException {
    long timeNow = System.currentTimeMillis();

    Integer ix;

    boolean expired = false;

    if ((timeNow - ixTimestamp) > ixExpire) {
        if (ixTimestamp == 0) {
            this.log.debug("initializing index cache");
        } else {
            this.log.debug("clearing index cache");
        }

        ixCache.clear();

        ixTimestamp = timeNow;

        expired = true;
    } else {
        if ((ix = (Integer) ixCache.get(indexValue)) != null) {
            return ix.intValue();
        }
    }

    // For multiple indices we iterate through indexNames and
    // combine the values which we later attempt to match against
    // indexValue...
    List indexNames = StringUtil.explode(indexName, "->");

    ArrayList data = new ArrayList();

    // This can be optimized, esp. if indexNames.size() == 1...
    for (int i = 0; i < indexNames.size(); i++) {
        String name = (String) indexNames.get(i);

        List values = null;

        try {
            values = session.getBulk(name);

            for (int j = 0; j < values.size(); j++) {
                String value = values.get(j).toString();

                StringBuffer buf = null;

                if (data.size() - 1 >= j) {
                    buf = (StringBuffer) data.get(j);

                    buf.append("->").append(value);
                } else {
                    buf = new StringBuffer(value);

                    data.add(buf);
                }
            }
        } catch (SNMPException e) {
            throw new MetricInvalidException(e);
        }
    }

    // We go in reverse for the case of apache having two servername->80
    // entries, the first being the default (unused) and the second being
    // the vhost on 80 which is actually handling the requests.
    // -- We could/should? enforce uniqueness here...
    for (int i = data.size() - 1; i >= 0; i--) {
        StringBuffer buf = (StringBuffer) data.get(i);
        String cur = buf.toString();

        // Since we fetched all the data might as well build
        // up the index cache for future reference...
        Integer index = Integer.valueOf(i);

        ixCache.put(cur, index);

        // Only seen w/ microsoft snmp server
        // where interface name has a trailing null byte...
        ixCache.put(cur.trim(), index);
    }

    if (this.log.isDebugEnabled()) {
        if (expired) {
            this.log.debug("built index cache:");

            for (Iterator it = ixCache.entrySet().iterator(); it.hasNext();) {
                Map.Entry ent = (Map.Entry) it.next();

                this.log.debug("   " + ent.getKey() + "=>" + ent.getValue());
            }
        } else {
            this.log.debug("forced to rebuild index cache looking for: " + indexValue);
        }
    }

    if ((ix = (Integer) ixCache.get(indexValue)) != null) {
        return ix.intValue();
    }

    String possibleValues = ", possible values=";

    if (listIsEmpty(data)) {
        possibleValues += "[NONE FOUND]";
    } else {
        possibleValues += data.toString();
    }

    throw new MetricNotFoundException(
            "could not find value '" + indexValue + "' in column '" + indexName + "'" + possibleValues);
}

From source file:org.ejbca.util.CertTools.java

/**
* Gets a list of all custom OIDs defined in the string. A custom OID is any DN part keyed
* directly by an OID rather than by a name such as CN or rfc822Name. Each OID is returned
* only once, even if the input string contains it multiple times.
*
* @param dn String containing a DN in the format "C=SE, O=xx, OU=yy, CN=zz", "rfc822Name=foo@bar.com", etc.
*
* @return ArrayList containing unique OIDs, or an empty list if no custom OIDs are present
*/
public static ArrayList<String> getCustomOids(String dn) {
    if (log.isTraceEnabled()) {
        log.trace(">getCustomOids: dn:'" + dn);
    }
    ArrayList<String> parts = new ArrayList<String>();
    if (dn != null) {
        String o;
        X509NameTokenizer xt = new X509NameTokenizer(dn);
        while (xt.hasMoreTokens()) {
            o = xt.nextToken();
            // Try to see if it is a valid OID
            try {
                int i = o.indexOf('=');
                // An OID key is at least 3 chars long and has '.' as its second character (e.g. "1.2.3").
                if ((i > 2) && (o.charAt(1) == '.')) {
                    String oid = o.substring(0, i);
                    // If we have multiple of the same custom oid, don't claim that we have more
                    // This method will only return "unique" custom oids.
                    if (!parts.contains(oid)) {
                        // Check if it is a real oid, if it is not we will ignore it (IllegalArgumentException will be thrown)
                        new DERObjectIdentifier(oid);
                        parts.add(oid);
                    }
                }
            } catch (IllegalArgumentException e) {
                // Not a valid oid
            }
        }
    }
    if (log.isTraceEnabled()) {
        log.trace("<getpartsFromDN: resulting DN part=" + parts.toString());
    }
    return parts;
}