Example usage for java.util Scanner next

List of usage examples for java.util Scanner next

Introduction

On this page you can find example usages of java.util.Scanner.next().

Prototype

public String next() 

Source Link

Document

Finds and returns the next complete token from this scanner.

Usage

From source file:com.gatf.executor.core.AcceptanceTestContext.java

/**
 * Initializes the SOAP test context: collects all {@code String} constants declared on
 * {@link HttpHeaders} into the {@code httpHeaders} map (keyed by the lower-cased value),
 * then, if a WSDL location file is configured, parses every WSDL listed in it and caches
 * the generated SOAP request templates, SOAP actions and service endpoints.
 *
 * @throws Exception if reflection, file reading or WSDL parsing fails
 */
private void initSoapContextAndHttpHeaders() throws Exception {
    Field[] declaredFields = HttpHeaders.class.getDeclaredFields();
    for (Field field : declaredFields) {
        // Only the static String header-name constants are of interest.
        if (java.lang.reflect.Modifier.isStatic(field.getModifiers()) && field.getType().equals(String.class)) {
            httpHeaders.put(field.get(null).toString().toLowerCase(), field.get(null).toString());
        }
    }

    File file = null;
    if (gatfExecutorConfig.getWsdlLocFile() != null && !gatfExecutorConfig.getWsdlLocFile().trim().isEmpty())
        file = getResourceFile(gatfExecutorConfig.getWsdlLocFile());

    if (file != null) {
        List<String> list = new ArrayList<String>();
        // try-with-resources guarantees the Scanner (and the underlying file handle)
        // is closed even if reading throws; the original only closed on the happy path.
        try (Scanner s = new Scanner(file)) {
            s.useDelimiter("\n");
            while (s.hasNext()) {
                // Strip CR so Windows line endings don't leak into the location strings.
                list.add(s.next().replace("\r", ""));
            }
        }

        for (String wsdlLoc : list) {
            if (!wsdlLoc.trim().isEmpty()) {
                // Each non-empty line appears to be "<prefix>,<wsdl-url>" — TODO confirm format.
                String[] wsdlLocParts = wsdlLoc.split(",");
                logger.info("Started Parsing WSDL location - " + wsdlLocParts[1]);
                Wsdl wsdl = Wsdl.parse(wsdlLocParts[1]);
                for (QName bindingName : wsdl.getBindings()) {
                    SoapBuilder builder = wsdl.getBuilder(bindingName);
                    for (SoapOperation operation : builder.getOperations()) {
                        String request = builder.buildInputMessage(operation);
                        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
                        DocumentBuilder db = dbf.newDocumentBuilder();
                        // NOTE(review): getBytes() uses the platform default charset and the
                        // parser is not hardened against XXE; consider an explicit charset and
                        // disabling external entities if WSDLs may come from untrusted sources.
                        Document soapMessage = db.parse(new ByteArrayInputStream(request.getBytes()));

                        if (gatfExecutorConfig.isDistributedLoadTests()) {
                            soapStrMessages.put(wsdlLocParts[0] + operation.getOperationName(), request);
                        }

                        soapMessages.put(wsdlLocParts[0] + operation.getOperationName(), soapMessage);
                        if (operation.getSoapAction() != null) {
                            soapActions.put(wsdlLocParts[0] + operation.getOperationName(),
                                    operation.getSoapAction());
                        }
                        logger.info("Adding message for SOAP operation - " + operation.getOperationName());
                    }
                    soapEndpoints.put(wsdlLocParts[0], builder.getServiceUrls().get(0));
                    logger.info("Adding SOAP Service endpoint - " + builder.getServiceUrls().get(0));
                }
                logger.info("Done Parsing WSDL location - " + wsdlLocParts[1]);
            }
        }
    }
}

From source file:com.ibm.devops.dra.AbstractDevOpsAction.java

/**
 * Builds a "jobName:buildNumber" identifier for the given run. For folder
 * jobs ("folder/subfolder/job") only the last "/"-separated segment — the
 * job's own name — is used.
 *
 * @param jobName full job name, possibly containing "/" separators
 * @param build   the run whose build number is appended
 * @return last segment of {@code jobName} + ":" + build number
 */
public String getBuildNumber(String jobName, Run build) {

    String jName = "";
    // try-with-resources: the original never closed the Scanner if iteration threw.
    try (Scanner s = new Scanner(jobName)) {
        s.useDelimiter("/");
        // Advance to the last token, which is the actual job name.
        while (s.hasNext()) {
            jName = s.next();
        }
    }

    String buildNumber = jName + ":" + build.getNumber();
    return buildNumber;
}

From source file:org.fao.geonet.kernel.csw.services.GetRecords.java

/**
 * If the request contains a Query element, it must have attribute typeNames.
 *
 * The OGC 07-045 spec is more restrictive than OGC 07-006.
 *
 * OGC 07-006 10.8.4.8:/*from   ww  w.  j  a  va  2  s  .co m*/
 * The typeNames parameter is a list of one or more names of queryable entities in the catalogue's information model
 * that may be constrained in the predicate of the query. In the case of XML realization of the OGC core metadata
 * properties (Subclause 10.2.5), the element csw:Record is the only queryable entity. Other information models may
 * include more than one queryable component. For example, queryable components for the XML realization of the ebRIM
 * include rim:Service, rim:ExtrinsicObject and rim:Association. In such cases the application profile shall
 * describe how multiple typeNames values should be processed.
 * In addition, all or some of the these queryable entity names may be specified in the query to define which
 * metadata record elements the query should present in the response to the GetRecords operation.
 *
 * OGC 07-045 8.2.2.1.1:
 * Mandatory: Must support *one* of csw:Record? or gmd:MD_Metadata? in a query. Default value is csw:Record?.
 *
 * (note how OGC 07-045 mixes up a mandatory parameter that has a default value !!)
 *
 * We'll go for the default value option rather than the mandatory-ness. So: if typeNames is not present or empty,
 * "csw:Record" is used.
 *
 * If the request does not contain exactly one (or comma-separated, both) of the values specified in OGC 07-045,
 * an exception is thrown. If both are present "gmd:MD_Metadata" is preferred.
 *
 * @param query query element
 * @return typeName
 * @throws MissingParameterValueEx if typeNames is missing
 * @throws InvalidParameterValueEx if typeNames does not have one of the mandated values
 */
private String checkTypenames(Element query) throws MissingParameterValueEx, InvalidParameterValueEx {
    if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
        Log.debug(Geonet.CSW_SEARCH, "checking typenames in query:\n" + Xml.getString(query));
    }
    //
    // get the prefix used for CSW namespace used in this input document
    //
    String cswPrefix = getPrefixForNamespace(query, Csw.NAMESPACE_CSW);
    if (cswPrefix == null) {
        if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
            Log.debug(Geonet.CSW_SEARCH,
                    "checktypenames: csw prefix not found, using " + Csw.NAMESPACE_CSW.getPrefix());
        }
        cswPrefix = Csw.NAMESPACE_CSW.getPrefix();
    }
    //
    // get the prefix used for GMD namespace used in this input document
    //
    String gmdPrefix = getPrefixForNamespace(query, Csw.NAMESPACE_GMD);
    if (gmdPrefix == null) {
        if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
            Log.debug(Geonet.CSW_SEARCH,
                    "checktypenames: gmd prefix not found, using " + Csw.NAMESPACE_GMD.getPrefix());
        }
        gmdPrefix = Csw.NAMESPACE_GMD.getPrefix();
    }
    if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
        Log.debug(Geonet.CSW_SEARCH,
                "checktypenames: csw prefix set to " + cswPrefix + ", gmd prefix set to " + gmdPrefix);
    }

    Attribute typeNames = query.getAttribute("typeNames", query.getNamespace());
    typeNames = query.getAttribute("typeNames");
    if (typeNames != null) {
        String typeNamesValue = typeNames.getValue();
        // empty typenames element
        if (StringUtils.isEmpty(typeNamesValue)) {
            return cswPrefix + ":Record";
        }
        // not empty: scan comma-separated string
        Scanner commaSeparator = new Scanner(typeNamesValue);
        commaSeparator.useDelimiter(",");
        String result = cswPrefix + ":Record";
        while (commaSeparator.hasNext()) {
            String typeName = commaSeparator.next();
            typeName = typeName.trim();
            if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
                Log.debug(Geonet.CSW_SEARCH, "checking typename in query:" + typeName);
            }
            if (!(typeName.equals(cswPrefix + ":Record") || typeName.equals(gmdPrefix + ":MD_Metadata"))) {
                throw new InvalidParameterValueEx("typeNames", "invalid value");
            }
            if (typeName.equals(gmdPrefix + ":MD_Metadata")) {
                return typeName;
            }
        }
        return result;
    }
    // missing typeNames element
    else {
        return cswPrefix + ":Record";
    }
}

From source file:com.inmobi.conduit.local.LocalStreamServiceTest.java

/**
 * Parses the given conduit config, builds a local-stream service per source
 * cluster, runs an (empty-input) MR job for each and verifies the audit
 * counters and the fs.default.name settings of the job configuration.
 *
 * @param configName         conduit config resource to parse
 * @param currentClusterName name of the current cluster, or null
 * @throws Exception on any setup, job or assertion failure
 */
private void testClusterName(String configName, String currentClusterName) throws Exception {
    ConduitConfigParser parser = new ConduitConfigParser(configName);
    ConduitConfig config = parser.getConfig();
    Set<String> streamsToProcess = new HashSet<String>();
    streamsToProcess.addAll(config.getSourceStreams().keySet());
    Set<String> clustersToProcess = new HashSet<String>();
    Set<TestLocalStreamService> services = new HashSet<TestLocalStreamService>();
    Cluster currentCluster = null;
    // Collect every source cluster referenced by any stream.
    for (SourceStream sStream : config.getSourceStreams().values()) {
        for (String cluster : sStream.getSourceClusters()) {
            clustersToProcess.add(cluster);
        }
    }
    if (currentClusterName != null) {
        currentCluster = config.getClusters().get(currentClusterName);
    }
    // One service per cluster, wired to the test job tracker.
    for (String clusterName : clustersToProcess) {
        Cluster cluster = config.getClusters().get(clusterName);
        cluster.getHadoopConf().set("mapred.job.tracker", super.CreateJobConf().get("mapred.job.tracker"));
        TestLocalStreamService service = new TestLocalStreamService(config, cluster, currentCluster,
                new NullCheckPointProvider(), streamsToProcess);
        services.add(service);
    }

    for (TestLocalStreamService service : services) {
        FileSystem fs = service.getFileSystem();
        service.preExecute();
        if (currentClusterName != null)
            Assert.assertEquals(service.getCurrentCluster().getName(), currentClusterName);
        // creating a job with empty input path
        Path tmpJobInputPath = new Path("/tmp/job/input/path");
        Map<FileStatus, String> fileListing = new TreeMap<FileStatus, String>();
        Set<FileStatus> trashSet = new HashSet<FileStatus>();
        // checkpointKey, CheckPointPath
        Table<String, String, String> checkpointPaths = HashBasedTable.create();
        service.createMRInput(tmpJobInputPath, fileListing, trashSet, checkpointPaths);
        Job testJobConf = service.createJob(tmpJobInputPath, 1000);
        testJobConf.waitForCompletion(true);

        int numberOfCountersPerFile = 0;
        long sumOfCounterValues = 0;
        Path outputCounterPath = new Path(new Path(service.getCluster().getTmpPath(), service.getName()),
                "counters");
        FileStatus[] statuses = fs.listStatus(outputCounterPath, new PathFilter() {
            public boolean accept(Path path) {
                return path.toString().contains("part");
            }
        });
        for (FileStatus fileSt : statuses) {
            // try-with-resources: the original never closed this Scanner, leaking
            // the underlying filesystem stream for every counters file.
            try (Scanner scanner = new Scanner(fs.open(fileSt.getPath()))) {
                while (scanner.hasNext()) {
                    String counterNameValue = null;
                    try {
                        counterNameValue = scanner.next();
                        // Expected format: 4 delimiter-separated fields, last one the count.
                        String tmp[] = counterNameValue.split(ConduitConstants.AUDIT_COUNTER_NAME_DELIMITER);
                        Assert.assertEquals(4, tmp.length);
                        Long numOfMsgs = Long.parseLong(tmp[3]);
                        numberOfCountersPerFile++;
                        sumOfCounterValues += numOfMsgs;
                    } catch (Exception e) {
                        // Malformed lines are logged and skipped; counting continues.
                        LOG.error("Counters file has malformed line with counter name =" + counterNameValue
                                + "..skipping the line", e);
                    }
                }
            }
        }
        // Should have 2 counters for each file
        Assert.assertEquals(NUMBER_OF_FILES * 2, numberOfCountersPerFile);
        // sum of all counter values should be equal to total number of messages
        Assert.assertEquals(NUMBER_OF_FILES * 3, sumOfCounterValues);

        Assert.assertEquals(testJobConf.getConfiguration().get(FS_DEFAULT_NAME_KEY),
                service.getCurrentCluster().getHadoopConf().get(FS_DEFAULT_NAME_KEY));
        Assert.assertEquals(testJobConf.getConfiguration().get(SRC_FS_DEFAULT_NAME_KEY),
                service.getCluster().getHadoopConf().get(FS_DEFAULT_NAME_KEY));
        if (currentCluster == null)
            Assert.assertEquals(testJobConf.getConfiguration().get(FS_DEFAULT_NAME_KEY),
                    testJobConf.getConfiguration().get(SRC_FS_DEFAULT_NAME_KEY));
        service.getFileSystem().delete(new Path(service.getCluster().getRootDir()), true);
    }

}

From source file:com.concursive.connect.web.modules.wiki.utils.WikiPDFUtils.java

/**
 * Renders a wiki table (consecutive lines starting with "|" or "!") into a
 * PdfPTable and adds it to the PDF document. Lines are consumed from
 * {@code in} until a non-table line is found; that line is returned so the
 * caller can continue parsing from it.
 *
 * @return the first line that is not part of the table, or null on EOF or
 *         when a row is malformed (no terminating "|")
 */
private static String parseTable(WikiPDFContext context, Wiki wiki, String line, Document document,
        Connection db, ArrayList<Integer> wikiListTodo, ArrayList<Integer> wikiListDone, BufferedReader in)
        throws Exception {
    if (line == null) {
        return null;
    }
    PdfPTable pdfTable = null;
    int columnCount = 0;
    int rowCount = 0;

    // Keep track of the table's custom styles (keyed by 1-based cell index)
    HashMap<Integer, String> cStyle = new HashMap<Integer, String>();

    while (line != null && (line.startsWith("|") || line.startsWith("!"))) {

        // Build a complete logical row: "!"-prefixed continuation lines are
        // joined (with CRLF) until the row ends with a "|".
        String lineToParse = line;
        while (!line.endsWith("|")) {
            line = in.readLine();
            if (line == null) {
                // there is an error in the line to process
                return null;
            }
            if (line.startsWith("!")) {
                lineToParse += CRLF + line.substring(1);
            }
        }
        line = lineToParse;

        // Determine if the row can output (style-only first rows are suppressed below)
        boolean canOutput = true;

        ++rowCount;

        String cellType = null;
        Scanner sc = null;
        // "||...||" marks a header row, a single "|" a data row.
        // NOTE(review): these Scanners over Strings are never closed — harmless
        // for String sources but flagged by resource-leak linters.
        if (line.startsWith("||") && line.endsWith("||")) {
            cellType = "th";
            sc = new Scanner(line).useDelimiter("[|][|]");
            //        sc = new Scanner(line.substring(2, line.length() - 2)).useDelimiter("[|][|]");
        } else if (line.startsWith("|")) {
            cellType = "td";
            // Delimiter: a "|" that is not inside a wiki link "[...]"
            sc = new Scanner(line.substring(1, line.length() - 1)).useDelimiter("\\|(?=[^\\]]*(?:\\[|$))");
        }

        if (sc != null) {

            if (rowCount == 1) {
                // Count the columns, get the specified widths too...
                while (sc.hasNext()) {
                    ++columnCount;
                    sc.next();
                }
                // Reset the scanner now that the columns have been counted
                if (line.startsWith("||") && line.endsWith("||")) {
                    sc = new Scanner(line).useDelimiter("[|][|]");
                } else if (line.startsWith("|")) {
                    sc = new Scanner(line.substring(1, line.length() - 1))
                            .useDelimiter("\\|(?=[^\\]]*(?:\\[|$))");
                }

                // Start the table
                pdfTable = new PdfPTable(columnCount);
                //pdfTable.setWidthPercentage(100);
                pdfTable.setHorizontalAlignment(Element.ALIGN_LEFT);
                pdfTable.setSpacingBefore(10);
                pdfTable.setWidthPercentage(100);
                pdfTable.setKeepTogether(true);
            }

            // Determine the column span (empty cells extend the previous cell)
            int colSpan = 1;
            // Determine the cell being output
            int cellCount = 0;

            while (sc.hasNext()) {
                String cellData = sc.next();
                if (cellData.length() == 0) {
                    ++colSpan;
                    continue;
                }

                // Track the cell count being output
                ++cellCount;

                if (rowCount == 1) {
                    // Parse and validate the style input, e.g. "{width: 50%}"
                    LOG.debug("Checking style value: " + cellData);
                    if (cellData.startsWith("{") && cellData.endsWith("}")) {
                        String[] style = cellData.substring(1, cellData.length() - 1).split(":");
                        String attribute = style[0].trim();
                        String value = style[1].trim();
                        // Determine the width of each column and store it
                        if ("width".equals(attribute)) {
                            // Validate the width style
                            if (StringUtils.hasAllowedOnly("0123456789%.", value)) {
                                cStyle.put(cellCount, attribute + ": " + value + ";");
                            }
                        } else {
                            LOG.debug("Unsupported style: " + cellData);
                        }
                        // A style row is consumed but not rendered.
                        canOutput = false;
                    }
                }

                // Output the header
                if (canOutput) {

                    PdfPCell cell = new PdfPCell();
                    cell.setPadding(10);
                    cell.setBorderColor(new Color(100, 100, 100));
                    if ("th".equals(cellType)) {
                        // Header cells: centered on a light gray background.
                        cell.setHorizontalAlignment(Element.ALIGN_CENTER);
                        cell.setBackgroundColor(new Color(0xC0, 0xC0, 0xC0));
                    }
                    if (colSpan > 1) {
                        cell.setColspan(colSpan);
                    }

                    // Output the data
                    if (" ".equals(cellData) || "".equals(cellData)) {
                        // Put a blank space in blank cells for output consistency
                        cell.addElement(new Chunk(" "));
                        LOG.debug("   OUTPUTTING A BLANK");
                    } else {
                        // Output the cell as a complete wiki
                        float cellWidth = (100.0f / columnCount);
                        parseContent(context, wiki, cellData, document, cell, db, wikiListTodo, wikiListDone,
                                cellWidth);
                        LOG.debug("   OUTPUTTING CONTENT");
                    }
                    pdfTable.addCell(cell);
                }
            }
        }
        // read another line to see if it's part of the table
        line = in.readLine();
    }
    if (pdfTable != null) {
        LOG.debug("document.add(pdfTable)");
        document.add(pdfTable);
        //          document.add(Chunk.NEWLINE);
    }
    return line;
}

From source file:com.github.pffy.chinese.freq.ChineseFrequency.java

/**
 * Analyzes {@code this.input}: strips non-Hanzi characters, counts the
 * frequency of each remaining character, maps each character to pinyin via
 * the {@code hpdx} index, and builds CSV/TSV/TXT frequency reports plus a
 * textual summary. All results are stored in instance fields.
 */
private void analyze() {

    int inputCount = 0;
    int removedCount = 0;
    int hanziCount = 0;
    int uniqueHanziCount = 0;
    int processedCount = 0;

    int freq = 0;

    String csv, tsv, txt;
    String str, input, pinyin, hanzi;
    Scanner sc;
    List<String> hanziList;
    Map<String, Integer> freqMap;
    JSONObject hpdx;
    String[] arr;

    Set<String> unmappedCharacters;

    hpdx = this.hpdx;

    input = this.input;
    inputCount = input.length();

    // Remove everything that is not retained as Hanzi; the length difference
    // is the number of removed characters.
    input = retainHanzi(input);
    removedCount = inputCount - input.length();

    hanziCount = input.length();

    sc = new Scanner(input);
    // Empty delimiter: iterate the string character by character.
    sc.useDelimiter("");

    hanziList = new ArrayList<String>();
    freqMap = new HashMap<String, Integer>();

    // counts occurrences of each character
    while (sc.hasNext()) {

        str = sc.next();
        hanziList.add(str);

        if (freqMap.containsKey(str)) {
            freqMap.put(str, (Integer) freqMap.get(str).intValue() + 1);
        } else {
            freqMap.put(str, 1);
        }
    }

    // done with Scanner
    sc.close();

    uniqueHanziCount = freqMap.keySet().size();

    // Keys are "zero-padded-freq-hanzi-pinyin"; reverse order sorts by
    // frequency descending, then hanzi, then pinyin.
    SortedMap<String, String> freqTreeMap = new TreeMap<String, String>(Collections.reverseOrder());

    unmappedCharacters = new HashSet<String>();
    for (Entry<String, Integer> counts : freqMap.entrySet()) {

        try {

            hanzi = counts.getKey();
            pinyin = hpdx.getString(hanzi);

        } catch (JSONException je) {

            // add this unmapped character to the list
            unmappedCharacters.add(counts.getKey());

            // not idx mapped yet. that's ok. move on.
            continue;
        }

        if (pinyin.isEmpty()) {
            // if character is unmapped in idx, do not process.
            continue;
        }

        freq = counts.getValue();

        freqTreeMap.put(String.format("%" + this.PADSIZE_FREQ + "s", freq).replace(' ', '0') + "-" + hanzi + "-"
                + pinyin, hanzi + "," + pinyin + "," + freq);
        processedCount++;
    }

    // outputs — StringBuilders replace the original O(n^2) String
    // concatenation; the appended content is identical.
    StringBuilder csvBuilder = new StringBuilder(this.HEADER_ROW_CSV);
    StringBuilder tsvBuilder = new StringBuilder(this.HEADER_ROW_TSV);
    StringBuilder txtBuilder = new StringBuilder(this.HEADER_ROW_TXT);
    for (Entry<String, String> outputs : freqTreeMap.entrySet()) {

        csv = this.CRLF + outputs.getValue();
        csvBuilder.append(csv);

        tsv = csv.replaceAll(",", "\t");
        tsvBuilder.append(tsv);

        arr = csv.split(",");

        // arr[0] is hanzi (with the leading CRLF). arr[1] is pinyin. arr[2] is freq.
        txt = padSummary(arr[0] + " [" + arr[1] + "]", this.PADSIZE_SUMMARY + 1) + arr[2];
        txtBuilder.append(txt);
    }

    // post-process; trim() removes the leading CRLF of the first data row
    // (and matches the original's cleanup step exactly)
    this.csvOutput = csvBuilder.toString().trim();
    this.tsvOutput = tsvBuilder.toString().trim();
    this.txtOutput = txtBuilder.toString().trim();

    // counts
    this.inputCount = inputCount;
    this.removedCount = removedCount;
    this.hanziCount = hanziCount;
    this.uniqueHanziCount = uniqueHanziCount;
    this.processedCount = processedCount;

    this.unmappedCharacters = unmappedCharacters;

    // summary
    String summaryString = "";

    summaryString += padSummary(this.MSG_TOTAL_COUNT, this.PADSIZE_SUMMARY) + inputCount;
    summaryString += this.CRLF + padSummary(this.MSG_REMOVED_COUNT, this.PADSIZE_SUMMARY) + removedCount;
    summaryString += this.CRLF + padSummary(this.MSG_HANZI_COUNT, this.PADSIZE_SUMMARY) + hanziCount;
    summaryString += this.CRLF + padSummary(this.MSG_UNIQUE_COUNT, this.PADSIZE_SUMMARY) + uniqueHanziCount;
    summaryString += this.CRLF + padSummary(this.MSG_PROCESSED_COUNT, this.PADSIZE_SUMMARY) + processedCount;

    this.summary = summaryString;
}

From source file:org.fao.geonet.component.csw.GetRecords.java

/**
 * If the request contains a Query element, it must have attribute typeNames.
 *
 * The OGC 07-045 spec is more restrictive than OGC 07-006.
 *
 * OGC 07-006 10.8.4.8: The typeNames parameter is a list of one or more names of queryable
 * entities in the catalogue's information model that may be constrained in the predicate of the
 * query. In the case of XML realization of the OGC core metadata properties (Subclause 10.2.5),
 * the element csw:Record is the only queryable entity. Other information models may include
 * more than one queryable component. For example, queryable components for the XML realization
 * of the ebRIM include rim:Service, rim:ExtrinsicObject and rim:Association. In such cases the
 * application profile shall describe how multiple typeNames values should be processed. In
 * addition, all or some of these queryable entity names may be specified in the query to
 * define which metadata record elements the query should present in the response to the
 * GetRecords operation.
 *
 * OGC 07-045 8.2.2.1.1: Mandatory: Must support *one* of csw:Record or
 * gmd:MD_Metadata in a query. Default value is csw:Record.
 *
 * (note how OGC 07-045 mixes up a mandatory parameter that has a default value !!)
 *
 * We'll go for the default value option rather than the mandatory-ness. So: if typeNames is not
 * present or empty, "csw:Record" is used.
 *
 * If the request does not contain exactly one (or comma-separated, both) of the values
 * specified in OGC 07-045, an exception is thrown. If both are present "gmd:MD_Metadata" is
 * preferred.
 *
 * @param query    query element
 * @param isStrict enable strict error message to comply with GDI-DE Testsuite test
 *                 csw:InterfaceBindings.GetRecords-InvalidRequest
 * @return typeName
 * @throws MissingParameterValueEx if typeNames is missing
 * @throws InvalidParameterValueEx if typeNames does not have one of the mandated values
 */
private String checkTypenames(Element query, boolean isStrict)
        throws MissingParameterValueEx, InvalidParameterValueEx {
    if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
        Log.debug(Geonet.CSW_SEARCH, "checking typenames in query:\n" + Xml.getString(query));
    }
    //
    // get the prefix used for CSW namespace used in this input document
    //
    String cswPrefix = getPrefixForNamespace(query, Csw.NAMESPACE_CSW);
    if (cswPrefix == null) {
        if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
            Log.debug(Geonet.CSW_SEARCH,
                    "checktypenames: csw prefix not found, using " + Csw.NAMESPACE_CSW.getPrefix());
        }
        cswPrefix = Csw.NAMESPACE_CSW.getPrefix();
    }
    //
    // get the prefix used for GMD namespace used in this input document
    //
    String gmdPrefix = getPrefixForNamespace(query, Csw.NAMESPACE_GMD);
    if (gmdPrefix == null) {
        if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
            Log.debug(Geonet.CSW_SEARCH,
                    "checktypenames: gmd prefix not found, using " + Csw.NAMESPACE_GMD.getPrefix());
        }
        gmdPrefix = Csw.NAMESPACE_GMD.getPrefix();
    }
    if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
        Log.debug(Geonet.CSW_SEARCH,
                "checktypenames: csw prefix set to " + cswPrefix + ", gmd prefix set to " + gmdPrefix);
    }

    // NOTE(review): the original did a namespaced getAttribute lookup here and
    // immediately overwrote it; only the no-namespace lookup is effective, so
    // the dead lookup was removed.
    Attribute typeNames = query.getAttribute("typeNames");
    if (typeNames != null) {
        String typeNamesValue = typeNames.getValue();
        // empty typenames element: fall back to the OGC 07-045 default
        if (StringUtils.isEmpty(typeNamesValue)) {
            return cswPrefix + ":Record";
        }
        // not empty: scan space-separated string
        String result = cswPrefix + ":Record";
        // try-with-resources replaces the old @SuppressWarnings("resource"):
        // the Scanner is now closed on every exit path.
        try (Scanner spaceScanner = new Scanner(typeNamesValue)) {
            spaceScanner.useDelimiter(" ");
            while (spaceScanner.hasNext()) {
                String typeName = spaceScanner.next().trim();
                if (Log.isDebugEnabled(Geonet.CSW_SEARCH)) {
                    Log.debug(Geonet.CSW_SEARCH, "checking typename in query:" + typeName);
                }

                if (!_schemaManager.getListOfTypeNames().contains(typeName)) {
                    throw new InvalidParameterValueEx("typeNames",
                            String.format("'%s' typename is not valid. Supported values are: %s", typeName,
                                    _schemaManager.getListOfTypeNames()));
                }
                // gmd:MD_Metadata wins when both values are present.
                if (typeName.equals(gmdPrefix + ":MD_Metadata")) {
                    return typeName;
                }
            }
        }
        return result;
    }
    // missing typeNames element
    else {
        if (isStrict) {
            //Mandatory check if strict.
            throw new MissingParameterValueEx("typeNames", String.format(
                    "Attribute 'typeNames' is missing. Supported values are: %s. Default is csw:Record according to OGC 07-045.",
                    _schemaManager.getListOfTypeNames()));
        } else {
            //Return default value according to OGC 07-045.
            return cswPrefix + ":Record";
        }
    }
}

From source file:ca.weblite.jdeploy.JDeploy.java

/**
 * Updates an existing package.json with the mandatory jdeploy fields (a
 * "bin" entry, the shelljs dependency and the jar/war pointer), prints the
 * result and writes it back only after the user confirms with "y" on stdin.
 *
 * @param commandName command name for the npm "bin" entry; if null it is
 *                    derived from the best candidate jar/war file name
 * @throws IOException if package.json cannot be read or written
 */
private void updatePackageJson(String commandName) throws IOException {
    File candidate = findBestCandidate();
    if (commandName == null) {
        if (candidate == null) {
            // No candidate and no explicit name: commandName stays null
            // (matches the original's empty branch).
        } else if (candidate.getName().endsWith(".jar") || candidate.getName().endsWith(".war")) {
            commandName = candidate.getName().substring(0, candidate.getName().lastIndexOf(".")).toLowerCase();
        } else {
            commandName = candidate.getName().toLowerCase();
        }
    }
    File packageJson = new File(directory, "package.json");
    System.err.println("A package.json file already exists.  Updating mandatory fields...");
    JSONParser p = new JSONParser();
    String str = FileUtils.readFileToString(packageJson, "UTF-8");
    Map pj = (Map) p.parseJSON(new StringReader(str));
    if (!pj.containsKey("bin")) {
        pj.put("bin", new HashMap());
    }
    // Only add the bin mapping when none exists, to preserve user entries.
    Map bin = (Map) pj.get("bin");
    if (bin.isEmpty()) {
        bin.put(commandName, getBinDir() + "/jdeploy.js");
    }

    if (!pj.containsKey("dependencies")) {
        pj.put("dependencies", new HashMap());
    }
    Map deps = (Map) pj.get("dependencies");
    deps.put("shelljs", "^0.7.5");

    if (!pj.containsKey("jdeploy")) {
        pj.put("jdeploy", new HashMap());
    }
    // Point jdeploy at the artifact unless a jar/war is already configured.
    Map jdeploy = (Map) pj.get("jdeploy");
    if (candidate != null && !jdeploy.containsKey("war") && !jdeploy.containsKey("jar")) {
        if (candidate.getName().endsWith(".jar")) {
            jdeploy.put("jar", getRelativePath(candidate));
        } else {
            jdeploy.put("war", getRelativePath(candidate));
        }
    }

    String jsonStr = Result.fromContent(pj).toString();
    System.out.println("Updating your package.json file as follows:\n ");
    System.out.println(jsonStr);
    System.out.println("");
    System.out.print("Proceed? (y/N)");
    // Deliberately not closed: closing this Scanner would close System.in.
    Scanner reader = new Scanner(System.in);
    // Guard against EOF on stdin — the original threw NoSuchElementException;
    // an empty response now falls through to the "Cancelled" branch.
    String response = reader.hasNext() ? reader.next() : "";
    // trim() added for consistency with init(), tolerating stray whitespace.
    if ("y".equals(response.toLowerCase().trim())) {
        System.out.println("Writing package.json...");
        FileUtils.writeStringToFile(packageJson, jsonStr, "UTF-8");
        System.out.println("Complete!");
    } else {
        System.out.println("Cancelled");
    }
}

From source file:ca.weblite.jdeploy.JDeploy.java

/**
 * Initializes a package.json for the current directory: if one already
 * exists it is updated via {@link #updatePackageJson(String)}; otherwise a
 * new file is built, shown to the user and written after confirmation.
 *
 * NOTE(review): the {@code commandName} parameter is unconditionally
 * overwritten on the first line, so any value passed by callers is ignored
 * and the later {@code commandName == null} check can never be true — this
 * looks unintentional; confirm intent before relying on the parameter.
 *
 * @param commandName requested command name (currently ignored, see note)
 * @throws IOException if package.json cannot be written
 */
private void init(String commandName) throws IOException {
    // Derive the command name from the directory name (lower-cased), falling
    // back to the parent directory when the resolved name is just ".".
    commandName = directory.getAbsoluteFile().getName().toLowerCase();
    if (".".equals(commandName)) {
        commandName = directory.getAbsoluteFile().getParentFile().getName().toLowerCase();
    }
    File packageJson = new File(directory, "package.json");
    if (packageJson.exists()) {
        updatePackageJson(commandName);
    } else {
        File candidate = findBestCandidate();
        // Dead code given the overwrite above — commandName is never null here.
        if (commandName == null) {

            /*
            if (candidate == null) {
            commandName = directory.getAbsoluteFile().getName().toLowerCase();
            if (".".equals(commandName)) {
                commandName = directory.getAbsoluteFile().getParentFile().getName().toLowerCase();
            }
            } else if (candidate.getName().endsWith(".jar") || candidate.getName().endsWith(".war")) {
            commandName = candidate.getName().substring(0, candidate.getName().lastIndexOf(".")).toLowerCase();
            } else {
            commandName = candidate.getName().toLowerCase();
            }*/
        }

        Map m = new HashMap(); // for package.json
        m.put("name", commandName);
        m.put("version", "1.0.0");
        m.put("repository", "");
        m.put("description", "");
        m.put("main", "index.js");
        // npm "bin" mapping: command name -> launcher script.
        Map bin = new HashMap();
        bin.put(commandName, getBinDir() + "/jdeploy.js");
        m.put("bin", bin);
        m.put("preferGlobal", true);
        m.put("author", "");

        Map scripts = new HashMap();
        scripts.put("test", "echo \"Error: no test specified\" && exit 1");

        m.put("scripts", scripts);
        m.put("license", "ISC");

        Map dependencies = new HashMap();
        dependencies.put("shelljs", "^0.7.5");
        m.put("dependencies", dependencies);

        List files = new ArrayList();
        files.add("jdeploy-bundle");

        m.put("files", files);

        // Point jdeploy at the jar or war artifact, if one was found.
        Map jdeploy = new HashMap();
        if (candidate == null) {
        } else if (candidate.getName().endsWith(".jar")) {
            jdeploy.put("jar", getRelativePath(candidate));
        } else {
            jdeploy.put("war", getRelativePath(candidate));
        }

        m.put("jdeploy", jdeploy);

        Result res = Result.fromContent(m);
        String jsonStr = res.toString();
        System.out.println("Creating your package.json file with following content:\n ");
        System.out.println(jsonStr);
        System.out.println("");
        System.out.print("Proceed? (y/N)");
        // Not closed on purpose: closing the Scanner would close System.in.
        Scanner reader = new Scanner(System.in);
        String response = reader.next();
        if ("y".equals(response.toLowerCase().trim())) {
            System.out.println("Writing package.json...");
            FileUtils.writeStringToFile(packageJson, jsonStr, "UTF-8");
            System.out.println("Complete!");
        } else {
            System.out.println("Cancelled");
        }
    }
}

From source file:org.olat.core.util.vfs.version.VersionsFileManager.java

/**
 * Checks whether the given leaf looks like a versions XML file by scanning
 * its content (split on TAG_PATTERN) for a "versions" token.
 *
 * @param fVersions candidate file; may be null or non-existent
 * @return true if a "versions" tag token was found, false otherwise
 */
private boolean isVersionsXmlFile(VFSLeaf fVersions) {
    if (fVersions == null || !fVersions.exists()) {
        return false;
    }
    InputStream in = fVersions.getInputStream();
    if (in == null) {
        return false;
    }

    boolean foundVersionsTag = false;
    Scanner scanner = new Scanner(in);
    // try/finally guarantees both the Scanner and the stream are released even
    // if scanning throws; the original only closed them on the happy path.
    try {
        scanner.useDelimiter(TAG_PATTERN);
        while (scanner.hasNext()) {
            if ("versions".equals(scanner.next())) {
                foundVersionsTag = true;
                break;
            }
        }
    } finally {
        // Closing the Scanner also closes the wrapped stream; closeQuietly is
        // kept as a belt-and-braces safeguard, matching the original.
        scanner.close();
        IOUtils.closeQuietly(in);
    }
    return foundVersionsTag;
}