Example usage for java.util Scanner hasNextLine

List of usage examples for java.util Scanner hasNextLine

Introduction

On this page you can find example usages of the java.util.Scanner.hasNextLine method.

Prototype

public boolean hasNextLine() 

Source Link

Document

Returns true if there is another line in the input of this scanner.

Usage

From source file:org.sasabus.export2Freegis.network.SubscriptionManager.java

/**
 * Sends the unsubscription request read from UNSUBSCRIPTIONFILE to the remote
 * TmEvNotificationService endpoint, substituting the current timestamp for the
 * ":timestamp" placeholder. Non-200 responses are logged to stdout.
 *
 * @throws IOException if the template file cannot be read or the HTTP call fails
 */
public void unsubscribe() throws IOException {
    // Read the unsubscription XML template into one string (line breaks dropped,
    // matching the original concatenation behavior).
    Scanner sc = new Scanner(new File(UNSUBSCRIPTIONFILE));
    StringBuilder templateBuilder = new StringBuilder();
    try {
        while (sc.hasNextLine()) {
            templateBuilder.append(sc.nextLine());
        }
    } finally {
        sc.close();
    }
    String subscriptionstring = templateBuilder.toString();

    // Build an ISO-8601-style timestamp; the raw "Z" offset (e.g. "+0200") needs
    // a colon inserted before its final two digits ("+02:00").
    SimpleDateFormat date_date = new SimpleDateFormat("yyyy-MM-dd");
    SimpleDateFormat date_time = new SimpleDateFormat("HH:mm:ssZ");

    Date d = new Date();
    String timestamp = date_date.format(d) + "T" + date_time.format(d);
    timestamp = timestamp.substring(0, timestamp.length() - 2) + ":"
            + timestamp.substring(timestamp.length() - 2);

    subscriptionstring = subscriptionstring.replaceAll(":timestamp", timestamp);

    String requestString = "http://" + this.address + ":" + this.portnumber_sender
            + "/TmEvNotificationService/gms/subscription.xml";

    HttpPost subrequest = new HttpPost(requestString);

    StringEntity requestEntity = new StringEntity(subscriptionstring,
            ContentType.create("text/xml", "ISO-8859-1"));

    CloseableHttpClient httpClient = HttpClients.createDefault();
    try {
        subrequest.setEntity(requestEntity);

        CloseableHttpResponse response = httpClient.execute(subrequest);
        try {
            // Log diagnostics on any non-200 answer; the body may carry details.
            if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
                System.out.println("Status Response: " + response.getStatusLine().getStatusCode());
                System.out.println("Status Phrase: " + response.getStatusLine().getReasonPhrase());
                HttpEntity responseEntity = response.getEntity();
                if (responseEntity != null) {
                    String responsebody = EntityUtils.toString(responseEntity);
                    System.out.println(responsebody);
                }
            }
        } finally {
            // BUG FIX: the response/client were previously only closed on non-200
            // answers, leaking the connection on every successful call.
            response.close();
        }
    } finally {
        httpClient.close();
    }
}

From source file:org.apache.mahout.df.mapreduce.TestForest.java

/**
 * Classifies every non-empty line of the input file with the given forest.
 * When an output path is supplied, one prediction is written per line; when
 * the "analyze" flag is set, results are accumulated into the analyzer.
 *
 * @throws IOException if the input cannot be read or predictions cannot be written
 */
private void testFile(Path inPath, Path outPath, DataConverter converter, DecisionForest forest,
        Dataset dataset, ResultAnalyzer analyzer, Random rng) throws IOException {
    // create the predictions file only when an output path was requested
    FSDataOutputStream ofile = null;

    if (outPath != null) {
        ofile = outFS.create(outPath);
    }

    FSDataInputStream input = dataFS.open(inPath);
    Scanner scanner = new Scanner(input);
    try {
        while (scanner.hasNextLine()) {
            String line = scanner.nextLine();
            if (line.isEmpty()) {
                continue; // skip empty lines
            }

            Instance instance = converter.convert(0, line);
            int prediction = forest.classify(rng, instance);

            // BUG FIX: guard on the local ofile rather than the outer outputPath;
            // the old check could NPE when outPath was null but outputPath was not.
            if (ofile != null) {
                ofile.writeChars(Integer.toString(prediction)); // write the prediction
                ofile.writeChar('\n');
            }

            if (analyze) {
                analyzer.addInstance(dataset.getLabel(instance.label),
                        new ClassifierResult(dataset.getLabel(prediction), 1.0));
            }
        }
    } finally {
        // close the readers even if classification throws
        scanner.close();
        input.close();
        // NOTE(review): ofile is left open as in the original — confirm the caller
        // flushes/closes it.
    }
}

From source file:com.joliciel.csvLearner.EventCombinationGenerator.java

/**
 * Loads a two-column CSV file (outcome, count) into desiredCountPerOutcome,
 * preserving the file's row order. The file is read as UTF-8.
 *
 * @param file the CSV file to read
 * @throws RuntimeException wrapping any IOException raised while reading
 */
public void readDesiredCounts(File file) {
    try {
        this.desiredCountPerOutcome = new LinkedHashMap<String, Integer>();
        Scanner lineScanner = new Scanner(new FileInputStream(file), "UTF-8");
        try {
            while (lineScanner.hasNextLine()) {
                // Column 0 is the outcome label, column 1 its desired count.
                List<String> cells = CSVFormatter.getCSVCells(lineScanner.nextLine());
                this.desiredCountPerOutcome.put(cells.get(0), Integer.parseInt(cells.get(1)));
            }
        } finally {
            // Closing the scanner also closes the underlying stream.
            lineScanner.close();
        }
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
}

From source file:edu.lafayette.metadb.web.controlledvocab.UpdateVocab.java

/**
 * Creates or updates a controlled vocabulary from a multipart form post.
 * The form supplies the vocabulary name ("vocab-name"), the terms either as
 * an uploaded file (one term per line) or as an inline "vocab-terms" field,
 * and optionally the id of an attribute ("assigned-field") to bind the
 * vocabulary to. A plain-text status message is written back to the client.
 *
 * @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse response)
 */
@SuppressWarnings("unchecked")
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    PrintWriter out = response.getWriter();
    String vocabName = null;
    String name = "nothing";
    String status = "Upload failed ";

    try {

        if (ServletFileUpload.isMultipartContent(request)) {
            status = "isMultiPart";
            ServletFileUpload servletFileUpload = new ServletFileUpload(new DiskFileItemFactory());
            List fileItemsList = servletFileUpload.parseRequest(request);
            DiskFileItemFactory diskFileItemFactory = new DiskFileItemFactory();
            // NOTE(review): this factory is created after parseRequest already ran with a
            // different factory instance, so this threshold appears to have no effect — confirm.
            diskFileItemFactory.setSizeThreshold(40960); /* the unit is bytes */

            InputStream input = null;
            Iterator it = fileItemsList.iterator();
            String result = "";
            String vocabs = null;
            int assigned = -1; // attribute id to assign the vocab to; -1 = none requested

            // Walk the multipart items, capturing the form fields and the uploaded
            // file's input stream (if any).
            while (it.hasNext()) {
                FileItem fileItem = (FileItem) it.next();
                // Debug trace of every item seen (currently only accumulated, not logged).
                result += "UpdateVocab: Form Field: " + fileItem.isFormField() + " Field name: "
                        + fileItem.getFieldName() + " Name: " + fileItem.getName() + " String: "
                        + fileItem.getString("utf-8") + "\n";
                if (fileItem.isFormField()) {
                    /* The file item contains a simple name-value pair of a form field */
                    if (fileItem.getFieldName().equals("vocab-name"))
                        vocabName = fileItem.getString();
                    else if (fileItem.getFieldName().equals("vocab-terms"))
                        vocabs = fileItem.getString("utf-8");
                    else if (fileItem.getFieldName().equals("assigned-field"))
                        assigned = Integer.parseInt(fileItem.getString());
                } else {
                    // Non-empty uploaded file: remember its name and open its stream.
                    if (fileItem.getString() != null && !fileItem.getString().equals("")) {
                        @SuppressWarnings("unused")
                        String content = "nothing";
                        /* The file item contains an uploaded file */
                        name = fileItem.getName();
                        content = fileItem.getContentType();
                        input = fileItem.getInputStream();
                    }
                }
            }
            if (vocabName != null) {
                // Terms are collected sorted and de-duplicated.
                Set<String> vocabList = new TreeSet<String>();
                if (input != null) {
                    // Terms came from an uploaded file: one trimmed term per line.
                    // NOTE(review): this scanner/stream is never closed — confirm the
                    // upload library reclaims it.
                    Scanner fileSc = new Scanner(input);
                    while (fileSc.hasNextLine()) {
                        String vocabEntry = fileSc.nextLine();
                        vocabList.add(vocabEntry.trim());
                    }

                    HttpSession session = request.getSession(false);
                    if (session != null) {
                        String userName = (String) session.getAttribute("username");
                        SysLogDAO.log(userName, Global.SYSLOG_PROJECT,
                                "User " + userName + " created vocab " + vocabName);
                    }
                    status = "Vocab name: " + vocabName + ". File name: " + name + "\n";

                } else {
                    // Terms came from the inline form field, one per line.
                    // NOTE(review): vocabs may still be null here (neither file nor field
                    // posted); the resulting NPE is swallowed by the catch below — confirm.
                    for (String vocabTerm : vocabs.split("\n"))
                        vocabList.add(vocabTerm);
                }
                if (!vocabList.isEmpty()) {
                    // Try create first; fall back to update when the vocab already exists.
                    boolean updated = ControlledVocabDAO.addControlledVocab(vocabName, vocabList)
                            || ControlledVocabDAO.updateControlledVocab(vocabName, vocabList);
                    if (updated) {
                        status = "Vocab " + vocabName + " updated successfully ";
                        if (assigned != -1)
                            if (!AdminDescAttributesDAO.setControlledVocab(assigned, vocabName)) {
                                status = "Vocab " + vocabName + " cannot be assigned to " + assigned;
                            }

                    } else
                        status = "Vocab " + vocabName + " cannot be updated/created";
                } else
                    status = "Vocab " + vocabName + " has empty vocabList";
            }
        }
    } catch (Exception e) {
        // All failures are logged and reported through the status string.
        MetaDbHelper.logEvent(e);
    }
    MetaDbHelper.note(status);
    out.print(status);
    out.flush();
}

From source file:org.jsecurity.io.IniResource.java

/**
 * Parses INI-style content from the scanner into the sections map. Lines are
 * cleaned first; blank lines and comments are skipped. A "[section]" header
 * starts a new section; every other line is split into a key/value pair and
 * stored in the current section. Sections are flushed when the next header is
 * seen and once more at end of input.
 *
 * @param scanner the source to read, consumed line by line
 */
public void load(Scanner scanner) {

    String activeSectionName = null;
    Map<String, String> activeSection = new LinkedHashMap<String, String>();

    while (scanner.hasNextLine()) {
        String line = clean(scanner.nextLine());

        // Skip blanks and comment lines.
        if (line == null || line.startsWith(COMMENT_POUND) || line.startsWith(COMMENT_SEMICOLON)) {
            continue;
        }

        String headerName = getSectionName(line.toLowerCase());
        if (headerName == null) {
            // Ordinary property line: split into a key/value pair and record it.
            try {
                String[] pair = splitKeyValue(line);
                activeSection.put(pair[0], pair[1]);
            } catch (ParseException e) {
                String msg = "Unable to read key value pair for line [" + line + "].";
                throw new ResourceException(msg, e);
            }
        } else {
            // New [section] header: flush the previous section before switching.
            if (!activeSection.isEmpty()) {
                sections.put(activeSectionName, activeSection);
            }
            activeSectionName = headerName;
            activeSection = new LinkedHashMap<String, String>();

            if (log.isDebugEnabled()) {
                log.debug("Parsing " + HEADER_PREFIX + activeSectionName + HEADER_SUFFIX);
            }
        }
    }

    // Flush the trailing section, if any content was collected.
    if (!activeSection.isEmpty()) {
        sections.put(activeSectionName, activeSection);
    }
}

From source file:com.wsc.myexample.decisionForest.MyTestForest.java

/**
 * Classifies every non-empty, parseable line of the input file with the given
 * forest, optionally writing one prediction per line to outPath, and feeds
 * each result into the analyzer.
 *
 * @throws IOException if the input cannot be read or predictions cannot be written
 */
private void testFile(String inPath, String outPath, DataConverter converter, MyDecisionForest forest,
        Dataset dataset, Random rng, ResultAnalyzer analyzer) throws IOException {
    // create the predictions file only when an output path was requested
    DataOutputStream ofile = null;

    if (outPath != null) {
        ofile = new DataOutputStream(new FileOutputStream(outPath));
    }

    DataInputStream input = new DataInputStream(new FileInputStream(inPath));
    try {
        Scanner scanner = new Scanner(input);
        try {
            while (scanner.hasNextLine()) {
                String line = scanner.nextLine();
                if (line.isEmpty()) {
                    continue; // skip empty lines
                }

                // Unparseable rows are skipped rather than aborting the run.
                Instance instance = converter.convert(line);
                if (instance == null)
                    continue;

                double prediction = forest.classify(dataset, rng, instance);

                if (ofile != null) {
                    ofile.writeChars(Double.toString(prediction)); // write the prediction
                    ofile.writeChar('\n');
                }

                analyzer.addInstance(dataset.getLabelString(dataset.getLabel(instance)),
                        new ClassifierResult(dataset.getLabelString(prediction), 1.0));
            }
        } finally {
            // ensure the scanner closes even when classification throws
            scanner.close();
        }
    } finally {
        Closeables.closeQuietly(input);
        // BUG FIX: the unconditional ofile.close() threw a NullPointerException
        // whenever outPath was null (no predictions file requested).
        if (ofile != null) {
            ofile.close();
        }
    }
}

From source file:org.mrgeo.data.accumulo.partitioners.AccumuloMrGeoRangePartitioner.java

/**
 * Returns the partition cut points as tile ids, loading and caching the raw
 * cut points from the distributed cache on first use.
 *
 * The cut file is located among the local cache files by matching the last
 * path segment of the configured cut file name; each of its lines is a
 * Base64-encoded Text value. The first 8 bytes of each decoded value are
 * read as a big-endian long tile id.
 *
 * @return one TileIdWritable per cached cut point, in sorted Text order
 * @throws IOException if the cut file cannot be read
 * @throws FileNotFoundException if no matching file is in the distributed cache
 */
private synchronized TileIdWritable[] getCutPoints() throws IOException {
    if (cutPointArray == null) {
        String cutFileName = conf.get(CUTFILE_KEY);
        Path[] cf = DistributedCache.getLocalCacheFiles(conf);

        if (cf != null) {
            for (Path path : cf) {
                // Match on the trailing file name only; the cache localizes files
                // under job-specific parent directories.
                if (path.toUri().getPath().endsWith(cutFileName.substring(cutFileName.lastIndexOf('/')))) {
                    TreeSet<Text> cutPoints = new TreeSet<Text>();
                    Scanner in = new Scanner(new BufferedReader(new FileReader(path.toString())));
                    try {
                        while (in.hasNextLine())
                            cutPoints.add(new Text(Base64.decodeBase64(in.nextLine().getBytes())));
                    } finally {
                        in.close();
                    }
                    cutPointArray = cutPoints.toArray(new Text[cutPoints.size()]);
                    break;
                }
            }
        }
        if (cutPointArray == null)
            throw new FileNotFoundException(cutFileName + " not found in distributed cache");
    }
    // Only the raw Text cut points are cached; the TileIdWritable view is
    // rebuilt on every call.
    tileIdPointArray = new TileIdWritable[cutPointArray.length];
    for (int x = 0; x < cutPointArray.length; x++) {
        byte[] b = cutPointArray[x].getBytes();
        ByteBuffer buffer = ByteBuffer.wrap(b);
        long k = buffer.getLong();
        tileIdPointArray[x] = new TileIdWritable(k);
    }

    return tileIdPointArray;
}

From source file:com.bealearts.template.SimpleTemplate.java

/**
 * Loads a template file (read as UTF-8) into memory, resetting any previously
 * parsed block state, and hands the full text to parseTemplate. Each line is
 * re-terminated with the platform line separator.
 *
 * @param template the template file to load
 * @throws FileNotFoundException if the template file does not exist
 */
public void loadTemplate(File template) throws FileNotFoundException {
    this.blockMap = new HashMap<String, BlockContent>();
    this.blockData = null;

    String newline = System.getProperty("line.separator");
    StringBuilder content = new StringBuilder();
    Scanner reader = new Scanner(new FileInputStream(template), "UTF-8");
    try {
        while (reader.hasNextLine()) {
            content.append(reader.nextLine()).append(newline);
        }
    } finally {
        // Closing the scanner also closes the underlying stream.
        reader.close();
    }

    this.parseTemplate(content.toString());
}

From source file:com.joliciel.talismane.lexicon.LexiconSerializer.java

/**
 * Serializes a directory of text lexicon files into binary ".obj" files.
 *
 * Expected name=value arguments: lexiconDir (input directory), outDir (output
 * directory, created if absent), posTagSet (pos-tag set file), lexiconPattern
 * (file whose first line holds the regex used to parse lexicon entries). Each
 * input file with an extension is written to outDir under the same base name
 * with an ".obj" suffix.
 *
 * @param args name=value command-line arguments as described above
 * @throws RuntimeException on unknown/missing arguments, unreadable lexicon
 *         directory, or any wrapped IOException
 */
public void serializeLexicons(String[] args) {
    try {
        String lexiconDirPath = "";
        String outDirPath = "";
        String posTagSetPath = "";
        String lexiconPatternPath = "";

        // Parse name=value command-line arguments.
        for (String arg : args) {
            int equalsPos = arg.indexOf('=');
            String argName = arg.substring(0, equalsPos);
            String argValue = arg.substring(equalsPos + 1);
            if (argName.equals("lexiconDir"))
                lexiconDirPath = argValue;
            else if (argName.equals("outDir"))
                outDirPath = argValue;
            else if (argName.equals("posTagSet"))
                posTagSetPath = argValue;
            else if (argName.equals("lexiconPattern"))
                lexiconPatternPath = argValue;
            else
                throw new RuntimeException("Unknown argument: " + argName);
        }

        if (lexiconDirPath.length() == 0)
            throw new RuntimeException("Missing argument: lexiconDir");
        if (outDirPath.length() == 0)
            throw new RuntimeException("Missing argument: outDir");
        if (posTagSetPath.length() == 0)
            throw new RuntimeException("Missing argument: posTagSet");
        if (lexiconPatternPath.length() == 0)
            throw new RuntimeException("Missing argument: lexiconPattern");

        File outDir = new File(outDirPath);
        outDir.mkdirs();

        TalismaneServiceLocator talismaneServiceLocator = TalismaneServiceLocator.getInstance();

        PosTaggerService posTaggerService = talismaneServiceLocator.getPosTaggerServiceLocator()
                .getPosTaggerService();
        File posTagSetFile = new File(posTagSetPath);
        PosTagSet posTagSet = posTaggerService.getPosTagSet(posTagSetFile);

        TalismaneSession.setPosTagSet(posTagSet);

        File lexiconDir = new File(lexiconDirPath);
        File[] lexiconFiles = lexiconDir.listFiles();
        // BUG FIX: listFiles() returns null (not an empty array) for a missing or
        // unreadable directory; the old code then NPE'd at the for-loop below.
        if (lexiconFiles == null)
            throw new RuntimeException("Cannot read lexicon directory: " + lexiconDirPath);

        // The pattern file's first line holds the entry-parsing regex.
        File lexiconPatternFile = new File(lexiconPatternPath);
        Scanner lexiconPatternScanner = new Scanner(lexiconPatternFile);
        String regex = null;
        try {
            if (lexiconPatternScanner.hasNextLine()) {
                regex = lexiconPatternScanner.nextLine();
            }
        } finally {
            // BUG FIX: this scanner was never closed — file handle leak.
            lexiconPatternScanner.close();
        }
        RegexLexicalEntryReader lexicalEntryReader = new RegexLexicalEntryReader(this.getMorphologyReader());
        lexicalEntryReader.setRegex(regex);
        for (File inFile : lexiconFiles) {
            LOG.debug("Serializing: " + inFile.getName());
            LexiconFile lexiconFile = new LexiconFile(lexicalEntryReader, inFile);
            lexiconFile.setPosTagSet(posTagSet);

            // Only files with an extension are serialized; the ".obj" output keeps
            // the base name.
            String fileNameBase = inFile.getName();
            if (fileNameBase.indexOf('.') >= 0) {
                fileNameBase = fileNameBase.substring(0, fileNameBase.lastIndexOf('.'));

                File outFile = new File(outDir, fileNameBase + ".obj");
                try {
                    FileOutputStream fos = new FileOutputStream(outFile);
                    ObjectOutputStream out = new ObjectOutputStream(fos);

                    try {
                        out.writeObject(lexiconFile);
                    } finally {
                        out.flush();
                        out.close();
                    }
                } catch (IOException ioe) {
                    throw new RuntimeException(ioe);
                }
            }
        }
    } catch (IOException ioe) {
        LogUtils.logError(LOG, ioe);
        throw new RuntimeException(ioe);
    }
}

From source file:org.apache.nifi.processors.enrich.AbstractEnrichProcessor.java

/**
 * This method returns the parsed record string in the form of
 * a map of two strings, consisting of a iteration aware attribute
 * names and its values/*from   w w  w .j a va2 s . c  om*/
 *
        
 * @param  rawResult the raw query results to be parsed
 * @param queryParser The parsing mechanism being used to parse the data into groups
 * @param queryRegex The regex to be used to split the query results into groups. The regex MUST implement at least on named capture group "KEY" to be used to populate the table rows
 * @param lookupKey The regular expression number or the column of a split to be used for matching
 * @return  Table with attribute names and values where each Table row uses the value of the KEY named capture group specified in @param queryRegex
 */
protected Table<String, String, String> parseBatchResponse(String rawResult, String queryParser,
        String queryRegex, int lookupKey, String schema) {
    // Note the hardcoded record0.
    //  Since iteration is done within the parser and Multimap is used, the record number here will always be 0.
    // Consequentially, 0 is hardcoded so that batched and non batched attributes follow the same naming
    // conventions
    final String recordPosition = ".record0";

    final Table<String, String, String> results = HashBasedTable.create();

    switch (queryParser) {
    case "Split":
        Scanner scanner = new Scanner(rawResult);
        while (scanner.hasNextLine()) {
            String line = scanner.nextLine();
            // Time to Split the results...
            String[] splitResult = line.split(queryRegex);

            for (int r = 0; r < splitResult.length; r++) {
                results.put(splitResult[lookupKey - 1],
                        "enrich." + schema + recordPosition + ".group" + String.valueOf(r), splitResult[r]);
            }
        }
        break;
    case "RegEx":
        // prepare the regex
        Pattern p;
        // Regex is multiline. Each line should include a KEY for lookup
        p = Pattern.compile(queryRegex, Pattern.MULTILINE);

        Matcher matcher = p.matcher(rawResult);
        while (matcher.find()) {
            try {
                // Note that RegEx matches capture group 0 is usually broad but starting with it anyway
                // for the sake of purity
                for (int r = 0; r <= matcher.groupCount(); r++) {
                    results.put(matcher.group(lookupKey),
                            "enrich." + schema + recordPosition + ".group" + String.valueOf(r),
                            matcher.group(r));
                }
            } catch (IndexOutOfBoundsException e) {
                getLogger().warn(
                        "Could not find capture group {} while processing result. You may want to review your "
                                + "Regular Expression to match against the content \"{}\"",
                        new Object[] { lookupKey, rawResult });
            }
        }
        break;
    }

    return results;
}