Example usage for java.io.BufferedReader.ready()

List of usage examples for java.io.BufferedReader.ready()

Introduction

On this page you can find example usages of java.io.BufferedReader.ready().

Prototype

public boolean ready() throws IOException 

Document

Tells whether this stream is ready to be read.
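
ready() returns true when the next read is guaranteed not to block, either because characters are already buffered or because the underlying stream reports input available. It is commonly used as a loop condition when reading plain files, as in the examples below, but it is not a reliable end-of-stream test for sockets or interactive input, where checking the return value of readLine() for null is preferred. The following minimal sketch (the ReadyDemo class and the temporary file are illustrative, not taken from any of the sources below) shows the typical pattern:

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

public class ReadyDemo {
    public static void main(String[] args) throws IOException {
        // Write a small temporary file so the sketch is self-contained.
        Path tmp = Files.createTempFile("ready-demo", ".txt");
        Files.write(tmp, Arrays.asList("first line", "second line"));

        BufferedReader br = Files.newBufferedReader(tmp);
        try {
            // For a plain file, ready() stays true while unread data remains,
            // so it can drive the read loop; readLine() then never returns
            // null inside the loop.
            while (br.ready()) {
                String line = br.readLine();
                System.out.println(line);
            }
        } finally {
            br.close();
            Files.deleteIfExists(tmp);
        }
    }
}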

Usage

From source file:edu.ucdenver.ccp.nlp.ae.dict_util.GeneInfoToDictionary.java

public void convert(File geneFile, File outputFile) throws IOException, FileNotFoundException {

    BufferedReader reader = null;
    try {
        CharsetDecoder csd = Charset.forName("UTF-8").newDecoder().onMalformedInput(CodingErrorAction.REPORT)
                .onUnmappableCharacter(CodingErrorAction.REPORT);
        InputStream ins = new FileInputStream(geneFile);
        reader = new BufferedReader(new InputStreamReader(ins, csd));

        // Create Dictionary
        BufferedWriter writer = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(outputFile, true),
                        Charset.forName("UTF-8").newEncoder().onMalformedInput(CodingErrorAction.REPORT)
                                .onUnmappableCharacter(CodingErrorAction.REPORT)));
        writer.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
        writer.write("<synonym>\n");

        reader.readLine();
        while (reader.ready()) {
            String line = reader.readLine();
            //Pair<id,name>
            ImmutablePair<String, String> data = parseLine(line);
            String entry = createEntry(data.left, data.right);
            writer.write(entry);
        }

        writer.write("</synonym>\n");
        writer.close();
    } finally {
        try {
            reader.close();
        } catch (Exception x) {
        }
    }
}

From source file:edu.ksu.cis.indus.tools.slicer.SliceXMLizerCLI.java

/**
 * Extracts the FQN of classes that need to be retained in the system when optimizing the slice for space.
 *
 * @param fileName obviously.
 * @pre fileName != null
 */
private void extractExclusionListForCompaction(final String fileName) {
    retentionList = new ArrayList<String>();

    try {
        final BufferedReader _br = new BufferedReader(new FileReader(new File(fileName)));

        while (_br.ready()) {
            retentionList.add(_br.readLine());
        }
        _br.close();
    } catch (final FileNotFoundException _e) {
        LOGGER.error(
                "File does not exists - " + fileName + ". Hence the slice will not be optimized for space.",
                _e);
        retentionList = null;
    } catch (final IOException _e) {
        LOGGER.error(
                "Error reading the file - " + fileName + ". Hence the slice will not be optimized for space.",
                _e);
        retentionList = null;
    }
}

From source file:ffx.potential.parsers.ForceFieldFilter.java

private void parse(InputStream stream) {
    try {
        BufferedReader br = new BufferedReader(new InputStreamReader(stream));
        while (br.ready()) {
            String input = br.readLine();
            parse(input, br);
        }
    } catch (IOException e) {
        String message = "Error parsing force field parameters.\n";
        logger.log(Level.SEVERE, message, e);
    }
}

From source file:edu.cmu.ark.AnalysisUtilities.java

public ParseResult parseSentence(String sentence) {
    String result = "";
    //System.err.println(sentence);
    //see if a parser socket server is available
    int port = new Integer(GlobalProperties.getProperties().getProperty("parserServerPort", "5556"));
    String host = "127.0.0.1";
    Socket client;
    PrintWriter pw;
    BufferedReader br;
    String line;
    Tree parse = null;
    double parseScore = Double.MIN_VALUE;

    try {
        client = new Socket(host, port);

        pw = new PrintWriter(client.getOutputStream());
        br = new BufferedReader(new InputStreamReader(client.getInputStream()));
        pw.println(sentence);
        pw.flush(); //flush to complete the transmission

        while ((line = br.readLine()) != null) {
            //if(!line.matches(".*\\S.*")){
            //        System.out.println();
            //}
            if (br.ready()) {
                line = line.replaceAll("\n", "");
                line = line.replaceAll("\\s+", " ");
                result += line + " ";
            } else {
                parseScore = new Double(line);
            }
        }

        br.close();
        pw.close();
        client.close();

        if (parse == null) {
            parse = readTreeFromString("(ROOT (. .))");
            parseScore = -99999.0;
        }

        if (GlobalProperties.getDebug())
            System.err.println("result (parse):" + result);
        parse = readTreeFromString(result);
        return new ParseResult(true, parse, parseScore);

    } catch (Exception ex) {
        if (GlobalProperties.getDebug())
            System.err.println("Could not connect to parser server.");
        //ex.printStackTrace();
    }

    System.err.println("parsing:" + sentence);

    //if socket server not available, then use a local parser object
    if (parser == null) {
        try {
            Options op = new Options();
            String serializedInputFileOrUrl = GlobalProperties.getProperties().getProperty("parserGrammarFile",
                    "config" + File.separator + "englishFactored.ser.gz");
            parser = new LexicalizedParser(serializedInputFileOrUrl, op);
            int maxLength = new Integer(GlobalProperties.getProperties().getProperty("parserMaxLength", "40"))
                    .intValue();
            parser.setMaxLength(maxLength);
            parser.setOptionFlags("-outputFormat", "oneline");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    try {
        if (parser.parse(sentence)) {
            parse = parser.getBestParse();

            //remove all the parent annotations (this is a hacky way to do it)
            String ps = parse.toString().replaceAll("\\[[^\\]]+/[^\\]]+\\]", "");
            parse = AnalysisUtilities.getInstance().readTreeFromString(ps);

            parseScore = parser.getPCFGScore();
            return new ParseResult(true, parse, parseScore);
        }
    } catch (Exception e) {
    }

    parse = readTreeFromString("(ROOT (. .))");
    parseScore = -99999.0;
    return new ParseResult(false, parse, parseScore);
}

From source file:MedArkRef.AnalysisUtilities.java

public arkref.parsestuff.AnalysisUtilities.ParseResult parseSentence(String sentence) {
    String result = "";
    //System.err.println(sentence);
    //see if a parser socket server is available
    int port = new Integer(GlobalProperties.getProperties().getProperty("parserServerPort", "5556"));
    String host = "127.0.0.1";
    Socket client;
    PrintWriter pw;
    BufferedReader br;
    String line;
    Tree parse = null;
    double parseScore = Double.MIN_VALUE;

    try {
        client = new Socket(host, port);

        pw = new PrintWriter(client.getOutputStream());
        br = new BufferedReader(new InputStreamReader(client.getInputStream()));
        pw.println(sentence);
        pw.flush(); //flush to complete the transmission

        while ((line = br.readLine()) != null) {
            //if(!line.matches(".*\\S.*")){
            //        System.out.println();
            //}
            if (br.ready()) {
                line = line.replaceAll("\n", "");
                line = line.replaceAll("\\s+", " ");
                result += line + " ";
            } else {
                parseScore = new Double(line);
            }
        }

        br.close();
        pw.close();
        client.close();

        if (parse == null) {
            parse = readTreeFromString("(ROOT (. .))");
            parseScore = -99999.0;
        }

        if (GlobalProperties.getDebug())
            System.err.println("result (parse):" + result);
        parse = readTreeFromString(result);
        return new arkref.parsestuff.AnalysisUtilities.ParseResult(true, parse, parseScore);

    } catch (Exception ex) {
        if (GlobalProperties.getDebug())
            System.err.println("Could not connect to parser server.");
        //ex.printStackTrace();
    }

    System.err.println("parsing:" + sentence);

    //if socket server not available, then use a local parser object
    if (parser == null) {
        try {
            Options op = new Options();
            String serializedInputFileOrUrl = GlobalProperties.getProperties().getProperty("parserGrammarFile",
                    "config" + File.separator + "englishFactored.ser.gz");
            parser = new LexicalizedParser(serializedInputFileOrUrl, op);
            int maxLength = new Integer(GlobalProperties.getProperties().getProperty("parserMaxLength", "40"))
                    .intValue();
            parser.setMaxLength(maxLength);
            parser.setOptionFlags("-outputFormat", "oneline");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    try {
        if (parser.parse(sentence)) {
            parse = parser.getBestParse();

            //remove all the parent annotations (this is a hacky way to do it)
            String ps = parse.toString().replaceAll("\\[[^\\]]+/[^\\]]+\\]", "");
            parse = AnalysisUtilities.getInstance().readTreeFromString(ps);

            parseScore = parser.getPCFGScore();
            return new arkref.parsestuff.AnalysisUtilities.ParseResult(true, parse, parseScore);
        }
    } catch (Exception e) {
    }

    parse = readTreeFromString("(ROOT (. .))");
    parseScore = -99999.0;
    return new arkref.parsestuff.AnalysisUtilities.ParseResult(false, parse, parseScore);
}

From source file:org.kuali.kfs.module.bc.document.service.impl.BudgetRequestImportServiceImpl.java

/**
 * @see org.kuali.kfs.module.bc.document.service.BudgetRequestImportService#processImportFile(java.io.InputStream, java.lang.String,
 *      java.lang.String, java.lang.String)
 */
@Transactional
public List processImportFile(InputStream fileImportStream, String principalId, String fieldSeperator,
        String textDelimiter, String fileType, Integer budgetYear) throws IOException {
    List fileErrorList = new ArrayList();

    deleteBudgetConstructionMoveRecords(principalId);

    BudgetConstructionRequestMove budgetConstructionRequestMove = new BudgetConstructionRequestMove();

    BufferedReader fileReader = new BufferedReader(new InputStreamReader(fileImportStream));
    int currentLine = 1;
    while (fileReader.ready()) {
        String line = StringUtils.strip(fileReader.readLine());
        boolean isAnnualFile = (fileType.equalsIgnoreCase(RequestImportFileType.ANNUAL.toString())) ? true
                : false;

        if (StringUtils.isNotBlank(line)) {
            budgetConstructionRequestMove = ImportRequestFileParsingHelper.parseLine(line, fieldSeperator,
                    textDelimiter, isAnnualFile);

            // check if there were errors parsing the line
            if (budgetConstructionRequestMove == null) {
                fileErrorList.add(BCConstants.REQUEST_IMPORT_FILE_PROCESSING_ERROR_MESSAGE_GENERIC + " "
                        + currentLine + ".");
                // clean out table since file processing has stopped
                deleteBudgetConstructionMoveRecords(principalId);
                return fileErrorList;
            }

            String lineValidationError = validateLine(budgetConstructionRequestMove, currentLine, isAnnualFile);

            if (StringUtils.isNotEmpty(lineValidationError)) {
                fileErrorList.add(lineValidationError);
                // clean out table since file processing has stopped
                deleteBudgetConstructionMoveRecords(principalId);
                return fileErrorList;
            }

            // set default values
            if (StringUtils.isBlank(budgetConstructionRequestMove.getSubAccountNumber())) {
                budgetConstructionRequestMove.setSubAccountNumber(KFSConstants.getDashSubAccountNumber());
            }

            if (StringUtils.isBlank(budgetConstructionRequestMove.getFinancialSubObjectCode())) {
                budgetConstructionRequestMove
                        .setFinancialSubObjectCode(KFSConstants.getDashFinancialSubObjectCode());
            }
            //set object type code
            Collection<String> revenueObjectTypesParamValues = BudgetParameterFinder.getRevenueObjectTypes();
            Collection<String> expenditureObjectTypesParamValues = BudgetParameterFinder
                    .getExpenditureObjectTypes();
            ObjectCode objectCode = getObjectCode(budgetConstructionRequestMove, budgetYear);
            if (objectCode != null) {
                if (expenditureObjectTypesParamValues.contains(objectCode.getFinancialObjectTypeCode())) {
                    budgetConstructionRequestMove
                            .setFinancialObjectTypeCode(objectCode.getFinancialObjectTypeCode());

                    // now using type from object code table
                    //budgetConstructionRequestMove.setFinancialObjectTypeCode(optionsService.getOptions(budgetYear).getFinObjTypeExpenditureexpCd());
                } else if (revenueObjectTypesParamValues.contains(objectCode.getFinancialObjectTypeCode())) {
                    budgetConstructionRequestMove
                            .setFinancialObjectTypeCode(objectCode.getFinancialObjectTypeCode());

                    // now using type from object code table
                    //budgetConstructionRequestMove.setFinancialObjectTypeCode(optionsService.getOptions(budgetYear).getFinObjectTypeIncomecashCode());
                }
            }

            //check for duplicate key exception, since it requires a different error message
            Map searchCriteria = new HashMap();
            searchCriteria.put("principalId", principalId);
            searchCriteria.put("chartOfAccountsCode", budgetConstructionRequestMove.getChartOfAccountsCode());
            searchCriteria.put("accountNumber", budgetConstructionRequestMove.getAccountNumber());
            searchCriteria.put("subAccountNumber", budgetConstructionRequestMove.getSubAccountNumber());
            searchCriteria.put("financialObjectCode", budgetConstructionRequestMove.getFinancialObjectCode());
            searchCriteria.put("financialSubObjectCode",
                    budgetConstructionRequestMove.getFinancialSubObjectCode());
            if (this.businessObjectService.countMatching(BudgetConstructionRequestMove.class,
                    searchCriteria) != 0) {
                LOG.error("Move table store error, import aborted");
                fileErrorList.add(
                        "Duplicate Key for " + budgetConstructionRequestMove.getErrorLinePrefixForLogFile());
                fileErrorList.add("Move table store error, import aborted");
                deleteBudgetConstructionMoveRecords(principalId);

                return fileErrorList;
            }
            try {
                budgetConstructionRequestMove.setPrincipalId(principalId);
                importRequestDao.save(budgetConstructionRequestMove, false);
            } catch (RuntimeException e) {
                LOG.error("Move table store error, import aborted");
                fileErrorList.add("Move table store error, import aborted");
                return fileErrorList;
            }
        }

        currentLine++;
    }

    return fileErrorList;
}

From source file:org.agnitas.service.impl.NewImportWizardServiceImpl.java

public void doParse() throws Exception {
    nearLimit = false;
    recipientLimitReached = false;
    importLimitReached = false;
    int linesInFile = AgnUtils.getLineCountOfFile(inputFile);

    if (logger.isInfoEnabled())
        logger.info("Import ID: " + importProfile.getImportId() + " Number of recipients in file: "
                + (linesInFile - 1));

    columns = null;
    // check the count of recipients to import
    int importMaxRows = Integer.parseInt(AgnUtils.getDefaultValue("import.maxRows"));
    if (linesInFile > importMaxRows) {
        importLimitReached = true;
        return;
    }

    setCompletedPercent(0);

    int linesRead = 0;
    BufferedReader in = null;
    try {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(inputFile),
                Charset.getValue(importProfile.getCharset())));
        while (in.ready()) {
            invalidRecipients.clear();
            validRecipients.clear();
            rootNode = new RootNode();
            nodeStack.clear();
            nodeStack.push(rootNode);
            push("list");
            int currentRow = 0;

            // Execute the file read in blocks of "blockSize" rows
            String line;
            while (currentRow < blockSize && (line = in.readLine()) != null) {
                String[] row = null;
                try {
                    row = new CsvTokenizer(line, Separator.getValue(importProfile.getSeparator()),
                            TextRecognitionChar.getValue(importProfile.getTextRecognitionChar())).toArray();
                } catch (Exception e) {
                    status.addError(NewImportWizardServiceImpl.STRUCTURE_ERROR);
                }

                // Null indicates that the row should be ignored
                if (row != null) {
                    handleRow(row);
                    currentRow++;
                }
            }

            linesRead += currentRow;
            setCompletedPercent(linesRead * 100 / linesInFile);

            // Show the new progress status after each block
            pop("list");

            doValidate(false);
            rootNode = null;
        }
    } finally {
        IOUtils.closeQuietly(in);
    }
}

From source file:org.trnltk.apps.morphology.contextless.parser.FolderContextlessMorphologicParsingApp.java

@App
public void splitResultFiles() throws IOException {
    // ignore IOExceptions

    final File folder = new File("D:\\devl\\data\\1MSentences");

    final List<File> files = new ArrayList<File>();

    for (File file : folder.listFiles()) {
        if (file.getName().endsWith("_parsed.txt"))
            files.add(file);
    }

    int wordsForEachFile = 30000;

    for (File file : files) {
        int wordCount = 0;
        int fileCount = 0;
        final BufferedReader reader = Files.newReader(file, Charsets.UTF_8);
        BufferedWriter writer = null;
        do {
            final String line = reader.readLine();
            if (line.startsWith("- word:")) {
                if (wordCount % wordsForEachFile == 0) {
                    if (writer != null)
                        writer.close();

                    final String srcFileName = file.getName();
                    final File targetFile = new File(file.getParent() + "\\split",
                            srcFileName + "." + String.format("%03d", fileCount));
                    writer = new BufferedWriter(new FileWriter(targetFile));
                    fileCount++;
                }
                wordCount++;
            }

            writer.write(line + "\n");
        } while (reader.ready());

        if (writer != null)
            writer.close();
    }
}

From source file:org.trnltk.apps.tokenizer.TextTokenizerCorpusApp.java

@App
public void splitCorpusFiles() throws IOException {
    // ignore IOExceptions

    final File folder = new File("D:\\devl\\data\\aakindan");

    final List<File> files = new ArrayList<File>();

    for (File file : folder.listFiles()) {
        if (file.isDirectory())
            continue;
        if (file.getName().endsWith(".txt"))
            files.add(file);
    }

    int linesForEachFile = 100000;

    for (File file : files) {
        System.out.println("Processing file " + file);
        int lineCount = 0;
        int fileCount = 0;
        final BufferedReader reader = Files.newReader(file, Charsets.UTF_8);
        BufferedWriter writer = null;
        do {
            final String line = reader.readLine();
            if (lineCount % linesForEachFile == 0) {
                if (writer != null)
                    writer.close();

                final String srcFileName = file.getName();
                final File targetFile = new File(file.getParent() + "\\src_split",
                        srcFileName + "." + String.format("%04d", fileCount));
                writer = new BufferedWriter(new FileWriter(targetFile));
                fileCount++;
                System.out.println("Using new target file " + targetFile);
            }
            lineCount++;

            writer.write(line + "\n");
        } while (reader.ready());

        if (writer != null)
            writer.close();
    }
}

From source file:edu.uci.ics.jung.io.PajekNetReader.java

/**
 * Populates the graph <code>g</code> with the graph represented by the
 * Pajek-format data supplied by <code>reader</code>.  Stores edge weights,
 * if any, according to <code>nev</code> (if non-null).
 *
 * <p>Any existing vertices/edges of <code>g</code>, if any, are unaffected.
 * 
 * <p>The edge data are filtered according to <code>g</code>'s constraints, if any; thus, if 
 * <code>g</code> only accepts directed edges, any undirected edges in the 
 * input are ignored.
 * 
 * @throws IOException
 */
public G load(Reader reader, G g) throws IOException {
    BufferedReader br = new BufferedReader(reader);

    // ignore everything until we see '*Vertices'
    String curLine = skip(br, v_pred);

    if (curLine == null) // no vertices in the graph; return empty graph
        return g;

    // create appropriate number of vertices
    StringTokenizer st = new StringTokenizer(curLine);
    st.nextToken(); // skip past "*vertices";
    int num_vertices = Integer.parseInt(st.nextToken());
    List<V> id = null;
    if (vertex_factory != null) {
        for (int i = 1; i <= num_vertices; i++)
            g.addVertex(vertex_factory.create());
        id = new ArrayList<V>(g.getVertices());
    }

    // read vertices until we see any Pajek format tag ('*...')
    curLine = null;
    while (br.ready()) {
        curLine = br.readLine();
        if (curLine == null || t_pred.evaluate(curLine))
            break;
        if (curLine == "") // skip blank lines
            continue;

        try {
            readVertex(curLine, id, num_vertices);
        } catch (IllegalArgumentException iae) {
            br.close();
            reader.close();
            throw iae;
        }
    }

    // skip over the intermediate stuff (if any) 
    // and read the next arcs/edges section that we find
    curLine = readArcsOrEdges(curLine, br, g, id, edge_factory);

    // ditto
    readArcsOrEdges(curLine, br, g, id, edge_factory);

    br.close();
    reader.close();

    return g;
}