Usage examples for org.apache.commons.io.FileUtils.readLines
public static List<String> readLines(File file) throws IOException
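A minimal, self-contained sketch of a typical call before the project examples below. The file name is hypothetical; the charset-taking overload is shown because the charset-less one is deprecated in recent Commons IO releases (2.5+):

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;

import org.apache.commons.io.FileUtils;

public class ReadLinesExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical input file; readLines loads it fully into memory,
        // one list element per line (line terminators are stripped).
        File file = new File("example.txt");
        List<String> lines = FileUtils.readLines(file, StandardCharsets.UTF_8);
        for (String line : lines) {
            System.out.println(line);
        }
    }
}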
From source file:hu.bme.mit.trainbenchmark.generator.sql.SqlSerializer.java
@Override
public void persistModel() throws IOException, InterruptedException {
    final String footerFilePath = gc.getConfigBase().getWorkspaceDir() + SQL_METAMODEL_DIR + "railway-footer.sql";
    final File footerFile = new File(footerFilePath);

    final List<String> lines = FileUtils.readLines(footerFile);
    for (final String line : lines) {
        write(line);
    }

    writer.close();
    compact();
}
From source file:de.tudarmstadt.ukp.dkpro.tc.mallet.report.MalletBatchCrossValidationReport.java
@Override
public void execute() throws Exception {
    StorageService store = getContext().getStorageService();

    FlexTable<String> table = FlexTable.forClass(String.class);

    Map<String, List<Double>> key2resultValues = new HashMap<String, List<Double>>();

    for (TaskContextMetadata subcontext : getSubtasks()) {
        String name = BatchTask.class.getSimpleName() + "CrossValidation";
        // one CV batch (which internally ran numFolds times)
        if (subcontext.getLabel().startsWith(name)) {
            Map<String, String> discriminatorsMap = store
                    .retrieveBinary(subcontext.getId(), Task.DISCRIMINATORS_KEY, new PropertiesAdapter())
                    .getMap();

            File eval = store.getStorageFolder(subcontext.getId(), EVAL_FILE_NAME + SUFFIX_CSV);

            Map<String, String> resultMap = new HashMap<String, String>();

            String[][] evalMatrix = null;

            int i = 0;
            for (String line : FileUtils.readLines(eval)) {
                String[] tokenizedLine = StrTokenizer.getCSVInstance(line).getTokenArray();
                if (evalMatrix == null) {
                    evalMatrix = new String[FileUtils.readLines(eval).size()][tokenizedLine.length];
                }
                evalMatrix[i] = tokenizedLine;
                i++;
            }

            // columns
            for (int j = 0; j < evalMatrix[0].length; j++) {
                String header = evalMatrix[0][j];
                String[] vals = new String[evalMatrix.length - 1];

                // rows
                for (int k = 1; k < evalMatrix.length; k++) {
                    if (evalMatrix[k][j].equals("null")) {
                        vals[k - 1] = String.valueOf(0.);
                    } else {
                        vals[k - 1] = evalMatrix[k][j];
                    }
                }

                Mean mean = new Mean();
                Sum sum = new Sum();
                StandardDeviation std = new StandardDeviation();

                double[] dVals = new double[vals.length];
                Set<String> sVals = new HashSet<String>();
                for (int k = 0; k < vals.length; k++) {
                    try {
                        dVals[k] = Double.parseDouble(vals[k]);
                        sVals = null;
                    } catch (NumberFormatException e) {
                        dVals = null;
                        sVals.add(vals[k]);
                    }
                }

                if (dVals != null) {
                    if (nonAveragedResultsMeasures.contains(header)) {
                        resultMap.put(header, String.valueOf(sum.evaluate(dVals)));
                    } else {
                        resultMap.put(header, String.valueOf(mean.evaluate(dVals)) + "\u00B1"
                                + String.valueOf(std.evaluate(dVals)));
                    }
                } else {
                    if (sVals.size() > 1) {
                        resultMap.put(header, "---");
                    } else {
                        resultMap.put(header, vals[0]);
                    }
                }
            }

            String key = getKey(discriminatorsMap);

            List<Double> results;
            if (key2resultValues.get(key) == null) {
                results = new ArrayList<Double>();
            } else {
                results = key2resultValues.get(key);
            }
            key2resultValues.put(key, results);

            Map<String, String> values = new HashMap<String, String>();
            Map<String, String> cleanedDiscriminatorsMap = new HashMap<String, String>();

            for (String disc : discriminatorsMap.keySet()) {
                if (!ReportUtils.containsExcludePattern(disc, discriminatorsToExclude)) {
                    cleanedDiscriminatorsMap.put(disc, discriminatorsMap.get(disc));
                }
            }
            values.putAll(cleanedDiscriminatorsMap);
            values.putAll(resultMap);

            table.addRow(subcontext.getLabel(), values);
        }
    }

    getContext().getLoggingService().message(getContextLabel(), ReportUtils.getPerformanceOverview(table));
    // Excel cannot cope with more than 255 columns
    if (table.getColumnIds().length <= 255) {
        getContext().storeBinary(EVAL_FILE_NAME + "_compact" + SUFFIX_EXCEL, table.getExcelWriter());
    }
    getContext().storeBinary(EVAL_FILE_NAME + "_compact" + SUFFIX_CSV, table.getCsvWriter());

    table.setCompact(false);
    // Excel cannot cope with more than 255 columns
    if (table.getColumnIds().length <= 255) {
        getContext().storeBinary(EVAL_FILE_NAME + SUFFIX_EXCEL, table.getExcelWriter());
    }
    getContext().storeBinary(EVAL_FILE_NAME + SUFFIX_CSV, table.getCsvWriter());

    // output the location of the batch evaluation folder
    // otherwise it might be hard for novice users to locate this
    File dummyFolder = store.getStorageFolder(getContext().getId(), "dummy");
    // TODO can we also do this without creating and deleting the dummy folder?
    getContext().getLoggingService().message(getContextLabel(),
            "Storing detailed results in:\n" + dummyFolder.getParent() + "\n");
    dummyFolder.delete();
}
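Note that the matrix-building loop above calls FileUtils.readLines(eval) a second time just to size the array. A minimal sketch of the same step that reads the file once and reuses the returned list (hypothetical helper class and method name, not part of the original report):

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.text.StrTokenizer;

class EvalMatrixReader {
    // Hypothetical helper: tokenizes the CSV evaluation file into a matrix
    // with a single pass over the file.
    static String[][] readEvalMatrix(File eval) throws IOException {
        List<String> lines = FileUtils.readLines(eval); // read once, reuse the list
        String[][] evalMatrix = null;
        int i = 0;
        for (String line : lines) {
            String[] tokenizedLine = StrTokenizer.getCSVInstance(line).getTokenArray();
            if (evalMatrix == null) {
                evalMatrix = new String[lines.size()][tokenizedLine.length];
            }
            evalMatrix[i++] = tokenizedLine;
        }
        return evalMatrix;
    }
}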
From source file:de.tudarmstadt.ukp.dkpro.tc.ml.uima.TcAnnotatorDocument.java
private List<String> initFeatureExtractors() throws Exception {
    List<String> featureExtractors = new ArrayList<>();
    List<String> featureConfiguration = FileUtils
            .readLines(new File(tcModelLocation, MODEL_FEATURE_EXTRACTORS));
    for (String featureExtractor : featureConfiguration) {
        featureExtractors.add(featureExtractor);
    }
    return featureExtractors;
}
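Since readLines already returns a List<String>, the element-by-element copy above can be collapsed; a minimal equivalent sketch (same tcModelLocation field and MODEL_FEATURE_EXTRACTORS constant as in the example):

private List<String> initFeatureExtractors() throws IOException {
    // readLines already returns List<String>; wrap it defensively and return.
    return new ArrayList<>(FileUtils.readLines(new File(tcModelLocation, MODEL_FEATURE_EXTRACTORS)));
}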
From source file:edu.isistan.carcha.lsa.TraceabilityComparator.java
/**
 * Run.
 *
 * @param args the args
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void run(String[] args) throws IOException {
    // read the requirement concerns and the architectural concerns
    List<String> reqConcerns = FileUtils.readLines(new File(args[2]));
    List<String> archConcerns = FileUtils.readLines(new File(args[3]));

    // where we will put the results
    String outputDirectory = args[4];

    double[] thresholds = { 0.70, 0.75, 0.80, 0.90 };

    // check if the space is a file or a directory
    // directory: run the traceability for each file in the directory
    // file: run the traceability
    File sspaceInput = new File(args[1]);
    List<File> filesToAnalyze = new ArrayList<File>();
    if (sspaceInput.isDirectory()) {
        for (File sspaceFile : sspaceInput.listFiles()) {
            if (sspaceFile.isFile()) {
                filesToAnalyze.add(sspaceFile);
            }
        }
    } else {
        filesToAnalyze.add(sspaceInput);
    }

    for (File sspaceFile : filesToAnalyze) {
        // build the document vector to compare sentences
        StaticSemanticSpace sspace = new StaticSemanticSpace(sspaceFile);
        DocumentVectorBuilder builder = new DocumentVectorBuilder(sspace);
        this.setOutputDirectory(new File(outputDirectory + File.separator + sspaceFile.getName()));
        for (Double threshold : thresholds) {
            this.setSspaceFileName(sspaceFile.getName());
            this.setThreshold(threshold);
            this.run(builder, reqConcerns, archConcerns);
        }
    }
}
From source file:de.tudarmstadt.ukp.dkpro.tc.crfsuite.CRFSuiteBatchCrossValidationReport.java
@Override
public void execute() throws Exception {
    StorageService store = getContext().getStorageService();

    FlexTable<String> table = FlexTable.forClass(String.class);

    Map<String, List<Double>> key2resultValues = new HashMap<String, List<Double>>();

    for (TaskContextMetadata subcontext : getSubtasks()) {
        String name = ExperimentCrossValidation.class.getSimpleName();
        // one CV batch (which internally ran numFolds times)
        if (subcontext.getLabel().startsWith(name)) {
            Map<String, String> discriminatorsMap = store
                    .retrieveBinary(subcontext.getId(), Task.DISCRIMINATORS_KEY, new PropertiesAdapter())
                    .getMap();

            File eval = store.getStorageFolder(subcontext.getId(), EVAL_FILE_NAME + SUFFIX_CSV);

            Map<String, String> resultMap = new HashMap<String, String>();

            String[][] evalMatrix = null;

            int i = 0;
            for (String line : FileUtils.readLines(eval)) {
                String[] tokenizedLine = StrTokenizer.getCSVInstance(line).getTokenArray();
                if (evalMatrix == null) {
                    evalMatrix = new String[FileUtils.readLines(eval).size()][tokenizedLine.length];
                }
                evalMatrix[i] = tokenizedLine;
                i++;
            }

            // columns
            for (int j = 0; j < evalMatrix[0].length; j++) {
                String header = evalMatrix[0][j];
                String[] vals = new String[evalMatrix.length - 1];

                // rows
                for (int k = 1; k < evalMatrix.length; k++) {
                    if (evalMatrix[k][j].equals("null")) {
                        vals[k - 1] = String.valueOf(0.);
                    } else {
                        vals[k - 1] = evalMatrix[k][j];
                    }
                }

                Mean mean = new Mean();
                Sum sum = new Sum();
                StandardDeviation std = new StandardDeviation();

                double[] dVals = new double[vals.length];
                Set<String> sVals = new HashSet<String>();
                for (int k = 0; k < vals.length; k++) {
                    try {
                        dVals[k] = Double.parseDouble(vals[k]);
                        sVals = null;
                    } catch (NumberFormatException e) {
                        dVals = null;
                        sVals.add(vals[k]);
                    }
                }

                if (dVals != null) {
                    if (nonAveragedResultsMeasures.contains(header)) {
                        resultMap.put(header + foldSum, String.valueOf(sum.evaluate(dVals)));
                    } else {
                        resultMap.put(header + foldAveraged, String.valueOf(
                                mean.evaluate(dVals) + "\u00B1" + String.valueOf(std.evaluate(dVals))));
                    }
                } else {
                    if (sVals.size() > 1) {
                        resultMap.put(header, "---");
                    } else {
                        resultMap.put(header, vals[0]);
                    }
                }
            }

            String key = getKey(discriminatorsMap);

            List<Double> results;
            if (key2resultValues.get(key) == null) {
                results = new ArrayList<Double>();
            } else {
                results = key2resultValues.get(key);
            }
            key2resultValues.put(key, results);

            Map<String, String> values = new HashMap<String, String>();
            Map<String, String> cleanedDiscriminatorsMap = new HashMap<String, String>();

            for (String disc : discriminatorsMap.keySet()) {
                if (!ReportUtils.containsExcludePattern(disc, discriminatorsToExclude)) {
                    cleanedDiscriminatorsMap.put(disc, discriminatorsMap.get(disc));
                }
            }
            values.putAll(cleanedDiscriminatorsMap);
            values.putAll(resultMap);

            table.addRow(subcontext.getLabel(), values);
        }
    }

    getContext().getLoggingService().message(getContextLabel(), ReportUtils.getPerformanceOverview(table));
    // Excel cannot cope with more than 255 columns
    if (table.getColumnIds().length <= 255) {
        getContext().storeBinary(EVAL_FILE_NAME + "_compact" + SUFFIX_EXCEL, table.getExcelWriter());
    }
    getContext().storeBinary(EVAL_FILE_NAME + "_compact" + SUFFIX_CSV, table.getCsvWriter());

    table.setCompact(false);
    // Excel cannot cope with more than 255 columns
    if (table.getColumnIds().length <= 255) {
        getContext().storeBinary(EVAL_FILE_NAME + SUFFIX_EXCEL, table.getExcelWriter());
    }
    getContext().storeBinary(EVAL_FILE_NAME + SUFFIX_CSV, table.getCsvWriter());

    // output the location of the batch evaluation folder
    // otherwise it might be hard for novice users to locate this
    File dummyFolder = store.getStorageFolder(getContext().getId(), "dummy");
    // TODO can we also do this without creating and deleting the dummy folder?
    getContext().getLoggingService().message(getContextLabel(),
            "Storing detailed results in:\n" + dummyFolder.getParent() + "\n");
    dummyFolder.delete();
}
From source file:dk.nsi.haiba.lprimporter.testdata.SQLStatementsFromCSVFiles.java
private void generateProceduresData() throws IOException {
    File file = FileUtils.toFile(getClass().getClassLoader().getResource("data/T_PROCEDURER.csv"));
    boolean first = true;
    List<String> lines = FileUtils.readLines(file);
    for (String line : lines) {
        if (first) {
            // first row is column metadata
            first = false;
            continue;
        }
        String[] splits = line.split(",");
        String recnum = splits[0];
        String code = splits[1];
        String odate = splits[2];
        String otime = splits[3];
        if (otime.length() == 0) {
            otime = "0";
        }
        String tillaeg = splits[4];
        String sygehus = splits[5];
        String afdeling = splits[6];
        String type = "A";

        StringBuffer sql = new StringBuffer();
        sql.append("INSERT INTO T_PROCEDURER (V_RECNUM, C_OPR, C_TILOPR, C_OPRART, D_ODTO, V_OTIME, C_OSGH, C_OAFD) VALUES (");
        sql.append(recnum);
        sql.append(", '");
        sql.append(code);
        sql.append("', '");
        sql.append(tillaeg);
        sql.append("', '");
        sql.append(type);
        sql.append("', '");
        sql.append(odate);
        sql.append("', ");
        sql.append(otime);
        sql.append(", '");
        sql.append(sygehus);
        sql.append("', '");
        sql.append(afdeling);
        sql.append("');");
        System.out.println(sql.toString());
    }
}
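The long chain of append calls can be condensed with String.format; a sketch producing the same INSERT statement for one CSV row (uses the same local variables as the loop above):

String sql = String.format(
        "INSERT INTO T_PROCEDURER (V_RECNUM, C_OPR, C_TILOPR, C_OPRART, D_ODTO, V_OTIME, C_OSGH, C_OAFD) "
                + "VALUES (%s, '%s', '%s', '%s', '%s', %s, '%s', '%s');",
        recnum, code, tillaeg, type, odate, otime, sygehus, afdeling);
System.out.println(sql);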
From source file:de.unisb.cs.st.javalanche.mutation.runtime.testDriver.junit.Junit4MutationTestDriver.java
private void removeExludedTests() {
    String excludes = ConfigurationLocator.getJavalancheConfiguration().getExcludedTests();
    if (excludes != null) {
        List<String> excludeList = new ArrayList<String>();
        if (excludes.startsWith("file://")) {
            String fileName = excludes.substring(7);
            try {
                excludeList = FileUtils.readLines(new File(fileName));
            } catch (IOException e) {
                e.printStackTrace();
            }
        } else {
            String[] split = excludes.split(":");
            excludeList = Arrays.asList(split);
        }
        for (String string : excludeList) {
            if (allTests.containsKey(string)) {
                allTests.remove(string);
            }
        }
    }
}
From source file:biz.gabrys.lesscss.extended.compiler.storage.DataStorageImpl.java
/**
 * {@inheritDoc}
 *
 * @throws DataStorageException if an I/O error occurred.
 * @since 1.0
 */
public List<String> getLines(final String fileName) {
    synchronized (mutex) {
        final File cache = getFile(fileName);
        if (cache == null) {
            return null;
        }
        try {
            return FileUtils.readLines(cache);
        } catch (final IOException e) {
            throw new DataStorageException(e);
        }
    }
}
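readLines materializes the entire file in memory, which this cache-backed implementation relies on. For very large files, Commons IO also offers a streaming alternative; a minimal sketch using FileUtils.lineIterator (hypothetical file name, not from DataStorageImpl):

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.LineIterator;

public class LineIteratorExample {
    public static void main(String[] args) throws IOException {
        // Streams lines one at a time instead of loading the whole file.
        LineIterator it = FileUtils.lineIterator(new File("cache.txt"), "UTF-8");
        try {
            while (it.hasNext()) {
                String line = it.nextLine();
                System.out.println(line); // process each line
            }
        } finally {
            LineIterator.closeQuietly(it);
        }
    }
}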
From source file:de.tudarmstadt.ukp.similarity.experiments.coling2012.util.ColingUtils.java
@SuppressWarnings("unchecked")
public static List<String> readGoldstandard(Dataset dataset) throws IOException {
    List<String> gold = new ArrayList<String>();

    if (dataset.equals(Dataset.MeterCorpus)) {
        List<String> originalLines = FileUtils.readLines(getGoldstandard(dataset));
        originalLines.remove(0); // remove header

        List<String> order = FileUtils
                .readLines(new File(UTILS_DIR + "/doc-order/" + dataset.toString() + ".txt"));

        // Process documents in the correct order
        for (String line : order) {
            String[] linesplit = line.split("\t");
            String docID = linesplit[0].substring(linesplit[0].indexOf("/newspapers/") + 1) + ".txt";

            // Look up document in the original gold standard file
            for (String origLine : originalLines) {
                String[] origLineSplit = origLine.split("\t");
                if (origLineSplit[0].equals(docID)) {
                    gold.add(origLineSplit[4]);
                    break;
                }
            }
        }
    } else if (dataset.equals(Dataset.WikipediaRewriteCorpus)) {
        List<String> originalLines = FileUtils.readLines(getGoldstandard(dataset));
        originalLines.remove(0); // remove header

        List<String> order = FileUtils
                .readLines(new File(UTILS_DIR + "/doc-order/" + dataset.toString() + ".txt"));

        // Process documents in the correct order
        for (String line : order) {
            String[] linesplit = line.split("\t");
            String docID = linesplit[0] + ".txt";

            // Look up document in the original gold standard file
            for (String origLine : originalLines) {
                String[] origLineSplit = origLine.split("\t");
                if (origLineSplit[0].equals(docID)) {
                    gold.add(origLineSplit[4]);
                    break;
                }
            }
        }
    } else if (dataset.equals(Dataset.WebisCrowdParaphraseCorpus)) {
        gold = FileUtils.readLines(getGoldstandard(dataset));
    }

    return gold;
}
From source file:edu.cmu.cs.lti.discoursedb.io.bazaar.converter.BazaarConverter.java
private void convert(String messageFileDir, String roomFileDir, String agentname)
        throws ParseException, IOException {

    Map<String, String> roomIdNameMap = new HashMap<>();
    List<String> messages = new ArrayList<>();

    // Read input file and preprocess
    String lineFragment = null;
    for (String line : FileUtils.readLines(new File(messageFileDir))) {
        // line fragments occur in case we have line feeds in a column
        if (lineFragment != null) {
            line = lineFragment + line;
            lineFragment = null;
        }
        if (line.endsWith("\\") || line.endsWith("\\\r\f")) {
            line = line.replaceAll("\\\r\f", "");
            lineFragment = line;
        } else {
            if (line.contains("\\\"We're Ready\\\"")) {
                line = line.replaceAll("\"We're Ready\\\\\"", "We're Ready\\\\");
            }
            if (line.contains("\\\"ready\\\"")) {
                line = line.replaceAll("\\\\\"ready\\\\\"", "\\\\ready\\\\");
            }
            if (line.contains("\\\"" + agentname + "\\\"")) {
                line = line.replaceAll("\\\\\"" + agentname + "\\\\\"", "\\\\" + agentname + "\\\\");
            }
            messages.add(line);
        }
    }

    // Phase 1: read through input room file once and map all entities
    try (InputStream in = new FileInputStream(roomFileDir)) {
        CsvMapper mapper = new CsvMapper();
        CsvSchema schema = mapper.schemaFor(Room.class).withColumnSeparator(',');
        MappingIterator<Room> rIter = mapper.readerFor(Room.class).with(schema).readValues(in);
        while (rIter.hasNextValue()) {
            Room r = rIter.next();
            if (!roomIdNameMap.containsKey(r.getId()))
                roomIdNameMap.put(r.getId(), r.getName());
            converterService.mapRoom(r, dataSetName, discourseName);
        }
    } catch (IOException e) {
        log.error("Error reading room file", e);
    }

    // Phase 2: read through input message file and map relationships between room and message
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(Message.class).withColumnSeparator(',');
    for (String message : messages) {
        Message m = mapper.readerFor(Message.class).with(schema).readValue(message);
        if (m.getType().equals("text") || m.getType().equals("image") || m.getType().equals("private")) {
            converterService.mapMessage(m, dataSetName, discourseName, roomIdNameMap);
        } else {
            converterService.mapInteraction(m, dataSetName, discourseName, roomIdNameMap);
        }
    }
}