Example usage for org.apache.commons.io IOUtils readLines

Introduction

This page presents example usage of org.apache.commons.io.IOUtils.readLines, collected from open-source projects.

Prototype

public static List readLines(Reader input) throws IOException 

Document

Get the contents of a Reader as a list of Strings, one entry per line.
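
Before the project usages below, here is a minimal, self-contained sketch of the Reader overload documented above (assuming Commons IO 2.x, where readLines returns List<String>). The file name input.txt and the class name ReadLinesExample are placeholders, not taken from any of the examples. Note that readLines does not close the supplied Reader, so the caller is responsible for closing it.

import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.List;

import org.apache.commons.io.IOUtils;

public class ReadLinesExample {
    public static void main(String[] args) throws IOException {
        // readLines does not close the Reader, so manage it with try-with-resources
        try (Reader reader = new FileReader("input.txt")) {
            List<String> lines = IOUtils.readLines(reader);
            for (String line : lines) {
                System.out.println(line);
            }
        }
    }
}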

Usage

From source file:com.adobe.acs.commons.redirectmaps.models.RedirectMapModel.java

public List<MapEntry> getEntries() throws IOException {
    log.trace("getEntries");

    List<MapEntry> entries = new ArrayList<MapEntry>();
    InputStream is = redirectMap.adaptTo(InputStream.class);
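    // readLines(InputStream) loads the whole redirect map into memory, one String per line (platform default charset)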
    for (String line : IOUtils.readLines(is)) {
        MapEntry entry = toEntry(line);
        if (entry != null) {
            entries.add(entry);
        }
    }

    if (redirects != null) {
        redirects.forEach(r -> entries.addAll(gatherEntries(r)));
    } else {
        log.debug("No redirect configurations specified");
    }

    Map<String, Integer> sources = new HashMap<String, Integer>();

    for (MapEntry entry : entries) {
        if (!sources.containsKey(entry.getSource())) {
            sources.put(entry.getSource(), 1);
        } else {
            log.trace("Found duplicate entry for {}", entry.getSource());
            sources.put(entry.getSource(), sources.get(entry.getSource()) + 1);
        }
    }
    sources.entrySet().removeIf(e -> e.getValue() <= 1);
    log.debug("Found {} duplicate entries", sources.keySet().size());

    entries.stream().filter(e -> sources.containsKey(e.getSource())).forEach(e -> {
        e.setValid(false);
        e.setStatus("Duplicate entry for " + e.getSource() + ", found redirect to " + e.getTarget());
    });

    return entries;
}

From source file:ml.shifu.shifu.core.dvarsel.wrapper.ValidationConductorTest.java

public void testPartershipModel() throws IOException {
    ModelConfig modelConfig = CommonUtils.loadModelConfig(
            "/Users/zhanhu/temp/partnership_varselect/ModelConfig.json", RawSourceData.SourceType.LOCAL);
    List<ColumnConfig> columnConfigList = CommonUtils.loadColumnConfigList(
            "/Users/zhanhu/temp/partnership_varselect/ColumnConfig.json", RawSourceData.SourceType.LOCAL);

    List<Integer> columnIdList = new ArrayList<Integer>();
    boolean hasCandidates = CommonUtils.hasCandidateColumns(columnConfigList);
    for (ColumnConfig columnConfig : columnConfigList) {
        if (CommonUtils.isGoodCandidate(columnConfig, hasCandidates)) {
            columnIdList.add(columnConfig.getColumnNum());
        }
    }

    TrainingDataSet trainingDataSet = new TrainingDataSet(columnIdList);
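    // read the whole normalized data file into memory, one record per line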
    List<String> recordsList = IOUtils
            .readLines(new FileInputStream("/Users/zhanhu/temp/partnership_varselect/part-m-00479"));
    for (String record : recordsList) {
        addNormalizedRecordIntoTrainDataSet(modelConfig, columnConfigList, trainingDataSet, record);
    }

    Set<Integer> workingList = new HashSet<Integer>();
    for (Integer columnId : trainingDataSet.getDataColumnIdList()) {
        workingList.clear();
        workingList.add(columnId);
        ValidationConductor conductor = new ValidationConductor(modelConfig, columnConfigList, workingList,
                trainingDataSet);

        double error = conductor.runValidate();
        System.out.println("The error is - " + error + ", for columnId - " + columnId);
    }
}

From source file:net.lyonlancer5.mcmp.karasu.util.ModFileUtils.java

public void doHashCheck(File jarFile) {
    if (!hasInitialized)
        initialize();

    if (doHashCheck) {
        LOGGER.info("Mod source file located at " + jarFile.getPath());
        if (jarFile.isFile() && !jarFile.getName().endsWith("bin")) {
            try {
                List<String> lines = IOUtils.readLines(new FileInputStream(remoteHashes));
                for (String s : lines) {
                    //version:jarName:md5:sha1:sha256:sha512
                    String[] params = s.split(":");
                    if (params[0].equals(Constants.VERSION)) {
                        if (!params[1].equals(jarFile.getName()))
                            LOGGER.warn("JAR filename has been changed!");

                        // read the jar once into memory so every digest sees the full
                        // contents; a single InputStream can only be consumed once
                        byte[] jarBytes = java.nio.file.Files.readAllBytes(jarFile.toPath());
                        String var0 = DigestUtils.md5Hex(jarBytes);
                        String var1 = DigestUtils.sha1Hex(jarBytes);
                        String var2 = DigestUtils.sha256Hex(jarBytes);
                        String var3 = DigestUtils.sha512Hex(jarBytes);

                        // note: this check only runs when JVM assertions are enabled (java -ea)
                        assert ((params[2].equals(var0)) && (params[3].equals(var1)) && (params[4].equals(var2))
                                && (params[5].equals(var3))) : new ValidationError(
                                        "Mod integrity check FAILED: mismatched hashes (Has the mod file been edited?)");

                        LOGGER.info("Validation success!");
                        return;
                    }
                }
            } catch (IOException e) {
                throw new ValidationError("Validation FAILED - I/O error", e);
            }
        } else {
            isDevEnv = true;
            LOGGER.warn(
                    "The mod is currently running on a development environment - Integrity checking will not proceed");
        }
    } else {
        LOGGER.warn("#########################################################################");
        LOGGER.warn("WARNING: Integrity checks have been DISABLED!");
        LOGGER.warn("Hash checks will not be performed - this mod may not run as intended");
        LOGGER.warn("Any changes made to this mod will not be validated, whether it came from");
        LOGGER.warn("a legitimate source or an attempt to insert code into this modification");
        LOGGER.warn("#########################################################################");
    }
}

From source file:edu.cmu.cs.lti.ark.fn.data.prep.ParsePreparation.java

/**
 * @param file Path to the file
 * @return List of all lines from the given file
 */
public static List<String> readLines(String file) throws IOException {
    // IOUtils.readLines does not close the Reader, so use try-with-resources to avoid a leak
    try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
        return IOUtils.readLines(reader);
    }
}

From source file:de.tudarmstadt.ukp.experiments.argumentation.sequence.evaluation.helpers.FinalTableExtractor.java

public static void extractCVResults(String inFile, String outFile) throws IOException {
    File file = new File(inFile);

    Table<String, String, String> table = TreeBasedTable.create();

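    // read the raw results file into memory; each block is a feature-set name followed by 12 result lines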
    List<String> lines = IOUtils.readLines(new FileInputStream(file));
    Iterator<String> iterator = lines.iterator();
    while (iterator.hasNext()) {
        String featureSet = iterator.next();

        featureSet = featureSet.replaceAll("^\\./", "");

        System.out.println(featureSet);
        // shorten fs name
        featureSet = featureSet.replaceAll("/.*", "");

        String[] fsSettings = featureSet.split("_", 6);
        String clusters = fsSettings[4];

        if (includeRow(clusters)) {
            table.put(featureSet, "FS", fsSettings[1]);
            table.put(featureSet, "Clusters", clusters);
        }

        // 12 lines with results
        for (int i = 0; i < 12; i++) {
            String line = iterator.next();
            String[] split = line.split("\\s+");
            String measure = split[0];
            Double value = Double.valueOf(split[1]);

            if (includeRow(clusters)) {
                table.put(featureSet, "Clusters", clusters);
                if (includeColumn(measure)) {
                    table.put(featureSet, measure, String.format(Locale.ENGLISH, "%.3f", value));
                }
            }
        }
    }

    //        tableToCsv(table, new FileWriter(outFile));
}

From source file:com.redhat.red.build.koji.it.ImportBuildConnectionStressIT.java

@BeforeClass
public static void loadWords() throws IOException {
    words = IOUtils.readLines(Thread.currentThread().getContextClassLoader().getResourceAsStream("words"));
}

From source file:com.streamsets.pipeline.stage.executor.s3.TestAmazonS3Executor.java

@Test
public void testCreateObject() throws Exception {
    AmazonS3ExecutorConfig config = getConfig();
    config.taskConfig.taskType = TaskType.CREATE_NEW_OBJECT;
    config.taskConfig.content = "${record:value('/content')}";

    AmazonS3Executor executor = new AmazonS3Executor(config);
    TargetRunner runner = new TargetRunner.Builder(AmazonS3DExecutor.class, executor).build();
    runner.runInit();

    try {
        runner.runWrite(ImmutableList.of(getTestRecord()));

        //Make sure the prefix is empty
        ObjectListing objectListing = s3client.listObjects(BUCKET_NAME, objectName);
        Assert.assertEquals(1, objectListing.getObjectSummaries().size());

        S3Object object = s3client.getObject(BUCKET_NAME, objectName);
        S3ObjectInputStream objectContent = object.getObjectContent();

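        // read the stored object's content as lines; the single test record should yield exactly one line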
        List<String> stringList = IOUtils.readLines(objectContent);
        Assert.assertEquals(1, stringList.size());
        Assert.assertEquals("Secret", stringList.get(0));

        Assert.assertEquals(1, runner.getEventRecords().size());
        assertEvent(runner.getEventRecords().get(0), objectName);
    } finally {
        runner.runDestroy();
    }
}

From source file:bixo.examples.webmining.WebMiningWorkflow.java

public static void importSeedUrls(Path crawlDbPath, String fileName) throws IOException, InterruptedException {

    SimpleUrlNormalizer normalizer = new SimpleUrlNormalizer();
    JobConf defaultJobConf = HadoopUtils.getDefaultJobConf();

    InputStream is = null;
    TupleEntryCollector writer = null;
    try {
        Tap urlSink = new Hfs(new TextLine(), crawlDbPath.toString(), true);
        writer = urlSink.openForWrite(defaultJobConf);

        is = WebMiningWorkflow.class.getResourceAsStream(fileName);
        if (is == null) {
            throw new FileNotFoundException("The seed urls file doesn't exist");
        }

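        // read every line of the seed-URL resource; lines starting with '#' are treated as comments and skipped below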
        List<String> lines = IOUtils.readLines(is);
        for (String line : lines) {
            line = line.trim();
            if (line.startsWith("#")) {
                continue;
            }

            CrawlDbDatum datum = new CrawlDbDatum(normalizer.normalize(line), 0, UrlStatus.UNFETCHED, 0.0f,
                    0.0f);
            writer.add(datum.getTuple());
        }

        // the writer is closed once in the finally block below
    } catch (IOException e) {
        HadoopUtils.safeRemove(crawlDbPath.getFileSystem(defaultJobConf), crawlDbPath);
        throw e;
    } finally {
        IoUtils.safeClose(is);
        if (writer != null) {
            writer.close();
        }
    }

}

From source file:com.linkedin.pinot.perf.QueryRunner.java

/**
 * Use multiple threads to run queries as fast as possible.
 *
 * Start {numThreads} worker threads to send queries (blocking call) back to back, and use the main thread to collect
 * the statistic information and log them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void multiThreadedsQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        final int numThreads) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int reportIntervalMillis = 3000;

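    // load all benchmark queries into memory up front so the worker threads can pick them at random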
    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    final int numQueries = queries.size();
    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    final DescriptiveStatistics stats = new DescriptiveStatistics();
    final CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < numQueries; j++) {
                    String query = queries.get(random.nextInt(numQueries));
                    long startTime = System.currentTimeMillis();
                    try {
                        driver.postQuery(query);
                        long clientTime = System.currentTimeMillis() - startTime;
                        synchronized (stats) {
                            stats.addValue(clientTime);
                        }

                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(clientTime);
                    } catch (Exception e) {
                        LOGGER.error("Caught exception while running query: {}", query, e);
                        // count down before bailing out so that the reporting loop in the
                        // main thread can still terminate
                        latch.countDown();
                        return;
                    }
                }
                latch.countDown();
            }
        });
    }

    executorService.shutdown();

    int iter = 0;
    long startTime = System.currentTimeMillis();
    while (latch.getCount() > 0) {
        Thread.sleep(reportIntervalMillis);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        LOGGER.info("Time Passed: {}s, Query Executed: {}, QPS: {}, Avg Response Time: {}ms", timePassedSeconds,
                count, count / timePassedSeconds, avgResponseTime);

        iter++;
        if (iter % 10 == 0) {
            printStats(stats);
        }
    }

    printStats(stats);
}

From source file:com.datatorrent.lib.io.HttpInputOperator.java

private boolean processBytes(byte[] bytes) throws IOException, JSONException {
    StringBuilder chunkStr = new StringBuilder();
    // hack: when line is a number we skip to next object instead of using it to read length chunk bytes
    List<String> lines = IOUtils.readLines(new ByteArrayInputStream(bytes));
    boolean endStream = false;
    int currentChunkLength = 0;
    for (String line : lines) {
        try {
            int nextLength = Integer.parseInt(line);
            if (nextLength == 0) {
                endStream = true;
                break;
            }
            // switch to next chunk
            processChunk(chunkStr, currentChunkLength);
            currentChunkLength = nextLength;

            //LOG.debug("chunk length: " + line);
        } catch (NumberFormatException e) {
            // add to chunk
            chunkStr.append(line);
            chunkStr.append("\n");
        }
    }
    // process current chunk, if any
    processChunk(chunkStr, currentChunkLength);
    return endStream;
}