Example usage for java.nio.file Files newBufferedWriter

Introduction

This page collects real-world example usages of java.nio.file.Files.newBufferedWriter from open-source projects.

Prototype

public static BufferedWriter newBufferedWriter(Path path, OpenOption... options) throws IOException 

Document

Opens or creates a file for writing, returning a BufferedWriter to write text to the file in an efficient manner.
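
Before the project examples below, here is a minimal self-contained sketch. The overload in the prototype above (Java 8+) defaults to UTF-8, while the overload taking an explicit Charset has existed since Java 7; with no open options, the target file is created if absent and truncated if it exists. The file name used here is hypothetical.

import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class NewBufferedWriterSketch {
    public static void main(String[] args) throws IOException {
        Path path = Paths.get("example.txt"); // hypothetical output file

        // Java 8+ overload: UTF-8 by default; creates or truncates the file
        try (BufferedWriter writer = Files.newBufferedWriter(path)) {
            writer.write("first line");
            writer.newLine();
        }

        // Java 7 overload: explicit charset plus open options, here appending
        try (BufferedWriter writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8,
                StandardOpenOption.CREATE, StandardOpenOption.APPEND)) {
            writer.write("second line");
            writer.newLine();
        }
    }
}

The try-with-resources form, used by all of the examples below, guarantees the writer is flushed and closed even if an exception is thrown.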

Usage

From source file:ca.polymtl.dorsal.libdelorean.statedump.Statedump.java
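Writes a statedump as a JSON file: the target directory and file are created if needed, then the JSON tree is serialized through a buffered UTF-8 writer.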

/**
 * Save this statedump at the given location.
 *
 * @param parentPath
 *            The location where to save the statedump file, usually in or
 *            close to its corresponding trace. It will be put under a Trace
 *            Compass-specific sub-directory.
 * @param ssid
 *            The state system ID of the state system we are saving. This
 *            will be used for restoration.
 * @throws IOException
 *             If there are problems creating or writing to the target
 *             directory
 */
public void dumpState(Path parentPath, String ssid) throws IOException {
    /* Create directory if it does not exist */
    Path sdPath = parentPath.resolve(STATEDUMP_DIRECTORY);
    if (!Files.exists(sdPath)) {
        Files.createDirectory(sdPath);
    }

    /* Create state dump file */
    String fileName = ssid + FILE_SUFFIX;
    Path filePath = sdPath.resolve(fileName);
    if (Files.exists(filePath)) {
        Files.delete(filePath);
    }
    Files.createFile(filePath);

    JSONObject root = new JSONObject();

    try (Writer bw = Files.newBufferedWriter(filePath, Charsets.UTF_8)) {
        /* Create the root object */
        root.put(Serialization.FORMAT_VERSION_KEY, STATEDUMP_FORMAT_VERSION);
        root.put(Serialization.ID_KEY, ssid);
        root.put(Serialization.STATEDUMP_VERSION_KEY, getVersion());

        /* Create the root state node */
        JSONObject rootNode = new JSONObject();
        rootNode.put(Serialization.CHILDREN_KEY, new JSONObject());
        root.put(Serialization.STATE_KEY, rootNode);

        /* Insert all the paths, types, and values */
        for (int i = 0; i < getAttributes().size(); i++) {
            String[] attribute = getAttributes().get(i);
            StateValue sv = getStates().get(i);

            Serialization.insertFrom(rootNode, attribute, 0, sv);
        }

        bw.write(root.toString(2));

    } catch (JSONException e) {
        /*
         * This should never happen. Any JSON exception means that there's a
         * bug in this code.
         */
        throw new IllegalStateException(e);
    }
}

From source file:org.elasticsearch.plugins.PluginManagerIT.java
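A test helper that computes a file's SHA-1 digest and writes the hex string to a sibling .sha1 file, optionally appending junk to simulate a corrupt checksum.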

private void writeSha1(Path file, boolean corrupt) throws IOException {
    String sha1Hex = MessageDigests.toHexString(MessageDigests.sha1().digest(Files.readAllBytes(file)));
    try (BufferedWriter out = Files.newBufferedWriter(file.resolveSibling(file.getFileName() + ".sha1"),
            StandardCharsets.UTF_8)) {
        out.write(sha1Hex);
        if (corrupt) {
            out.write("bad");
        }
    }
}

From source file:com.vaushell.superpipes.tools.scribe.OAuthClient.java
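Persists an OAuth access token: parent directories are created if missing, then the token, secret, and (if present) raw response are written one per line.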

private static void saveToken(final Token accessToken, final Path path) throws IOException {
    if (path == null) {
        throw new IllegalArgumentException();
    }

    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("[" + OAuthClient.class.getSimpleName() + "] saveToken() : accessToken=" + accessToken
                + " / path=" + path);
    }

    if (accessToken == null) {
        return;
    }

    if (Files.notExists(path.getParent())) {
        Files.createDirectories(path.getParent());
    }

    try (final BufferedWriter bfr = Files.newBufferedWriter(path, Charset.forName("utf-8"))) {
        bfr.write(accessToken.getToken());
        bfr.newLine();

        bfr.write(accessToken.getSecret());
        bfr.newLine();

        if (accessToken.getRawResponse() != null) {
            bfr.write(accessToken.getRawResponse());
            bfr.newLine();
        }
    }
}

From source file:eu.itesla_project.modules.validation.OfflineValidationTool.java
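Writes one CSV comparison file per rule: a header row, then one row per base case with the simulation and rule validation status.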

private static void writeComparisonFiles(Set<RuleId> rulesIds,
        Map<String, Map<RuleId, ValidationStatus>> statusPerRulePerCase, Path outputDir) throws IOException {
    for (RuleId ruleId : rulesIds) {
        Path comparisonFile = outputDir.resolve("comparison_" + ruleId.toString() + ".csv");

        System.out.println("writing " + comparisonFile + "...");

        try (BufferedWriter writer = Files.newBufferedWriter(comparisonFile, StandardCharsets.UTF_8)) {
            writer.write("base case");
            writer.write(CSV_SEPARATOR);
            writer.write("simulation");
            writer.write(CSV_SEPARATOR);
            writer.write("rule");
            writer.newLine();

            for (Map.Entry<String, Map<RuleId, ValidationStatus>> e : statusPerRulePerCase.entrySet()) {
                String baseCaseName = e.getKey();
                Map<RuleId, ValidationStatus> statusPerRule = e.getValue();
                writer.write(baseCaseName);
                ValidationStatus status = statusPerRule.get(ruleId);
                if (status == null) {
                    status = new ValidationStatus(null, null);
                }
                writer.write(CSV_SEPARATOR);
                writer.write(status.isSimulationOkToStr());
                writer.write(CSV_SEPARATOR);
                writer.write(status.isRuleOkToStr());
                writer.newLine();
            }
        }
    }
}

From source file:io.seqware.pipeline.plugins.FileProvenanceQueryTool.java
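A longer example: it converts a tab-separated provenance report into a bulk-import file, loads it into an embedded database, runs a user-supplied query, and writes the result set through a buffered writer opened with CREATE and TRUNCATE_EXISTING.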

@Override
public ReturnValue do_run() {
    Path randomTempDirectory = null;
    Path originalReport = null;
    Path bulkImportFile = null;
    try {
        if (options.has(this.inFileSpec)) {
            originalReport = FileSystems.getDefault().getPath(options.valueOf(inFileSpec));
        } else {
            originalReport = populateOriginalReportFromWS();
        }

        List<String> headers;
        List<Boolean> numericDataType;
        // construct column name and datatypes
        // convert file provenance report into derby bulk load format
        try (BufferedReader originalReader = Files.newBufferedReader(originalReport,
                Charset.defaultCharset())) {
            // construct column name and datatypes
            String headerLine = originalReader.readLine();
            headers = Lists.newArrayList();
            numericDataType = Lists.newArrayList();
            for (String column : headerLine.split("\t")) {
                String editedColumnName = StringUtils.lowerCase(column).replaceAll(" ", "_").replaceAll("-",
                        "_");
                headers.add(editedColumnName);
                // note that Parent Sample SWID is a silly column that has colons in it
                numericDataType.add(
                        !editedColumnName.contains("parent_sample") && (editedColumnName.contains("swid")));
            }
            bulkImportFile = Files.createTempFile("import", "txt");
            try (BufferedWriter derbyImportWriter = Files.newBufferedWriter(bulkImportFile,
                    Charset.defaultCharset())) {
                Log.debug("Bulk import file written to " + bulkImportFile.toString());
                while (originalReader.ready()) {
                    String line = originalReader.readLine();
                    StringBuilder builder = new StringBuilder();
                    int i = 0;
                    for (String colValue : line.split("\t")) {
                        if (i != 0) {
                            builder.append("\t");
                        }
                        if (numericDataType.get(i)) {
                            if (!colValue.trim().isEmpty()) {
                                builder.append(colValue);
                            }
                        } else {
                            // assume that this is a string
                            // quotes need to be doubled to preserve them, see
                            // https://db.apache.org/derby/docs/10.4/tools/ctoolsimportdefaultformat.html
                            builder.append("\"").append(colValue.replaceAll("\"", "\"\"")).append("\"");
                        }
                        i++;
                    }
                    derbyImportWriter.write(builder.toString());
                    derbyImportWriter.newLine();
                }
            }
        }
        randomTempDirectory = Files.createTempDirectory("randomFileProvenanceQueryDir");

        // try using in-memory for better performance
        String protocol = "jdbc:h2:";
        if (options.has(useH2InMemorySpec)) {
            protocol = protocol + "mem:";
        }
        Connection connection = spinUpEmbeddedDB(randomTempDirectory, "org.h2.Driver", protocol);

        // drop table if it exists already (running in IDE?)
        Statement dropTableStatement = null;
        try {
            dropTableStatement = connection.createStatement();
            dropTableStatement.executeUpdate("DROP TABLE " + TABLE_NAME);
        } catch (SQLException e) {
            Log.debug("Report table didn't exist (normal)");
        } finally {
            DbUtils.closeQuietly(dropTableStatement);
        }

        // create table creation query
        StringBuilder tableCreateBuilder = new StringBuilder();
        tableCreateBuilder.append("CREATE TABLE " + TABLE_NAME + " (");
        for (int i = 0; i < headers.size(); i++) {
            if (i != 0) {
                tableCreateBuilder.append(",");
            }
            if (numericDataType.get(i)) {
                tableCreateBuilder.append(headers.get(i)).append(" INT ");
            } else {
                tableCreateBuilder.append(headers.get(i)).append(" VARCHAR ");
            }
        }
        tableCreateBuilder.append(")");

        bulkImportH2(tableCreateBuilder, connection, bulkImportFile);

        // query the database and dump the results to the output file
        try (BufferedWriter outputWriter = Files.newBufferedWriter(Paths.get(options.valueOf(outFileSpec)),
                Charset.defaultCharset(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)) {
            QueryRunner runner = new QueryRunner();
            List<Map<String, Object>> mapList = runner.query(connection, options.valueOf(querySpec),
                    new MapListHandler());
            // output header
            if (mapList.isEmpty()) {
                Log.fatal("Query had no results");
                System.exit(-1);
            }
            StringBuilder builder = new StringBuilder();
            for (String columnName : mapList.get(0).keySet()) {
                if (builder.length() != 0) {
                    builder.append("\t");
                }
                builder.append(StringUtils.lowerCase(columnName));
            }
            outputWriter.append(builder);
            outputWriter.newLine();
            for (Map<String, Object> rowMap : mapList) {
                StringBuilder rowBuilder = new StringBuilder();
                for (Entry<String, Object> e : rowMap.entrySet()) {
                    if (rowBuilder.length() != 0) {
                        rowBuilder.append("\t");
                    }
                    rowBuilder.append(e.getValue());
                }
                outputWriter.append(rowBuilder);
                outputWriter.newLine();
            }
        }
        DbUtils.closeQuietly(connection);
        Log.stdoutWithTime("Wrote output to " + options.valueOf(outFileSpec));
        return new ReturnValue();
    } catch (IOException | SQLException | ClassNotFoundException | InstantiationException
            | IllegalAccessException ex) {
        throw new RuntimeException(ex);
    } finally {
        if (originalReport != null) {
            FileUtils.deleteQuietly(originalReport.toFile());
        }
        if (bulkImportFile != null) {
            FileUtils.deleteQuietly(bulkImportFile.toFile());
        }
        if (randomTempDirectory != null && randomTempDirectory.toFile().exists()) {
            FileUtils.deleteQuietly(randomTempDirectory.toFile());
        }

    }
}

From source file:io.anserini.search.SearchCollection.java
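Runs a set of retrieval topics against an index and writes the ranked results in TREC run format through a PrintWriter wrapped around a buffered US-ASCII writer.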

@SuppressWarnings("unchecked")
public <K> int runTopics() throws IOException {
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setSimilarity(similarity);

    Path topicsFile = Paths.get(args.topics);

    if (!Files.exists(topicsFile) || !Files.isRegularFile(topicsFile) || !Files.isReadable(topicsFile)) {
        throw new IllegalArgumentException(
                "Topics file : " + topicsFile + " does not exist or is not a (readable) file.");
    }

    TopicReader<K> tr;
    SortedMap<K, Map<String, String>> topics;
    try {
        tr = (TopicReader<K>) Class.forName("io.anserini.search.query." + args.topicReader + "TopicReader")
                .getConstructor(Path.class).newInstance(topicsFile);
        topics = tr.read();
    } catch (Exception e) {
        throw new IllegalArgumentException("Unable to load topic reader: " + args.topicReader);
    }

    final String runTag = "Anserini_" + args.topicfield + "_" + (args.keepstop ? "KeepStopwords_" : "")
            + FIELD_BODY + "_" + (args.searchtweets ? "SearchTweets_" : "") + similarity.toString();

    PrintWriter out = new PrintWriter(
            Files.newBufferedWriter(Paths.get(args.output), StandardCharsets.US_ASCII));

    for (Map.Entry<K, Map<String, String>> entry : topics.entrySet()) {
        K qid = entry.getKey();
        String queryString = entry.getValue().get(args.topicfield);

        ScoredDocuments docs;
        if (args.searchtweets) {
            docs = searchTweets(searcher, qid, queryString, Long.parseLong(entry.getValue().get("time")));
        } else {
            docs = search(searcher, qid, queryString);
        }

        /*
         * the first column is the topic number.
         * the second column is currently unused and should always be "Q0".
         * the third column is the official document identifier of the retrieved document.
         * the fourth column is the rank at which the document is retrieved.
         * the fifth column shows the score (integer or floating point) that generated the ranking.
         * the sixth column is called the "run tag" and should be a unique identifier for your run.
         */
        for (int i = 0; i < docs.documents.length; i++) {
            out.println(String.format(Locale.US, "%s Q0 %s %d %f %s", qid,
                    docs.documents[i].getField(FIELD_ID).stringValue(), (i + 1), docs.scores[i],
                    ((i == 0 || i == docs.documents.length - 1) ? runTag : "See_Line1")));
        }
    }
    out.flush();
    out.close();

    return topics.size();
}

From source file:org.elasticsearch.plugins.PluginManagerIT.java
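The MD5 counterpart of writeSha1 above: it writes the hex digest to a sibling .md5 file, optionally corrupting it.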

private void writeMd5(Path file, boolean corrupt) throws IOException {
    String md5Hex = MessageDigests.toHexString(MessageDigests.md5().digest(Files.readAllBytes(file)));
    try (BufferedWriter out = Files.newBufferedWriter(file.resolveSibling(file.getFileName() + ".md5"),
            StandardCharsets.UTF_8)) {
        out.write(md5Hex);
        if (corrupt) {
            out.write("bad");
        }
    }
}

From source file:sadl.run.datagenerators.SmacDataGenerator.java
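Generates train/test sample files for anomaly detection, one per anomaly type, writing each train set, separator marker, and test set to a numbered output file.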

private void run() throws IOException, InterruptedException {
    if (Files.notExists(outputDir)) {
        Files.createDirectories(outputDir);
    }
    Files.walk(outputDir).filter(p -> !Files.isDirectory(p)).forEach(p -> {
        try {
            logger.info("Deleting file {}", p);
            Files.delete(p);
        } catch (final Exception e) {
            e.printStackTrace();
        }
    });
    int k = 0;
    final boolean splitTimedEvents = true;
    // parse timed sequences
    TimedInput trainingTimedSequences = TimedInput.parseAlt(Paths.get(dataString), 1);
    if (splitTimedEvents) {
        final ButlaPdtaLearner butla = new ButlaPdtaLearner(10000, EventsCreationStrategy.SplitEvents,
                KDEFormelVariant.OriginalKDE);
        final Pair<TimedInput, Map<String, Event>> p = butla
                .splitEventsInTimedSequences(trainingTimedSequences);
        trainingTimedSequences = p.getKey();
    }
    final Random r = MasterSeed.nextRandom();
    final List<TimedWord> trainSequences = new ArrayList<>();
    final List<TimedWord> testSequences = new ArrayList<>();
    final TauPtaLearner learner = new TauPtaLearner();
    final TauPTA pta = learner.train(trainingTimedSequences);
    final TauPTA typeTwoNormalPta = SerializationUtils.clone(pta);
    final DecimalFormat df = new DecimalFormat("00");
    // final Path p = Paths.get("pta_normal.dot");
    // pta.toGraphvizFile(outputDir.resolve(p), false);
    // final Process ps = Runtime.getRuntime().exec("dot -Tpdf -O " + outputDir.resolve(p));
    // System.out.println(outputDir.resolve(p));
    // ps.waitFor();
    logger.info("Finished TauPTA ({} states) creation.", pta.getStateCount());
    TauPTA currentPta;
    while (k < 54) {
        for (final AnomalyInsertionType type : AnomalyInsertionType.values()) {
            if (type != AnomalyInsertionType.NONE && type != AnomalyInsertionType.ALL) {
                // if (type != AnomalyInsertionType.TYPE_TWO) {
                // continue;
                // }
                if (type == AnomalyInsertionType.TYPE_TWO) {
                    currentPta = SerializationUtils.clone(typeTwoNormalPta);
                    currentPta.setRandom(MasterSeed.nextRandom());
                } else {
                    currentPta = pta;
                }
                trainSequences.clear();
                testSequences.clear();
                final TauPTA anomaly = SerializationUtils.clone(currentPta);
                logger.info("inserting Anomaly Type {}", type);
                anomaly.makeAbnormal(type);
                if (type == AnomalyInsertionType.TYPE_TWO) {
                    anomaly.removeAbnormalSequences(currentPta);
                }
                for (int i = 0; i < TRAIN_SIZE; i++) {
                    trainSequences.add(currentPta.sampleSequence());
                }
                // PTAs of type 2 and 4 always produce abnormal sequences.
                // Abnormal PTAs of the other types (1, 3, 5) can produce both normal and abnormal sequences,
                // but the distribution is unknown, so to be fair all anomaly types are sampled the same way.
                for (int i = 0; i < TEST_SIZE; i++) {
                    if (r.nextDouble() < ANOMALY_PERCENTAGE) {
                        boolean wasAnormal = false;
                        TimedWord seq = null;
                        while (!wasAnormal) {
                            seq = anomaly.sampleSequence();
                            wasAnormal = seq.isAnomaly();
                        }
                        testSequences.add(seq);
                    } else {
                        testSequences.add(currentPta.sampleSequence());
                    }
                }
                final TimedInput trainset = new TimedInput(trainSequences);
                final TimedInput testset = new TimedInput(testSequences);
                final Path outputFile = outputDir
                        .resolve(Paths.get(df.format(k) + "_smac_type" + type.getTypeIndex() + ".txt"));
                try (BufferedWriter bw = Files.newBufferedWriter(outputFile, StandardCharsets.UTF_8)) {
                    trainset.toFile(bw, true);
                    bw.write('\n');
                    bw.write(TRAIN_TEST_SEP);
                    bw.write('\n');
                    testset.toFile(bw, true);
                }
                logger.info("Wrote file #{} ({})", k, outputFile);
                k++;
            }
        }
    }
}

From source file:popgenutils.dfcp.PrepareVCF4DFCP.java
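Filters a VCF file by population: the fixed columns plus the sample columns whose population is on the keep list are written to a new buffered UTF-8 writer.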

private void filterpop() {
    Set<Integer> indices_to_keep = new HashSet<Integer>();
    Map<String, String> sample_to_pop = new HashMap<String, String>();
    Map<String, String> sample_to_superpop = new HashMap<String, String>();
    Set<String> pops_to_keep = new HashSet<String>();

    for (int i = 0; i < 9; i++) {
        indices_to_keep.add(i);
    }

    String[] popsparts = popstokeep.split(",");
    for (String pop : popsparts) {
        pops_to_keep.add(pop);
    }

    try (BufferedReader in = Files.newBufferedReader(Paths.get(popmappingfile), Charset.forName("UTF-8"))) {
        String line = null;
        while ((line = in.readLine()) != null) {
            String[] parts = line.split("\t");
            sample_to_pop.put(parts[0], parts[1]);
            sample_to_superpop.put(parts[0], parts[2]);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }

    StringBuilder header = new StringBuilder();
    try (BufferedReader in = Files.newBufferedReader(Paths.get(filename), Charset.forName("UTF-8"))) {
        BufferedWriter out = null;

        String line = null;
        while ((line = in.readLine()) != null) {

            if (line.startsWith("#CHROM")) {
                //samples begin at 9
                out = Files.newBufferedWriter(
                        Paths.get(output_dir + "/" + "popfilter_" + Paths.get(filename).getFileName()),
                        Charset.forName("UTF-8"));
                out.write(header.toString());
                String[] parts = line.split("\t");
                for (int i = 9; i < parts.length; i++) {
                    if (pops_to_keep.contains(sample_to_superpop.get(parts[i])))
                        indices_to_keep.add(i);
                }
                out.write(parts[0]);
                for (int i = 1; i < parts.length; i++) {
                    if (indices_to_keep.contains(i))
                        out.write("\t" + parts[i]);
                }
                out.write(System.getProperty("line.separator"));
            } else if (line.startsWith("#")) {
                header.append(line + System.getProperty("line.separator"));
            } else {
                // data line: the fixed VCF columns end at FORMAT (index 8); sample columns start at 9
                String[] parts = line.split("\t");
                out.write(parts[0]);
                for (int i = 1; i < parts.length; i++) {
                    if (indices_to_keep.contains(i))
                        out.write("\t" + parts[i]);
                }
                out.write(System.getProperty("line.separator"));

            }
        }
        out.close();
    } catch (IOException e) {
        System.err.println("could not read from file " + filename);
        e.printStackTrace();
    }
}

From source file:sadl.run.datagenerators.SmacDataGeneratorMixed.java
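A variant of the previous generator that mixes all anomaly types into a single test set, again writing each sample file through a buffered UTF-8 writer.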

private void run() throws IOException, InterruptedException {
    if (Files.notExists(outputDir)) {
        Files.createDirectories(outputDir);
    }
    Files.walk(outputDir).filter(p -> !Files.isDirectory(p)).forEach(p -> {
        try {
            logger.info("Deleting file {}", p);
            Files.delete(p);
        } catch (final Exception e) {
            e.printStackTrace();
        }
    });
    logger.info("Starting to learn TauPTA...");
    int k = 0;
    // parse timed sequences
    TimedInput trainingTimedSequences = TimedInput.parseAlt(Paths.get(dataString), 1);
    final boolean splitTimedEvents = true;
    if (splitTimedEvents) {
        final ButlaPdtaLearner butla = new ButlaPdtaLearner(10000, EventsCreationStrategy.SplitEvents,
                KDEFormelVariant.OriginalKDE);
        final Pair<TimedInput, Map<String, Event>> p = butla
                .splitEventsInTimedSequences(trainingTimedSequences);
        trainingTimedSequences = p.getKey();
    }
    final Random r = MasterSeed.nextRandom();
    final List<TimedWord> trainSequences = new ArrayList<>();
    final List<TimedWord> testSequences = new ArrayList<>();
    final TauPtaLearner learner = new TauPtaLearner();
    final TauPTA pta = learner.train(trainingTimedSequences);
    final DecimalFormat df = new DecimalFormat("00");
    // final Path p = Paths.get("pta_normal.dot");
    // pta.toGraphvizFile(outputDir.resolve(p), false);
    // final Process ps = Runtime.getRuntime().exec("dot -Tpdf -O " + outputDir.resolve(p));
    // System.out.println(outputDir.resolve(p));
    // ps.waitFor();
    logger.info("Finished TauPTA creation.");
    logger.info("Before inserting anomalies, normal PTA has {} states and {} transitions", pta.getStateCount(),
            pta.getTransitionCount());
    final List<TauPTA> abnormalPtas = new ArrayList<>();
    for (final AnomalyInsertionType type : AnomalyInsertionType.values()) {
        if (type != AnomalyInsertionType.NONE && type != AnomalyInsertionType.ALL) {
            final TauPTA anomaly = SerializationUtils.clone(pta);
            logger.info("inserting Anomaly Type {}", type);
            anomaly.makeAbnormal(type);
            abnormalPtas.add(anomaly);
            if (type == AnomalyInsertionType.TYPE_TWO) {
                anomaly.removeAbnormalSequences(pta);
            }
            logger.info("After inserting anomaly type {}, normal PTA has {} states and {} transitions", type,
                    pta.getStateCount(), pta.getTransitionCount());

        }
    }
    logger.info("After inserting all anomalies, normal PTA has {} states and {} transitions",
            pta.getStateCount(), pta.getTransitionCount());
    final TObjectIntMap<TauPTA> anomalyOccurences = new TObjectIntHashMap<>();
    final Random anomalyChooser = MasterSeed.nextRandom();
    while (k < SAMPLE_FILES) {
        trainSequences.clear();
        testSequences.clear();
        for (int i = 0; i < TRAIN_SIZE; i++) {
            trainSequences.add(pta.sampleSequence());
        }
        for (int i = 0; i < TEST_SIZE; i++) {
            if (r.nextDouble() < ANOMALY_PERCENTAGE) {
                boolean wasAnormal = false;
                TimedWord seq = null;
                final TauPTA chosen = CollectionUtils.chooseRandomObject(abnormalPtas, anomalyChooser);
                while (!wasAnormal) {
                    seq = chosen.sampleSequence();
                    wasAnormal = seq.isAnomaly();
                }
                anomalyOccurences.adjustOrPutValue(chosen, 1, 1);
                testSequences.add(seq);
            } else {
                testSequences.add(pta.sampleSequence());
            }
        }
        final TimedInput trainset = new TimedInput(trainSequences);
        final TimedInput testset = new TimedInput(testSequences);
        final Path outputFile = outputDir.resolve(Paths.get(df.format(k) + "_smac_mixed.txt"));
        try (BufferedWriter bw = Files.newBufferedWriter(outputFile, StandardCharsets.UTF_8)) {
            trainset.toFile(bw, true);
            bw.write('\n');
            bw.write(TRAIN_TEST_SEP);
            bw.write('\n');
            testset.toFile(bw, true);
        }
        logger.info("Wrote file #{} ({})", k, outputFile);
        k++;
    }
    for (final TauPTA anomaly : anomalyOccurences.keySet()) {
        logger.info("Anomaly {} was chosen {} times", anomaly.getAnomalyType(), anomalyOccurences.get(anomaly));
    }
}