Example usage for java.util.HashSet HashSet()

List of usage examples for the java.util.HashSet HashSet() constructor

Introduction

On this page you can find example usage of the java.util.HashSet no-argument constructor, HashSet().

Prototype

public HashSet() 

Document

Constructs a new, empty set; the backing HashMap instance has default initial capacity (16) and load factor (0.75).
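
Below is a minimal, self-contained sketch of the constructor in use; it relies only on documented JDK behavior and is not drawn from the projects listed under Usage.

import java.util.HashSet;
import java.util.Set;

public class HashSetExample {
    public static void main(String[] args) {
        // Empty set; the backing HashMap starts with capacity 16 and load factor 0.75
        Set<String> names = new HashSet<>();
        names.add("alice");
        names.add("bob");
        names.add("alice"); // duplicate elements are ignored

        System.out.println(names.size());          // prints 2
        System.out.println(names.contains("bob")); // prints true
    }
}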

Usage

From source file: com.github.brandtg.switchboard.FileLogAggregator.java

/** Main. */
public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption("h", "help", false, "Prints help message");
    opts.addOption("f", "file", true, "File to output aggregated logs to");
    opts.addOption("s", "separator", true, "Line separator in log");
    CommandLine cli = new GnuParser().parse(opts, args);

    if (cli.getArgs().length == 0 || cli.hasOption("help")) {
        new HelpFormatter().printHelp("usage: [opts] sourceHost:port ...", opts);
        System.exit(1);
    }

    // Parse sources
    Set<InetSocketAddress> sources = new HashSet<>();
    for (int i = 0; i < cli.getArgs().length; i++) {
        String[] tokens = cli.getArgs()[i].split(":");
        sources.add(new InetSocketAddress(tokens[0], Integer.parseInt(tokens[1])));
    }

    // Parse output stream
    OutputStream outputStream;
    if (cli.hasOption("file")) {
        outputStream = new FileOutputStream(cli.getOptionValue("file"));
    } else {
        outputStream = System.out;
    }

    // Separator
    String separator = cli.getOptionValue("separator", "\n");
    if (separator.length() != 1) {
        throw new IllegalArgumentException("Separator must only be 1 character");
    }

    final FileLogAggregator fileLogAggregator = new FileLogAggregator(sources, separator.charAt(0),
            outputStream);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                fileLogAggregator.stop();
            } catch (Exception e) {
                LOG.error("Error when stopping log aggregator", e);
            }
        }
    });

    fileLogAggregator.start();
}

From source file: com.datascience.gal.scripts.MainClient.java

public static void main(String[] args) throws ParseException {

    DawidSkeneClient dsclient = new DawidSkeneClient();

    String categoriesfile = null;
    String inputfile = null;
    String correctfile = null;
    String costfile = null;
    int iterations = 1;
    boolean incremental = false;

    CommandLineParser parser = new GnuParser();
    CommandLine line = parser.parse(cli_options, args);

    if (!line.hasOption("c") || !line.hasOption("l") || !line.hasOption("g") || !line.hasOption("C")) {
        printHelp();
        System.exit(1);
    }

    categoriesfile = line.getOptionValue("c");
    inputfile = line.getOptionValue("l");

    if (line.hasOption("g"))
        correctfile = line.getOptionValue("g");
    if (line.hasOption("C"))
        costfile = line.getOptionValue("C");
    if (line.hasOption("i"))
        iterations = Integer.parseInt(line.getOptionValue("i"));
    if (line.hasOption("I"))
        incremental = true;

    // see if the thing is awake
    System.out.println(dsclient.isAlive() ? "pinged alive" : "problem with dsclient");
    // hard reset
    System.out.println(dsclient.deleteDS(0) ? "deleted ds 0" : "failed to delete ds 0");

    Collection<Category> categories = loadCategoriesFromFile(categoriesfile);
    if (categories.size() < 2) {
        System.err.println("invalid category input");
        System.exit(1);
    }

    System.out.println("sending to service: " + JSONUtils.toJson(categories));

    System.out.println(
            "response: " + (dsclient.initializeDS(0, categories, incremental) ? "success" : "failure"));

    // check the serialization of the json ds object
    DawidSkene ds = dsclient.getDawidSkene(0);
    System.out.println(ds);

    Collection<MisclassificationCost> costs = loadMisclassificationCosts(costfile);

    System.out.println("response: " + (dsclient.addMisclassificationCosts(0, costs) ? "successfully added costs"
            : "unable to add costs") + " - " + JSONUtils.toJson(costs));

    Collection<AssignedLabel> labels = loadAssignedLabels(inputfile);

    System.out.println("response: "
            + (dsclient.addAssignedLabels(0, labels) ? "successfully added labels" : "unable to add labels")
            + " - " + JSONUtils.toJson(labels));

    Collection<CorrectLabel> gold = loadGoldLabels(correctfile);
    System.out.println("response: "
            + (dsclient.addCorrectLabels(0, gold) ? "successfully added labels" : "unable to add labels")
            + " - " + JSONUtils.toJson(gold));

    Map<String, String> preVotes = dsclient.computeMajorityVotes(0);

    ds = dsclient.getDawidSkene(0);

    System.out.println(ds);
    if (!incremental)
        System.out.println(
                dsclient.iterateBlocking(0, iterations) ? "successfully iterated" : "failed ds iterations");

    ds = dsclient.getDawidSkene(0);
    System.out.println(ds);

    System.out.println(ds.printAllWorkerScores(false));
    System.out.println(ds.printObjectClassProbabilities(0));
    System.out.println(ds.printPriors());

    Map<String, String> postVotes = dsclient.computeMajorityVotes(0);

    System.out.println("note- diff doesnt work with incremental\n" + printDiffVote(preVotes, postVotes));

    System.out.println(JSONUtils.toJson(ds.getMajorityVote()));
    System.out.println(JSONUtils.toJson(ds.getObjectProbs()));

    Collection<String> objectNames = new HashSet<String>();
    String name = null;
    for (AssignedLabel label : labels) {
        objectNames.add(label.getObjectName());
        name = label.getObjectName();
    }

    System.out.println(JSONUtils.toJson(ds.getMajorityVote(objectNames)));
    System.out.println(JSONUtils.toJson(ds.getObjectProbs(objectNames)));

    System.out.println(JSONUtils.toJson(ds.getMajorityVote(name)));
    System.out.println(JSONUtils.toJson(ds.getObjectProbs(name)));

    boolean contains;
    contains = dsclient.hasDSObject(0);
    System.out.println(contains ? "has 0" : "dont have 0");
    contains = dsclient.hasDSObject(1);
    System.out.println(contains ? "has 1" : "dont have 1");
    System.out.println("deleting 0");
    dsclient.deleteDS(0);
    contains = dsclient.hasDSObject(0);
    System.out.println(contains ? "has 0" : "dont have 0");
}

From source file: com.github.fritaly.graphml4j.samples.GradleDependenciesWithGroups.java

public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        System.out
                .println(String.format("%s <output-file>", GradleDependenciesWithGroups.class.getSimpleName()));
        System.exit(1);
    }

    final File file = new File(args[0]);

    System.out.println("Writing GraphML file to " + file.getAbsolutePath() + " ...");

    FileWriter fileWriter = null;
    GraphMLWriter graphWriter = null;
    Reader reader = null;
    LineNumberReader lineReader = null;

    try {
        fileWriter = new FileWriter(file);
        graphWriter = new GraphMLWriter(fileWriter);

        // Customize the rendering of nodes
        final NodeStyle nodeStyle = graphWriter.getNodeStyle();
        nodeStyle.setWidth(250.0f);
        nodeStyle.setHeight(50.0f);

        graphWriter.setNodeStyle(nodeStyle);

        // The dependency graph has been generated by Gradle with the
        // command "gradle dependencies". The output of this command has
        // been saved to a text file which will be parsed to rebuild the
        // dependency graph
        reader = new InputStreamReader(
                GradleDependenciesWithGroups.class.getResourceAsStream("gradle-dependencies.txt"));
        lineReader = new LineNumberReader(reader);

        String line = null;

        // Stack containing the artifacts per depth inside the dependency
        // graph (the topmost dependency is the first one in the stack)
        final Stack<Artifact> stack = new Stack<Artifact>();

        final Map<String, Set<Artifact>> artifactsByGroup = new HashMap<String, Set<Artifact>>();

        // List of parent/child relationships between artifacts
        final List<Relationship> relationships = new ArrayList<Relationship>();

        while ((line = lineReader.readLine()) != null) {
            // Determine the depth of the current dependency inside the
            // graph. The depth can be inferred from the indentation used by
            // Gradle. Each level of depth adds 5 more characters of
            // indentation
            final int initialLength = line.length();

            // Remove the strings used by Gradle to indent dependencies
            line = StringUtils.replace(line, "+--- ", "");
            line = StringUtils.replace(line, "|    ", "");
            line = StringUtils.replace(line, "\\--- ", "");
            line = StringUtils.replace(line, "     ", "");

            // The depth can easily be inferred now
            final int depth = (initialLength - line.length()) / 5;

            // Remove unnecessary artifacts
            while (depth <= stack.size()) {
                stack.pop();
            }

            // Create an artifact from the dependency (group, artifact,
            // version) tuple
            final Artifact artifact = createArtifact(line);

            stack.push(artifact);

            if (stack.size() > 1) {
                // Store the artifact and its parent
                relationships.add(new Relationship(stack.get(stack.size() - 2), artifact));
            }

            if (!artifactsByGroup.containsKey(artifact.group)) {
                artifactsByGroup.put(artifact.group, new HashSet<Artifact>());
            }

            artifactsByGroup.get(artifact.group).add(artifact);
        }

        // Open the graph
        graphWriter.graph();

        final Map<Artifact, String> nodeIdsByArtifact = new HashMap<Artifact, String>();

        // Loop over the groups and generate the associated nodes
        for (String group : artifactsByGroup.keySet()) {
            graphWriter.group(group, true);

            for (Artifact artifact : artifactsByGroup.get(group)) {
                final String nodeId = graphWriter.node(artifact.getLabel());

                nodeIdsByArtifact.put(artifact, nodeId);
            }

            graphWriter.closeGroup();
        }

        // Generate the edges
        for (Relationship relationship : relationships) {
            final String parentId = nodeIdsByArtifact.get(relationship.parent);
            final String childId = nodeIdsByArtifact.get(relationship.child);

            graphWriter.edge(parentId, childId);
        }

        // Close the graph
        graphWriter.closeGraph();

        System.out.println("Done");
    } finally {
        // Calling GraphMLWriter.close() is necessary to dispose the underlying resources
        graphWriter.close();
        fileWriter.close();
        lineReader.close();
        reader.close();
    }
}

From source file: edu.msu.cme.rdp.probematch.cli.SliceToPrimer.java

public static void main(String[] args) throws Exception {
    //args = "--fedit-dist 4 --redit-dist=4 -k --max-length=400 --min-length=280 -o java_sliced_edit4.fasta TGCGAYCCSAARGCBGACTC ATSGCCATCATYTCRCCGGA /scratch/fishjord/tae_kwon_primer_match/all_genomes.fasta".split(" ");
    PatternBitMask64[] fprimers;
    String[] fprimerStrs, rprimerStrs;
    PatternBitMask64[] rprimers;

    FastaWriter seqOut;
    PrintStream statsOut;

    int fEdit = 3;
    int rEdit = 3;
    int minLength = Integer.MIN_VALUE;
    int maxLength = Integer.MAX_VALUE;
    boolean allowAmbiguities = true;
    boolean keepPrimers = false;

    SequenceReader inSeqs;

    try {

        CommandLine line = new PosixParser().parse(options, args);

        if (line.hasOption("edit-dist")) {
            fEdit = rEdit = Integer.parseInt(line.getOptionValue("edit-dist"));

            if (line.hasOption("redit-dist") || line.hasOption("fedit-dist")) {
                throw new Exception("edit-dist, [fedit-dist, redit-dist] are mutually exclusive");
            }
        }

        if (line.hasOption("fedit-dist")) {
            fEdit = Integer.parseInt(line.getOptionValue("fedit-dist"));
        }

        if (line.hasOption("no-ambiguities")) {
            allowAmbiguities = false;
        }

        if (line.hasOption("keep-primers")) {
            keepPrimers = true;
        }

        if (line.hasOption("redit-dist")) {
            rEdit = Integer.parseInt(line.getOptionValue("redit-dist"));
        }

        if (line.hasOption("seq-out")) {
            seqOut = new FastaWriter(new File(line.getOptionValue("seq-out")));
        } else {
            throw new Exception("Must specify seq-out");
        }

        if (line.hasOption("stats-out")) {
            statsOut = new PrintStream(new File(line.getOptionValue("stats-out")));
        } else {
            statsOut = System.out;
        }

        if (line.hasOption("min-length")) {
            minLength = Integer.parseInt(line.getOptionValue("min-length"));
        }

        if (line.hasOption("max-length")) {
            maxLength = Integer.parseInt(line.getOptionValue("max-length"));
        }

        args = line.getArgs();

        if (args.length != 3) {
            throw new Exception("Unexpected number of command line arguments");
        }

        fprimers = translateStringPrimers(args[0].split(","), allowAmbiguities, false);
        fprimerStrs = args[0].split(",");
        rprimers = translateStringPrimers(args[1].split(","), allowAmbiguities, true);
        rprimerStrs = args[1].split(",");
        inSeqs = new SequenceReader(new File(args[2]));

    } catch (Exception e) {
        new HelpFormatter().printHelp("SliceToPrimer [options] <f,p,r,i,m,e,r> <r,p,r,i,m,e,r> <in_seq_file>",
                options);
        System.err.println("ERROR: " + e.getMessage());
        return;
    }

    Sequence seq;

    statsOut.println(
            "orig_seqid\tsliced_seqid\tfprimer\tstart\tend\tscore\trprimer\tstart\tend\tscore\tlength");

    ScoringMatrix scoringMatrix = ScoringMatrix.getDefaultNuclMatrix();

    DPMAligner[] faligners = new DPMAligner[fprimers.length];
    for (int index = 0; index < faligners.length; index++) {
        faligners[index] = new DPMAligner(fprimerStrs[index], Integer.MAX_VALUE);
    }

    try {
        while ((seq = inSeqs.readNextSequence()) != null) {
            Set<PrimerMatch> fprimerMatches = new HashSet();
            Set<PrimerMatch> rprimerMatches = new HashSet();

            for (int index = 0; index < fprimers.length; index++) {
                PatternBitMask64 primer = fprimers[index];

                for (BitVector64Match r : BitVector64.process(seq.getSeqString().toCharArray(), primer, fEdit)
                        .getResults()) {
                    PrimerMatch match = new PrimerMatch();
                    match.start = r.getPosition() - (primer.getPatternLength() + r.getScore());
                    match.end = r.getPosition();
                    match.score = r.getScore();
                    match.primerIndex = index;
                    fprimerMatches.add(match);
                }
            }

            for (int index = 0; index < rprimers.length; index++) {
                PatternBitMask64 primer = rprimers[index];

                for (BitVector64Match r : BitVector64.process(seq.getSeqString().toCharArray(), primer, rEdit)
                        .getResults()) {
                    PrimerMatch match = new PrimerMatch();
                    match.start = r.getPosition() - (primer.getPatternLength() + r.getScore());
                    match.end = r.getPosition();
                    match.score = r.getScore();
                    match.primerIndex = index;
                    rprimerMatches.add(match);
                }
            }

            if (fprimerMatches.isEmpty() || rprimerMatches.isEmpty()) {
                statsOut.println(seq.getSeqName() + "\tNo forward and/or reverse primer hits");
                continue;
            }
            for (PrimerMatch fmatch : fprimerMatches) {
                PrimerMatch bestReverse = null;
                int bestScore = Integer.MAX_VALUE;
                for (PrimerMatch rmatch : rprimerMatches) {
                    if (rmatch.start > fmatch.end && rmatch.start - fmatch.end < bestScore) {
                        bestReverse = rmatch;
                        bestScore = rmatch.start - fmatch.end;
                    }
                }

                if (bestReverse == null) {
                    statsOut.println(seq.getSeqName() + "\tNo reverse primer before " + fmatch.end);
                    continue;
                }

                String slicedSeq = null;
                if (keepPrimers) {
                    slicedSeq = seq.getSeqString().substring(fmatch.start, bestReverse.end);
                } else {
                    slicedSeq = seq.getSeqString().substring(fmatch.end, bestReverse.start);
                }

                String seqid = seq.getSeqName() + "_" + fmatch.primerIndex + "_" + fmatch.start;
                if (slicedSeq.length() > minLength && slicedSeq.length() < maxLength) {
                    seqOut.writeSeq(seqid, "", slicedSeq);
                }

                DPMAlignment seqs = faligners[fmatch.primerIndex]
                        .align(seq.getSeqString().substring(fmatch.start, fmatch.end));
                System.err.println(">" + seqid);
                System.err.println(fprimerStrs[fmatch.primerIndex]);
                System.err.println(seq.getSeqString().substring(fmatch.start, fmatch.end));
                System.err.println();
                System.err.println(seqs.getAlignedMatchFragment());
                System.err.println(seqs.getAlignedProbe());

                System.err.println();

                statsOut.println(seq.getSeqName() + "\t" + seqid + "\t" + fmatch.primerIndex + "\t"
                        + fmatch.start + "\t" + fmatch.end + "\t" + fmatch.score + "\t"
                        + bestReverse.primerIndex + "\t" + bestReverse.start + "\t" + bestReverse.end + "\t"
                        + bestReverse.score + "\t" + slicedSeq.length());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        statsOut.close();
        seqOut.close();
    }
}

From source file: org.biopax.validator.BiopaxValidatorClient.java

/**
 * Checks BioPAX files using the online BioPAX Validator. 
 *
 * @see <a href="http://www.biopax.org/validator">BioPAX Validator Webservice</a>
 * 
 * @param argv
 * @throws IOException
 */
public static void main(String[] argv) throws IOException {
    if (argv.length < 2) { // both <path> and <output> are required
        System.err.println("Available parameters: \n"
                + "<path> <output> [xml|html|biopax] [auto-fix] [only-errors] [maxerrors=n] [notstrict]\n"
                + "\t- validate a BioPAX file/directory (up to ~25MB in total size, -\n"
                + "\totherwise, please use the biopax-validator.jar instead)\n"
                + "\tin the directory using the online BioPAX Validator service\n"
                + "\t(generates html or xml report, or gets the processed biopax\n"
                + "\t(cannot fix all errros though) see http://www.biopax.org/validator)");
        System.exit(-1);
    }

    final String input = argv[0];
    final String output = argv[1];

    File fileOrDir = new File(input);
    if (!fileOrDir.canRead()) {
        System.err.println("Cannot read from " + input);
        System.exit(-1);
    }
    if (output == null || output.isEmpty()) {
        System.err.println("No output file specified (for the validation report).");
        System.exit(-1);
    }

    // default options
    RetFormat outf = RetFormat.HTML;
    boolean fix = false;
    Integer maxErrs = null;
    Behavior level = null; //will report both errors and warnings
    String profile = null;

    // match optional arguments
    for (int i = 2; i < argv.length; i++) {
        if ("html".equalsIgnoreCase(argv[i])) {
            outf = RetFormat.HTML;
        } else if ("xml".equalsIgnoreCase(argv[i])) {
            outf = RetFormat.XML;
        } else if ("biopax".equalsIgnoreCase(argv[i])) {
            outf = RetFormat.OWL;
        } else if ("auto-fix".equalsIgnoreCase(argv[i])) {
            fix = true;
        } else if ("only-errors".equalsIgnoreCase(argv[i])) {
            level = Behavior.ERROR;
        } else if ((argv[i]).toLowerCase().startsWith("maxerrors=")) {
            String num = argv[i].substring(10);
            maxErrs = Integer.valueOf(num);
        } else if ("notstrict".equalsIgnoreCase(argv[i])) {
            profile = "notstrict";
        }
    }

    // collect files
    Collection<File> files = new HashSet<File>();

    if (fileOrDir.isDirectory()) {
        // validate all the OWL files in the folder
        FilenameFilter filter = new FilenameFilter() {
            public boolean accept(File dir, String name) {
                return (name.endsWith(".owl"));
            }
        };

        for (String s : fileOrDir.list(filter)) {
            files.add(new File(fileOrDir.getCanonicalPath() + File.separator + s));
        }
    } else {
        files.add(fileOrDir);
    }

    // upload and validate using the default URL: http://www.biopax.org/biopax-validator/check.html        
    if (!files.isEmpty()) {
        BiopaxValidatorClient val = new BiopaxValidatorClient();
        val.validate(fix, profile, outf, level, maxErrs, null, files.toArray(new File[] {}),
                new FileOutputStream(output));
    }
}

From source file: io.cloudslang.lang.tools.build.SlangBuildMain.java

public static void main(String[] args) {
    loadUserProperties();
    configureLog4j();

    ApplicationArgs appArgs = new ApplicationArgs();
    parseArgs(args, appArgs);
    String projectPath = parseProjectPathArg(appArgs);
    final String contentPath = defaultIfEmpty(appArgs.getContentRoot(), projectPath + CONTENT_DIR);
    final String testsPath = defaultIfEmpty(appArgs.getTestRoot(), projectPath + TEST_DIR);
    List<String> testSuites = parseTestSuites(appArgs);
    boolean shouldPrintCoverageData = appArgs.shouldOutputCoverage();
    boolean runTestsInParallel = appArgs.isParallel();
    int threadCount = parseThreadCountArg(appArgs, runTestsInParallel);
    String testCaseTimeout = parseTestTimeout(appArgs);
    setProperty(TEST_CASE_TIMEOUT_IN_MINUTES_KEY, valueOf(testCaseTimeout));
    final boolean shouldValidateDescription = appArgs.shouldValidateDescription();
    final boolean shouldValidateCheckstyle = appArgs.shouldValidateCheckstyle();
    String runConfigPath = FilenameUtils.normalize(appArgs.getRunConfigPath());

    BuildMode buildMode = null;
    Set<String> changedFiles = null;
    try {
        String smartModePath = appArgs.getChangesOnlyConfigPath();
        if (StringUtils.isEmpty(smartModePath)) {
            buildMode = BuildMode.BASIC;
            changedFiles = new HashSet<>();
            printBuildModeInfo(buildMode);
        } else {
            buildMode = BuildMode.CHANGED;
            changedFiles = readChangedFilesFromSource(smartModePath);
            printBuildModeInfo(buildMode);
        }
    } catch (Exception ex) {
        log.error("Exception: " + ex.getMessage());
        System.exit(1);
    }

    // Override with the values from the file if configured
    List<String> testSuitesParallel = new ArrayList<>();
    List<String> testSuitesSequential = new ArrayList<>();
    BulkRunMode bulkRunMode = runTestsInParallel ? ALL_PARALLEL : ALL_SEQUENTIAL;

    TestCaseRunMode unspecifiedTestSuiteRunMode = runTestsInParallel ? TestCaseRunMode.PARALLEL
            : TestCaseRunMode.SEQUENTIAL;
    if (get(runConfigPath).isAbsolute() && isRegularFile(get(runConfigPath), NOFOLLOW_LINKS)
            && equalsIgnoreCase(PROPERTIES_FILE_EXTENSION, FilenameUtils.getExtension(runConfigPath))) {
        Properties runConfigurationProperties = ArgumentProcessorUtils.getPropertiesFromFile(runConfigPath);
        shouldPrintCoverageData = getBooleanFromPropertiesWithDefault(TEST_COVERAGE, shouldPrintCoverageData,
                runConfigurationProperties);
        threadCount = getIntFromPropertiesWithDefaultAndRange(TEST_PARALLEL_THREAD_COUNT,
                Runtime.getRuntime().availableProcessors(), runConfigurationProperties, 1,
                MAX_THREADS_TEST_RUNNER + 1);
        testSuites = getTestSuitesForKey(runConfigurationProperties, TEST_SUITES_TO_RUN);
        testSuitesParallel = getTestSuitesForKey(runConfigurationProperties, TEST_SUITES_PARALLEL);
        testSuitesSequential = getTestSuitesForKey(runConfigurationProperties, TEST_SUITES_SEQUENTIAL);
        addErrorIfSameTestSuiteIsInBothParallelOrSequential(testSuitesParallel, testSuitesSequential);
        unspecifiedTestSuiteRunMode = getEnumInstanceFromPropertiesWithDefault(TEST_SUITES_RUN_UNSPECIFIED,
                unspecifiedTestSuiteRunMode, runConfigurationProperties);
        addWarningsForMisconfiguredTestSuites(unspecifiedTestSuiteRunMode, testSuites, testSuitesSequential,
                testSuitesParallel);
        bulkRunMode = POSSIBLY_MIXED;
    } else { // Warn when file is misconfigured, relative path, file does not exist or is not a properties file
        log.info(format(DID_NOT_DETECT_RUN_CONFIGURATION_PROPERTIES_FILE, runConfigPath));
    }

    String testCaseReportLocation = getProperty(TEST_CASE_REPORT_LOCATION);
    if (StringUtils.isBlank(testCaseReportLocation)) {
        log.info("Test case report location property [" + TEST_CASE_REPORT_LOCATION
                + "] is not defined. Report will be skipped.");
    }

    // Setting thread count for visibility in ParallelTestCaseExecutorService
    setProperty(SLANG_TEST_RUNNER_THREAD_COUNT, valueOf(threadCount));

    log.info(NEW_LINE + "------------------------------------------------------------");
    log.info("Building project: " + projectPath);
    log.info("Content root is at: " + contentPath);
    log.info("Test root is at: " + testsPath);
    log.info("Active test suites are: " + getListForPrint(testSuites));
    log.info("Parallel run mode is configured for test suites: " + getListForPrint(testSuitesParallel));
    log.info("Sequential run mode is configured for test suites: " + getListForPrint(testSuitesSequential));
    log.info("Default run mode '" + unspecifiedTestSuiteRunMode.name().toLowerCase()
            + "' is configured for test suites: " + getListForPrint(
                    getDefaultRunModeTestSuites(testSuites, testSuitesParallel, testSuitesSequential)));

    log.info("Bulk run mode for tests: " + getBulkModeForPrint(bulkRunMode));

    log.info("Print coverage data: " + valueOf(shouldPrintCoverageData));
    log.info("Validate description: " + valueOf(shouldValidateDescription));
    log.info("Validate checkstyle: " + valueOf(shouldValidateCheckstyle));
    log.info("Thread count: " + threadCount);
    log.info("Test case timeout in minutes: "
            + (isEmpty(testCaseTimeout) ? valueOf(MAX_TIME_PER_TESTCASE_IN_MINUTES) : testCaseTimeout));

    log.info(NEW_LINE + "Loading...");

    ClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext("spring/testRunnerContext.xml");
    context.registerShutdownHook();
    SlangBuilder slangBuilder = context.getBean(SlangBuilder.class);
    LoggingService loggingService = context.getBean(LoggingServiceImpl.class);
    Slang slang = context.getBean(Slang.class);

    try {

        updateTestSuiteMappings(context.getBean(TestRunInfoService.class), testSuitesParallel,
                testSuitesSequential, testSuites, unspecifiedTestSuiteRunMode);

        registerEventHandlers(slang);

        List<RuntimeException> exceptions = new ArrayList<>();

        SlangBuildResults buildResults = slangBuilder.buildSlangContent(projectPath, contentPath, testsPath,
                testSuites, shouldValidateDescription, shouldValidateCheckstyle, bulkRunMode, buildMode,
                changedFiles);
        exceptions.addAll(buildResults.getCompilationExceptions());
        if (exceptions.size() > 0) {
            logErrors(exceptions, projectPath, loggingService);
        }
        IRunTestResults runTestsResults = buildResults.getRunTestsResults();
        Map<String, TestRun> skippedTests = runTestsResults.getSkippedTests();

        if (isNotEmpty(skippedTests)) {
            printSkippedTestsSummary(skippedTests, loggingService);
        }
        printPassedTests(runTestsResults, loggingService);
        if (shouldPrintCoverageData) {
            printTestCoverageData(runTestsResults, loggingService);
        }

        if (isNotEmpty(runTestsResults.getFailedTests())) {
            printBuildFailureSummary(projectPath, runTestsResults, loggingService);
        } else {
            printBuildSuccessSummary(contentPath, buildResults, runTestsResults, loggingService);
        }
        loggingService.waitForAllLogTasksToFinish();

        generateTestCaseReport(context.getBean(SlangTestCaseRunReportGeneratorService.class), runTestsResults,
                testCaseReportLocation);
        System.exit(isNotEmpty(runTestsResults.getFailedTests()) ? 1 : 0);

    } catch (Throwable e) {
        logErrorsPrefix(loggingService);
        loggingService.logEvent(Level.ERROR, "Exception: " + e.getMessage());
        logErrorsSuffix(projectPath, loggingService);
        loggingService.waitForAllLogTasksToFinish();
        System.exit(1);
    }
}

From source file: com.impetus.kundera.ycsb.benchmark.CouchDBNativeClient.java

public static void main(String[] args) {

    CouchDBNativeClient cli = new CouchDBNativeClient();

    Properties props = new Properties();

    props.setProperty("hosts", "localhost");
    props.setProperty("port", "5984");
    cli.setProperties(props);

    try {
        cli.init();
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(0);
    }

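    // Build a record with three string-valued fields and insert it under key "BrianFrankCooper"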
    HashMap<String, ByteIterator> vals = new HashMap<String, ByteIterator>();
    vals.put("age", new StringByteIterator("57"));
    vals.put("middlename", new StringByteIterator("bradley"));
    vals.put("favoritecolor", new StringByteIterator("blue"));
    int res = cli.insert("usertable", "BrianFrankCooper", vals);
    System.out.println("Result of insert: " + res);

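    // Read the record back; passing the field set restricts which fields are returned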
    HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
    HashSet<String> fields = new HashSet<String>();
    fields.add("middlename");
    fields.add("age");
    fields.add("favoritecolor");
    res = cli.read("usertable", "BrianFrankCooper", fields, result);
    System.out.println("Result of read: " + res);
    for (String s : result.keySet()) {
        System.out.println("[" + s + "]=[" + result.get(s) + "]");
    }

    res = cli.delete("usertable", "BrianFrankCooper");
    System.out.println("Result of delete: " + res);

}

From source file: amie.keys.CSAKey.java

public static void main(String[] args) throws IOException, InterruptedException {
    final Triple<MiningAssistant, Float, String> parsedArgs = parseArguments(args);
    final Set<Rule> output = new LinkedHashSet<>();

    // Helper object that contains the implementation for the calculation
    // of confidence and support
    // The file with the non-keys, one per line
    long timea = System.currentTimeMillis();
    List<List<String>> inputNonKeys = Utilities.parseNonKeysFile(parsedArgs.third);
    System.out.println(inputNonKeys.size() + " input non-keys");
    final List<List<String>> nonKeys = pruneBySupport(inputNonKeys, parsedArgs.second,
            parsedArgs.first.getKb());
    Collections.sort(nonKeys, new Comparator<List<String>>() {

        @Override
        public int compare(List<String> o1, List<String> o2) {
            int r = Integer.compare(o2.size(), o1.size());
            if (r == 0) {
                return Integer.compare(o2.hashCode(), o1.hashCode());
            }

            return r;
        }

    });
    System.out.println(nonKeys.size() + " non-keys after pruning");
    int totalLoad = computeLoad(nonKeys);
    System.out.println(totalLoad + " is the total load");
    int nThreads = Runtime.getRuntime().availableProcessors();
    //int batchSize = Math.max(Math.min(maxBatchSize, totalLoad / nThreads), minBatchSize);
    int batchSize = Math.max(Math.min(maxLoad, totalLoad / nThreads), minLoad);

    final Queue<int[]> chunks = new PriorityQueue(50, new Comparator<int[]>() {
        @Override
        public int compare(int[] o1, int[] o2) {
            return Integer.compare(o2[2], o1[2]);
        }

    });

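    // Encode property names as integers and translate the non-keys into integer sets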
    final HashSet<HashSet<Integer>> nonKeysInt = new HashSet<>();
    final HashMap<String, Integer> property2Id = new HashMap<>();
    final HashMap<Integer, String> id2Property = new HashMap<>();
    final List<Integer> propertiesList = new ArrayList<>();
    int support = (int) parsedArgs.second.floatValue();
    KB kb = parsedArgs.first.getKb();
    buildDictionaries(nonKeys, nonKeysInt, property2Id, id2Property, propertiesList, support, kb);
    final List<HashSet<Integer>> nonKeysIntList = new ArrayList<>(nonKeysInt);
    int start = 0;
    int[] nextIdx = nextIndex(nonKeysIntList, 0, batchSize);
    int end = nextIdx[0];
    int load = nextIdx[1];
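    // Partition the non-key list into load-balanced chunks for the worker threads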
    while (start < nonKeysIntList.size()) {
        int[] chunk = new int[] { start, end, load };
        chunks.add(chunk);
        start = end;
        nextIdx = nextIndex(nonKeysIntList, end, batchSize);
        end = nextIdx[0];
        load = nextIdx[1];
    }

    Thread[] threads = new Thread[Math.min(Runtime.getRuntime().availableProcessors(), chunks.size())];
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new Thread(new Runnable() {

            @Override
            public void run() {
                while (true) {
                    int[] chunk = null;
                    synchronized (chunks) {
                        if (!chunks.isEmpty()) {
                            chunk = chunks.poll();
                        } else {
                            break;
                        }
                    }
                    System.out.println("Processing chunk " + Arrays.toString(chunk));
                    mine(parsedArgs, nonKeysIntList, property2Id, id2Property, propertiesList, chunk[0],
                            chunk[1], output);
                }

            }
        });
        threads[i].start();
    }

    for (int i = 0; i < threads.length; ++i) {
        threads[i].join();
    }
    long timeb = System.currentTimeMillis();
    System.out.println("==== Unique C-keys =====");
    for (Rule r : output) {
        System.out.println(Utilities.formatKey(r));
    }
    System.out.println(
            "VICKEY found " + output.size() + " unique conditional keys in " + (timeb - timea) + " ms");
}

From source file: com.act.biointerpretation.l2expansion.L2FilteringDriver.java

public static void main(String[] args) throws Exception {

    // Build command line parser.
    Options opts = new Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }

    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        LOGGER.error("Argument parsing failed: %s", e.getMessage());
        HELP_FORMATTER.printHelp(L2FilteringDriver.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }

    // Print help.
    if (cl.hasOption(OPTION_HELP)) {
        HELP_FORMATTER.printHelp(L2FilteringDriver.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        return;
    }

    checkFilterOptionIsValid(OPTION_CHEMICAL_FILTER, cl);
    checkFilterOptionIsValid(OPTION_REACTION_FILTER, cl);

    // Get corpus files.
    File corpusFile = new File(cl.getOptionValue(OPTION_INPUT_CORPUS));
    if (!corpusFile.exists()) {
        LOGGER.error("Input corpus file does not exist.");
        return;
    }

    File outputFile = new File(cl.getOptionValue(OPTION_OUTPUT_PATH));
    outputFile.createNewFile();
    if (outputFile.isDirectory()) {
        LOGGER.error("Output file is directory.");
        System.exit(1);
    }

    LOGGER.info("Reading corpus from file.");
    L2PredictionCorpus predictionCorpus = L2PredictionCorpus.readPredictionsFromJsonFile(corpusFile);
    LOGGER.info("Read in corpus with %d predictions.", predictionCorpus.getCorpus().size());
    LOGGER.info("Corpus has %d distinct substrates.", predictionCorpus.getUniqueSubstrateInchis().size());

    if (cl.hasOption(OPTION_FILTER_SUBSTRATES)) {
        LOGGER.info("Filtering by substrates.");
        File substratesFile = new File(cl.getOptionValue(OPTION_FILTER_SUBSTRATES));
        L2InchiCorpus inchis = new L2InchiCorpus();
        inchis.loadCorpus(substratesFile);
        Set<String> inchiSet = new HashSet<String>();
        inchiSet.addAll(inchis.getInchiList());

        predictionCorpus = predictionCorpus
                .applyFilter(prediction -> inchiSet.containsAll(prediction.getSubstrateInchis()));

        predictionCorpus.writePredictionsToJsonFile(outputFile);
        LOGGER.info("Done writing filtered corpus to file.");
        return;
    }

    if (cl.hasOption(OPTION_SPLIT_BY_RO)) {
        LOGGER.info("Splitting corpus into distinct corpuses for each ro.");
        Map<String, L2PredictionCorpus> corpusMap = predictionCorpus
                .splitCorpus(prediction -> prediction.getProjectorName());

        for (Map.Entry<String, L2PredictionCorpus> entry : corpusMap.entrySet()) {
            String fileName = cl.getOptionValue(OPTION_OUTPUT_PATH) + "." + entry.getKey();
            File oneOutputFile = new File(fileName);
            entry.getValue().writePredictionsToJsonFile(oneOutputFile);
        }
        LOGGER.info("Done writing split corpuses to file.");
        return;
    }

    predictionCorpus = runDbLookups(cl, predictionCorpus, opts);

    LOGGER.info("Applying filters.");
    predictionCorpus = applyFilter(predictionCorpus, ALL_CHEMICALS_IN_DB, cl, OPTION_CHEMICAL_FILTER);
    predictionCorpus = applyFilter(predictionCorpus, REACTION_MATCHES_DB, cl, OPTION_REACTION_FILTER);
    LOGGER.info("Filtered corpus has %d predictions.", predictionCorpus.getCorpus().size());

    LOGGER.info("Printing final corpus.");
    predictionCorpus.writePredictionsToJsonFile(outputFile);

    LOGGER.info("L2FilteringDriver complete!.");
}

From source file: Main.java

public static <A> Set<A> set(A... a) {
    Set<A> set = new HashSet<A>();
    Collections.addAll(set, a);/*  w  ww  .  j  a v a 2 s. c  o m*/
    return set;
}
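
A hypothetical call site for this helper (illustrative, not part of the source file):

Set<String> tags = set("alpha", "beta", "alpha"); // duplicates collapse: the set contains "alpha" and "beta"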