Example usage for java.util HashSet HashSet

List of usage examples for java.util HashSet HashSet

Introduction

On this page you can find example usage for java.util HashSet HashSet(int initialCapacity).

Prototype

public HashSet(int initialCapacity) 

Document

Constructs a new, empty set; the backing HashMap instance has the specified initial capacity and default load factor (0.75).
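
As a minimal, self-contained sketch of this constructor in isolation (the class and variable names here are illustrative, not taken from the examples below): passing an expected element count pre-sizes the backing HashMap so the set does not need to rehash as elements are added, while the load factor stays at the default 0.75.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class HashSetCapacityExample {

    public static void main(String[] args) {
        String[] jobNames = { "load", "transform", "export" };

        // Pre-size the set for the expected number of elements;
        // the load factor remains the default 0.75.
        Set<String> jobNameSet = new HashSet<>(jobNames.length);
        jobNameSet.addAll(Arrays.asList(jobNames));

        System.out.println(jobNameSet.contains("transform")); // true
        System.out.println(jobNameSet.size());                // 3
    }
}

Note that many of the examples below use the HashSet(Collection) overload, e.g. new HashSet<>(Arrays.asList(...)); the int overload appears where only the expected size is known up front, e.g. new HashSet<String>(relnames.length).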

Usage

From source file:com.dattack.dbcopy.cli.DbCopyCli.java

/**
 * The <code>main</code> method.
 *
 * @param args
 *            the program arguments
 */
public static void main(final String[] args) {

    final Options options = createOptions();

    try {
        final CommandLineParser parser = new DefaultParser();
        final CommandLine cmd = parser.parse(options, args);
        final String[] filenames = cmd.getOptionValues(FILE_OPTION);
        final String[] jobNames = cmd.getOptionValues(JOB_NAME_OPTION);
        final String[] propertiesFiles = cmd.getOptionValues(PROPERTIES_OPTION);

        HashSet<String> jobNameSet = null;
        if (jobNames != null) {
            jobNameSet = new HashSet<>(Arrays.asList(jobNames));
        }

        CompositeConfiguration configuration = new CompositeConfiguration();
        if (propertiesFiles != null) {
            for (final String fileName : propertiesFiles) {
                configuration.addConfiguration(new PropertiesConfiguration(fileName));
            }
        }

        final DbCopyEngine ping = new DbCopyEngine();
        ping.execute(filenames, jobNameSet, configuration);

    } catch (@SuppressWarnings("unused") final ParseException e) {
        showUsage(options);
    } catch (final ConfigurationException | DattackParserException e) {
        System.err.println(e.getMessage());
    }
}

From source file:net.ontopia.topicmaps.db2tm.Execute.java

public static void main(String[] argv) throws Exception {

    // Initialize logging
    CmdlineUtils.initializeLogging();

    // Register logging options
    CmdlineOptions options = new CmdlineOptions("Execute", argv);
    CmdlineUtils.registerLoggingOptions(options);
    OptionsListener ohandler = new OptionsListener();
    options.addLong(ohandler, "tm", 't', true);
    options.addLong(ohandler, "baseuri", 'b', true);
    options.addLong(ohandler, "out", 'o', true);
    options.addLong(ohandler, "relations", 'r', true);
    options.addLong(ohandler, "force-rescan", 'f', true);

    // Parse command line options
    try {
        options.parse();
    } catch (CmdlineOptions.OptionsException e) {
        System.err.println("Error: " + e.getMessage());
        System.exit(1);
    }

    // Get command line arguments
    String[] args = options.getArguments();

    if (args.length < 2) {
        usage();
        System.exit(3);
    }

    // Arguments
    String operation = args[0];
    String cfgfile = args[1];

    if (!"add".equals(operation) && !"sync".equals(operation) && !"remove".equals(operation)) {
        usage();
        System.err.println("Operation '" + operation + "' not supported.");
        System.exit(3);
    }

    try {
        // Read mapping file
        log.debug("Reading relation mapping file {}", cfgfile);
        RelationMapping mapping = RelationMapping.read(new File(cfgfile));

        // open topic map
        String tmurl = ohandler.tm;
        log.debug("Opening topic map {}", tmurl);
        TopicMapIF topicmap;
        if (tmurl == null || "tm:in-memory:new".equals(tmurl))
            topicmap = new InMemoryTopicMapStore().getTopicMap();
        else if ("tm:rdbms:new".equals(tmurl))
            topicmap = new RDBMSTopicMapStore().getTopicMap();
        else {
            TopicMapReaderIF reader = ImportExportUtils.getReader(tmurl);
            topicmap = reader.read();
        }
        TopicMapStoreIF store = topicmap.getStore();

        // base locator
        String outfile = ohandler.out;
        LocatorIF baseloc = (outfile == null ? store.getBaseAddress() : new URILocator(new File(outfile)));
        if (baseloc == null && tmurl != null)
            baseloc = (ohandler.baseuri == null ? new URILocator(tmurl) : new URILocator(ohandler.baseuri));

        // figure out which relations to actually process
        Collection<String> relations = null;
        if (ohandler.relations != null) {
            String[] relnames = StringUtils.split(ohandler.relations, ",");
            if (relnames.length > 0) {
                relations = new HashSet<String>(relnames.length);
                relations.addAll(Arrays.asList(relnames));
            }
        }

        try {
            // Process data sources in mapping
            if ("add".equals(operation))
                Processor.addRelations(mapping, relations, topicmap, baseloc);
            else if ("sync".equals(operation)) {
                boolean rescan = ohandler.forceRescan != null
                        && Boolean.valueOf(ohandler.forceRescan).booleanValue();
                Processor.synchronizeRelations(mapping, relations, topicmap, baseloc, rescan);
            } else if ("remove".equals(operation))
                Processor.removeRelations(mapping, relations, topicmap, baseloc);
            else
                throw new UnsupportedOperationException("Unsupported operation: " + operation);

            // export topicmap
            if (outfile != null) {
                log.debug("Exporting topic map to {}", outfile);
                TopicMapWriterIF writer = ImportExportUtils.getWriter(new File(outfile));
                writer.write(topicmap);
            }

            // commit transaction
            store.commit();
            log.debug("Transaction committed.");
        } catch (Exception t) {
            log.error("Error occurred while running operation '" + operation + "'", t);
            // abort transaction
            store.abort();
            log.debug("Transaction aborted.");
            throw t;
        } finally {
            if (store.isOpen())
                store.close();
        }

    } catch (Exception e) {
        Throwable cause = e.getCause();
        if (cause instanceof DB2TMException)
            System.err.println("Error: " + e.getMessage());
        else
            throw e;
    }
}

From source file:net.ontopia.topicmaps.cmdlineutils.rdbms.RDBMSIndexTool.java

public static void main(String[] argv) throws Exception {

    // Initialize logging
    CmdlineUtils.initializeLogging();

    // Register logging options
    CmdlineOptions options = new CmdlineOptions("RDBMSIndexTool", argv);
    CmdlineUtils.registerLoggingOptions(options);

    // Parse command line options
    try {
        options.parse();
    } catch (CmdlineOptions.OptionsException e) {
        System.err.println("Error: " + e.getMessage());
        System.exit(1);
    }

    // Get command line arguments
    String[] args = options.getArguments();

    if (args.length != 1) {
        usage();
        System.exit(3);
    }

    // load database schema project
    ClassLoader cloader = RDBMSIndexTool.class.getClassLoader();
    InputStream istream = cloader.getResourceAsStream("net/ontopia/topicmaps/impl/rdbms/config/schema.xml");
    Project dbp = DatabaseProjectReader.loadProject(istream);

    // open database connection
    String propfile = args[0];
    ConnectionFactoryIF cf = new DefaultConnectionFactory(PropertyUtils.loadProperties(new File(propfile)),
            true);

    Connection conn = cf.requestConnection();
    try {
        DatabaseMetaData dbm = conn.getMetaData();
        boolean downcase = dbm.storesLowerCaseIdentifiers();

        Map extra_indexes = new TreeMap();
        Map missing_indexes = new TreeMap();

        Iterator tables = dbp.getTables().iterator();
        while (tables.hasNext()) {
            Table table = (Table) tables.next();
            String table_name = (downcase ? table.getName().toLowerCase() : table.getName());
            //! System.out.println("T :"  + table_name);

            // get primary keys from database
            Map pkeys = getPrimaryKeys(table_name, dbm);

            // get indexes from database
            Map indexes = getIndexes(table_name, dbm);

            Map dindexes = new HashMap();
            if (table.getPrimaryKeys() != null) {
                String pkey = table_name + '(' + StringUtils.join(table.getPrimaryKeys(), ',') + ')';
                if (!pkeys.containsKey(pkey))
                    System.out.println("PKM: " + pkey);
            }

            Iterator iter = table.getIndexes().iterator();
            while (iter.hasNext()) {
                Index index = (Index) iter.next();
                String i = table_name + '(' + StringUtils.join(index.getColumns(), ',') + ')';
                String index_name = (downcase ? index.getName().toLowerCase() : index.getName());
                dindexes.put(i, index_name);
            }

            Set extra = new HashSet(indexes.keySet());
            extra.removeAll(dindexes.keySet());
            extra.removeAll(pkeys.keySet());
            if (!extra.isEmpty()) {
                Iterator i = extra.iterator();
                while (i.hasNext()) {
                    Object k = i.next();
                    extra_indexes.put(k, indexes.get(k));
                }
            }

            Set missing = new HashSet(dindexes.keySet());
            missing.addAll(pkeys.keySet());
            missing.removeAll(indexes.keySet());
            if (!missing.isEmpty()) {
                Iterator i = missing.iterator();
                while (i.hasNext()) {
                    Object k = i.next();
                    missing_indexes.put(k, dindexes.get(k));
                }
            }

        }
        if (!extra_indexes.isEmpty())
            System.out.println("/* --- Extra indexes ----------------------------------------- */");
        Iterator eiter = extra_indexes.keySet().iterator();
        while (eiter.hasNext()) {
            Object k = eiter.next();
            System.out.println("drop index " + extra_indexes.get(k) + "; /* " + k + " */");
        }

        if (!missing_indexes.isEmpty())
            System.out.println("/* --- Missing indexes---------------------------------------- */");
        Iterator miter = missing_indexes.keySet().iterator();
        while (miter.hasNext()) {
            Object k = miter.next();
            System.out.println("create index " + missing_indexes.get(k) + " on " + k + ";");
        }

    } finally {
        conn.rollback();
        conn.close();
    }

}

From source file:fr.inria.atlanmod.emf.graphs.Connectedness.java

public static void main(String[] args) {

    Options options = createOptions();

    CommandLineParser parser = new PosixParser();

    try {
        CommandLine commandLine = parser.parse(options, args);
        String inputMetamodel = commandLine.getOptionValue(INPUT_METAMODEL);
        String inputModel = commandLine.getOptionValue(INPUT_MODEL);
        Boolean logUnreachable = commandLine.hasOption(LOG_UNREACHABLE);

        ResourceSet resourceSet = new ResourceSetImpl();
        Resource.Factory.Registry.INSTANCE.getExtensionToFactoryMap()
                .put(Resource.Factory.Registry.DEFAULT_EXTENSION, new XMIResourceFactoryImpl());

        {
            LOG.log(Level.INFO, "Loading input metamodel");
            URI uri = URI.createFileURI(inputMetamodel);
            Resource resource = resourceSet.getResource(uri, true);
            registerEPackages(resource);
        }

        URI uri = URI.createFileURI(inputModel);

        LOG.log(Level.INFO, "Loading input model");
        Resource resource = resourceSet.getResource(uri, true);

        LOG.log(Level.INFO, "Getting input model contents");
        Set<EObject> resourceContents = getResourceContents(resource);
        int totalCount = resourceContents.size();

        LOG.log(Level.INFO, MessageFormat.format("Input model contains {0} elements", totalCount));

        List<EClassifier> candidateEClassifiers = buildCandidateEClassifiers();

        for (Iterator<EObject> it = resource.getAllContents(); it.hasNext();) {
            EObject eObject = it.next();
            if (candidateEClassifiers.contains(eObject.eClass())) {
                Set<EObject> reachableEObjects = getReachableEObjects(eObject);
                int i = reachableEObjects.size();
                LOG.log(Level.INFO, MessageFormat.format("Found {0} reachable objects from {1} (EClass {2})", i,
                        EcoreUtil.getURI(eObject), eObject.eClass().getName()));
                if (logUnreachable) {
                    Set<EObject> unreachableEObjects = new HashSet<>(resourceContents);
                    unreachableEObjects.removeAll(reachableEObjects);
                    LOG.log(Level.INFO,
                            MessageFormat.format("{0} elements are unreachable from {1} (EClass {2})",
                                    unreachableEObjects.size(), EcoreUtil.getURI(eObject),
                                    eObject.eClass().getName()));
                    for (EObject unreachableEObject : unreachableEObjects) {
                        LOG.log(Level.INFO, MessageFormat.format("Unreachable EObject {0} is of type {1}",
                                EcoreUtil.getURI(unreachableEObject), unreachableEObject.eClass()));
                    }
                }
            }
        }

    } catch (ParseException e) {
        LOG.log(Level.SEVERE, e.getLocalizedMessage(), e);
        LOG.log(Level.INFO, "Current arguments: " + Arrays.toString(args));
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("java -jar <this-file.jar>", options, true);
    } catch (Throwable e) {
        LOG.log(Level.SEVERE, e.getLocalizedMessage(), e);
        MessageUtil.showError(e.toString());
    }
}

From source file:com.dattack.dbtools.ping.Ping.java

/**
 * The <code>main</code> method.
 *
 * @param args
 *            the program arguments
 */
public static void main(final String[] args) {

    final Options options = createOptions();

    try {
        final CommandLineParser parser = new DefaultParser();
        final CommandLine cmd = parser.parse(options, args);
        final String[] filenames = cmd.getOptionValues(FILE_OPTION);
        final String[] taskNames = cmd.getOptionValues(TASK_NAME_OPTION);

        HashSet<String> hs = null;
        if (taskNames != null) {
            hs = new HashSet<>(Arrays.asList(taskNames));
        }

        final Ping ping = new Ping();
        ping.execute(filenames, hs);

    } catch (@SuppressWarnings("unused") final ParseException e) {
        showUsage(options);
    } catch (final ConfigurationException | DbpingParserException e) {
        System.err.println(e.getMessage());
    }
}

From source file:com.example.java.collections.ArrayExample.java

public static void main(String[] args) {

    /* ########################################################### */
    // Initializing an Array
    String[] creatures = { "goldfish", "oscar", "guppy", "minnow" };
    int[] numbers = new int[10];
    int counter = 0;
    while (counter < numbers.length) {
        numbers[counter] = counter;
        System.out.println("number[" + counter + "]: " + counter);
        counter++;
    }
    for (int theInt : numbers) {
        System.out.println(theInt);
    }
    //System.out.println(numbers[numbers.length]);
    /* ########################################################### */

    /* ########################################################### */
    // Using Character Array
    String name = "Michael";
    // Character Array
    char[] charName = name.toCharArray();
    System.out.println(charName);

    // Array Functions
    char[] html = new char[] { 'M', 'i', 'c', 'h', 'a', 'e', 'l' };
    char[] lastFour = new char[4];
    System.arraycopy(html, 3, lastFour, 0, lastFour.length);
    System.out.println(lastFour);
    /* ########################################################### */

    /* ########################################################### */
    // Using Arrays of Other Types
    Object[] person = new Object[] { "Michael", 94, 1, new Date() };

    String fname = (String) person[0]; //ok
    Integer age = (Integer) person[1]; //ok
    Date start = (Date) person[2]; // oops! person[2] is an Integer, so this cast throws ClassCastException at runtime
    /* ########################################################### */

    /* ########################################################### */
    // Multi-Dimensional Array
    String[][] bits = { { "Michael", "Ernest", "MFE" }, { "Ernest", "Friedman-Hill", "EFH" },
            { "Kathi", "Duggan", "KD" }, { "Jeff", "Kellum", "JK" } };

    bits[0] = new String[] { "Rudy", "Polanski", "RP" };
    bits[1] = new String[] { "Rudy", "Washington", "RW" };
    bits[2] = new String[] { "Rudy", "O'Reilly", "RO" };
    /* ########################################################### */

    /* ########################################################### */
    //Create ArrayList from array
    String[] stringArray = { "a", "b", "c", "d", "e" };
    ArrayList<String> arrayList = new ArrayList<String>(Arrays.asList(stringArray));
    System.out.println(arrayList);
    // [a, b, c, d, e]
    /* ########################################################### */

    /* ########################################################### */
    //Check if an array contains a certain value
    String[] stringArray1 = { "a", "b", "c", "d", "e" };
    boolean b = Arrays.asList(stringArray1).contains("a");
    System.out.println(b);
    // true
    /* ########################################################### */

    /* ########################################################### */
    //Concatenate two arrays
    int[] intArray = { 1, 2, 3, 4, 5 };
    int[] intArray2 = { 6, 7, 8, 9, 10 };
    // Apache Commons Lang library
    int[] combinedIntArray = ArrayUtils.addAll(intArray, intArray2);
    /* ########################################################### */

    /* ########################################################### */
    //Joins the elements of the provided array into a single String
    // Apache common lang
    String j = StringUtils.join(new String[] { "a", "b", "c" }, ", ");
    System.out.println(j);
    // a, b, c
    /* ########################################################### */

    /* ########################################################### */
    //Convert ArrayList to Array
    String[] stringArray3 = { "a", "b", "c", "d", "e" };
    ArrayList<String> arrayList1 = new ArrayList<String>(Arrays.asList(stringArray3));
    String[] stringArr = new String[arrayList1.size()];
    arrayList1.toArray(stringArr);
    for (String s : stringArr) {
        System.out.println(s);
    }
    /* ########################################################### */

    /* ########################################################### */
    //Convert Array to Set
    Set<String> set = new HashSet<String>(Arrays.asList(stringArray));
    System.out.println(set);
    // e.g. [d, e, b, c, a] (HashSet iteration order is not guaranteed)
    /* ########################################################### */

    /* ########################################################### */
    //Reverse an array
    int[] intArray1 = { 1, 2, 3, 4, 5 };
    ArrayUtils.reverse(intArray1);
    System.out.println(Arrays.toString(intArray1));
    //[5, 4, 3, 2, 1]
    /* ########################################################### */

    /* ########################################################### */
    // Remove element of an array
    int[] intArray3 = { 1, 2, 3, 4, 5 };
    int[] removed = ArrayUtils.removeElement(intArray3, 3);//create a new array
    System.out.println(Arrays.toString(removed));
    /* ########################################################### */

    /* ########################################################### */
    byte[] bytes = ByteBuffer.allocate(4).putInt(8).array();
    for (byte t : bytes) {
        System.out.format("0x%x ", t);
    }
    /* ########################################################### */

}

From source file:de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step7CollectMTurkResults.java

public static void main(String[] args) throws Exception {
    // input dir - list of xml query containers
    // /home/user-ukp/research/data/dip/wp1-documents/step4-boiler-plate/
    File inputDir = new File(args[0] + "/");

    // MTurk result file

    // output dir
    File outputDir = new File(args[2]);
    if (!outputDir.exists()) {
        outputDir.mkdirs();
    }

    // Folder with success files
    File mturkSuccessDir = new File(args[1]);

    Collection<File> files = FileUtils.listFiles(mturkSuccessDir, new String[] { "result" }, false);
    if (files.isEmpty()) {
        throw new IllegalArgumentException("Input folder is empty. " + mturkSuccessDir);
    }

    HashMap<String, List<MTurkAnnotation>> mturkAnnotations = new HashMap<>();

    // parsing all CSV files
    for (File mturkCSVResultFile : files) {
        System.out.println("Parsing " + mturkCSVResultFile.getName());

        MTurkOutputReader outputReader = new MTurkOutputReader(
                new HashSet<>(Arrays.asList("annotation", "workerid")), mturkCSVResultFile);

        // for fixing broken data input: for each hit, collect all sentence IDs
        Map<String, SortedSet<String>> hitSentences = new HashMap<>();

        // first iteration: collect the sentences
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            if (!hitSentences.containsKey(hitID)) {
                hitSentences.put(hitID, new TreeSet<>());
            }

            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");

            if (relevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(relevantSentences.split(",")));
            }

            if (irrelevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(irrelevantSentences.split(",")));
            }
        }

        // and now second iteration
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            String annotatorID = record.get("workerid");
            String acceptTime = record.get("assignmentaccepttime");
            String submitTime = record.get("assignmentsubmittime");
            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");
            String reject = record.get("reject");
            String[] filename;
            String comment;
            String clueWeb;
            String[] relevant = {};
            String[] irrelevant = {};

            filename = record.get("annotation").split("_");
            String fileXml = filename[0];
            clueWeb = filename[1].trim();
            comment = record.get("Answer.comment");

            if (relevantSentences != null) {
                relevant = relevantSentences.split(",");
            }

            if (irrelevantSentences != null) {
                irrelevant = irrelevantSentences.split(",");
            }

            // sanitizing data: if both relevant and irrelevant are empty, that's a bug;
            // we look up all sentences from this HIT and treat this assignment
            // as if there were only irrelevant ones
            if (relevant.length == 0 && irrelevant.length == 0) {
                SortedSet<String> strings = hitSentences.get(hitID);
                irrelevant = new String[strings.size()];
                strings.toArray(irrelevant);
            }

            if (reject != null) {
                System.out.println(" HIT " + hitID + " annotated by " + annotatorID + " was rejected ");
            } else {
                /*
                // relevant sentences is a comma-delimited string,
                // this regular expression is rather strange
                // it must contain digits, it might be that there is only one space or a comma or some other char
                // digits are the sentence ids. if relevant sentences do not contain digits then it is wrong
                if (relevantSentences.matches("^\\D*$") &&
                    irrelevantSentences.matches("^\\D*$")) {
                try {
                    throw new IllegalStateException(
                            "No annotations found for HIT " + hitID + " in " +
                                    fileXml + " for document " + clueWeb);
                }
                catch (IllegalStateException ex) {
                    ex.printStackTrace();
                }
                        
                }
                */
                MTurkAnnotation mturkAnnotation;
                try {
                    mturkAnnotation = new MTurkAnnotation(hitID, annotatorID, acceptTime, submitTime, comment,
                            clueWeb, relevant, irrelevant);
                } catch (IllegalArgumentException ex) {
                    throw new IllegalArgumentException("Record: " + record, ex);
                }

                List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileXml);

                if (listOfAnnotations == null) {
                    listOfAnnotations = new ArrayList<>();
                }
                listOfAnnotations.add(mturkAnnotation);
                mturkAnnotations.put(fileXml, listOfAnnotations);
            }

        }
        //            parser.close();
    }

    // Debugging: output number of HITs of a query
    System.out.println("Accepted HITs for a query:");
    for (Map.Entry e : mturkAnnotations.entrySet()) {
        ArrayList<MTurkAnnotation> a = (ArrayList<MTurkAnnotation>) e.getValue();
        System.out.println(e.getKey() + " " + a.size());
    }

    for (File f : FileUtils.listFiles(inputDir, new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));
        String fileName = f.getName();
        List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileName);

        if (listOfAnnotations == null || listOfAnnotations.isEmpty()) {
            throw new IllegalStateException("No annotations for " + f.getName());
        }

        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            for (MTurkAnnotation mtAnnotation : listOfAnnotations) {
                String clueWeb = mtAnnotation.clueWeb;
                if (rankedResults.clueWebID.equals(clueWeb)) {
                    List<QueryResultContainer.MTurkRelevanceVote> mTurkRelevanceVotes = rankedResults.mTurkRelevanceVotes;
                    QueryResultContainer.MTurkRelevanceVote relevanceVote = new QueryResultContainer.MTurkRelevanceVote();
                    String annotatorID = mtAnnotation.annotatorID;
                    String hitID = mtAnnotation.hitID;
                    String acceptTime = mtAnnotation.acceptTime;
                    String submitTime = mtAnnotation.submitTime;
                    String comment = mtAnnotation.comment;
                    String[] relevant = mtAnnotation.relevant;
                    String[] irrelevant = mtAnnotation.irrelevant;
                    relevanceVote.turkID = annotatorID.trim();
                    relevanceVote.hitID = hitID.trim();
                    relevanceVote.acceptTime = acceptTime.trim();
                    relevanceVote.submitTime = submitTime.trim();
                    relevanceVote.comment = comment != null ? comment.trim() : null;
                    if (relevant.length == 0 && irrelevant.length == 0) {
                        try {
                            throw new IllegalStateException("the length of the annotations is 0"
                                    + rankedResults.clueWebID + " for HIT " + relevanceVote.hitID);
                        } catch (IllegalStateException e) {
                            e.printStackTrace();
                        }
                    }
                    for (String r : relevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "true";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    for (String r : irrelevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "false";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    mTurkRelevanceVotes.add(relevanceVote);
                }
            }

        }
        File outputFile = new File(outputDir, f.getName());
        FileUtils.writeStringToFile(outputFile, queryResultContainer.toXML(), "utf-8");
        System.out.println("Finished " + outputFile);
    }

}

From source file:com.google.appengine.tools.pipeline.impl.util.JsonUtils.java

public static void main(String[] args) throws Exception {
    JSONObject x = new JSONObject();
    x.put("first", 5);
    x.put("second", 7);
    debugPrint("hello");
    debugPrint(7);
    debugPrint(3.14159);
    debugPrint("");
    debugPrint('x');
    debugPrint(x);
    debugPrint(null);

    Map<String, Integer> map = new HashMap<>();
    map.put("first", 5);
    map.put("second", 7);
    debugPrint(map);

    int[] array = new int[] { 5, 7 };
    debugPrint(array);

    ArrayList<Integer> arrayList = new ArrayList<>(2);
    arrayList.add(5);
    arrayList.add(7);
    debugPrint(arrayList);

    Collection<Integer> collection = new HashSet<>(2);
    collection.add(5);
    collection.add(7);
    debugPrint(collection);

    Object object = new Object();
    debugPrint(object);

    Map<String, String> map1 = new HashMap<>();
    map1.put("a", "hello");
    map1.put("b", "goodbye");

    Object[] array2 = new Object[] { 17, "yes", "no", map1 };

    Map<String, Object> map2 = new HashMap<>();
    map2.put("first", 5.4);
    map2.put("second", array2);
    map2.put("third", map1);

    debugPrint(map2);

    class MyBean {
        @SuppressWarnings("unused")
        public int getX() {
            return 11;
        }

        @SuppressWarnings("unused")
        public boolean isHot() {
            return true;
        }

        @SuppressWarnings("unused")
        public String getName() {
            return "yellow";
        }
    }
    debugPrint(new MyBean());

}

From source file:com.utdallas.s3lab.smvhunter.monkey.MonkeyMe.java

/**
 * @param args
 */
public static void main(String[] args) throws Exception {

    logger.info("start time ==== " + System.currentTimeMillis());

    try {
        //load the adb location from the props file
        Properties props = new Properties();
        props.load(new FileInputStream(new File("adb.props")));

        //set the adb location
        WindowUpdate.adbLocation = props.getProperty("adb_location");
        adbLocation = props.getProperty("adb_location");
        //set root dir
        ROOT_DIRECTORY = props.getProperty("root_dir");
        //completed txt
        completedFile = new File(ROOT_DIRECTORY + "tested.txt");
        //db location for static analysis
        dbLocation = props.getProperty("db_location");
        //location for smart input generation output
        smartInputLocation = props.getProperty("smart_input_location");

        //udp dump location
        udpDumpLocation = props.getProperty("udp_dump_location");
        //logcat dump location
        logCatLocation = props.getProperty("log_cat_location");
        //strace output location
        straceOutputLocation = props.getProperty("strace_output_location");
        //strace dump location
        straceDumpLocation = props.getProperty("strace_output_location");

        //set x and y coords
        UIEnumerator.screenX = props.getProperty("x");
        UIEnumerator.screenY = props.getProperty("y");

        DeviceOfflineMonitor.START_EMULATOR = props.getProperty("restart");

        //read output of static analysis
        readFromStaticAnalysisText();
        logger.info("Read static analysis output");

        readFromSmartInputText();
        logger.info("Read smart input generation output");

        //populate the queue with apps which are only present in the static analysis
        @SuppressWarnings("unchecked")
        final Set<? extends String> sslApps = new HashSet<String>(
                CollectionUtils.collect(FileUtils.readLines(new File(dbLocation)), new Transformer() {
                    @Override
                    public Object transform(Object input) {
                        //get app file name
                        String temp = StringUtils.substringBefore(input.toString(), " ");
                        return StringUtils.substringAfterLast(temp, "/");
                    }
                }));
        Collection<File> fileList = FileUtils.listFiles(new File(ROOT_DIRECTORY), new String[] { "apk" },
                false);
        CollectionUtils.filter(fileList, new Predicate() {
            @Override
            public boolean evaluate(Object object) {
                return sslApps.contains(StringUtils.substringAfterLast(object.toString(), "/"));
            }
        });
        apkQueue = new LinkedBlockingDeque<File>(fileList);

        logger.info("finished listing files from the root directory");

        try {
            //populate the tested apk list
            completedApps.addAll(FileUtils.readLines(completedFile));
        } catch (Exception e) {
            //pass except
            logger.info("No tested.txt file found");

            //create new file
            if (completedFile.createNewFile()) {
                logger.info("tested.txt created in root directory");
            } else {
                logger.info("tested.txt file could not be created");
            }

        }

        //get the executors for managing the emulators
        executors = Executors.newCachedThreadPool();

        //set the devicemonitor exec
        DeviceOfflineMonitor.exec = executors;

        final List<Future<?>> futureList = new ArrayList<Future<?>>();

        //start the offline device monitor (emulator management thread)
        logger.info("Starting Device Offline Monitor Thread");
        executors.submit(new DeviceOfflineMonitor());

        //get ADB backend object for device change listener
        AdbBackend adb = new AdbBackend();

        //register for device change and wait for events
        //once event is received, start the MonkeyMe thread
        MonkeyDeviceChangeListener deviceChangeListener = new MonkeyDeviceChangeListener(executors, futureList);
        AndroidDebugBridge.addDeviceChangeListener(deviceChangeListener);
        logger.info("Listening to changes in devices (emulators)");

        //wait for the latch to come down
        //this means that all the apks have been processed
        cdl.await();

        logger.info("Finished testing all apps waiting for threads to join");

        //now wait for every thread to finish
        for (Future<?> future : futureList) {
            future.get();
        }

        logger.info("All threads terminated");

        //stop listening for device update
        AndroidDebugBridge.removeDeviceChangeListener(deviceChangeListener);

        //stop the debug bridge
        AndroidDebugBridge.terminate();

        //stop offline device monitor
        DeviceOfflineMonitor.stop = true;

        logger.info("adb and listeners terminated");

    } finally {
        logger.info("Executing this finally");
        executors.shutdownNow();
    }

    logger.info("THE END!!");
}

From source file:com.act.lcms.v2.MZCollisionCounter.java

public static void main(String[] args) throws Exception {
    CLIUtil cliUtil = new CLIUtil(MassChargeCalculator.class, HELP_MESSAGE, OPTION_BUILDERS);
    CommandLine cl = cliUtil.parseCommandLine(args);

    File inputFile = new File(cl.getOptionValue(OPTION_INPUT_INCHI_LIST));
    if (!inputFile.exists()) {
        cliUtil.failWithMessage("Input file does not exist at %s", inputFile.getAbsolutePath());
    }

    List<MassChargeCalculator.MZSource> sources = new ArrayList<>();
    try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
        String line;
        while ((line = reader.readLine()) != null) {
            line = line.trim();
            sources.add(new MassChargeCalculator.MZSource(line));
            if (sources.size() % 1000 == 0) {
                LOGGER.info("Loaded %d sources from input file", sources.size());
            }
        }
    }

    Set<String> considerIons = Collections.emptySet();
    if (cl.hasOption(OPTION_ONLY_CONSIDER_IONS)) {
        List<String> ions = Arrays.asList(cl.getOptionValues(OPTION_ONLY_CONSIDER_IONS));
        LOGGER.info("Only considering ions for m/z calculation: %s", StringUtils.join(ions, ", "));
        considerIons = new HashSet<>(ions);
    }

    TSVWriter<String, Long> tsvWriter = new TSVWriter<>(Arrays.asList("collisions", "count"));
    tsvWriter.open(new File(cl.getOptionValue(OPTION_OUTPUT_FILE)));

    try {
        LOGGER.info("Loaded %d sources in total from input file", sources.size());

        MassChargeCalculator.MassChargeMap mzMap = MassChargeCalculator.makeMassChargeMap(sources,
                considerIons);

        if (!cl.hasOption(OPTION_COUNT_WINDOW_INTERSECTIONS)) {
            // Do an exact analysis of the m/z collisions if windowing is not specified.

            LOGGER.info("Computing precise collision histogram.");
            Iterable<Double> mzs = mzMap.ionMZIter();
            Map<Integer, Long> collisionHistogram = histogram(
                    StreamSupport.stream(mzs.spliterator(), false).map(mz -> { // See comment about Iterable below.
                        try {
                            return mzMap.ionMZToMZSources(mz).size();
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }));
            List<Integer> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Integer collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision.longValue());
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        } else {
            /* After some deliberation (thanks Gil!), the windowed variant of this calculation counts the number of
             * structures whose 0.01 Da m/z windows (for some set of ions) overlap with each other.
             *
             * For example, let's assume we have five total input structures, and are only searching for one ion.  Let's
             * also assume that three of those structures have m/z A and the remaining two have m/z B.  The windows might
             * look like this in the m/z domain:
             * |----A----|
             *        |----B----|
             * Because A represents three structures and overlaps with B, which represents two, we assign A a count of 5--
             * this is the number of structures we believe could fall into the range of A given our current peak calling
             * approach.  Similarly, B is assigned a count of 5, as the possibility for collision/confusion is symmetric.
             *
             * Note that this is an over-approximation of collisions, as we could more precisely only consider intersections
             * when the exact m/z of B falls within the window around A and vice versa.  However, because we have observed
             * cases where the MS sensor doesn't report structures at exactly the m/z we predict, we employ this weaker
             * definition of intersection to give a slightly pessimistic view of what confusions might be possible. */
            // Compute windows for every m/z.  We don't care about the original mz values since we just want the count.
            List<Double> mzs = mzMap.ionMZsSorted();

            final Double windowHalfWidth;
            if (cl.hasOption(OPTION_WINDOW_HALFWIDTH)) {
                // Don't use get with default for this option, as we want the exact FP value of the default tolerance.
                windowHalfWidth = Double.valueOf(cl.getOptionValue(OPTION_WINDOW_HALFWIDTH));
            } else {
                windowHalfWidth = DEFAULT_WINDOW_TOLERANCE;
            }

            /* Window = (lower bound, upper bound), counter of represented m/z's that collide with this window, and number
             * of representative structures (which will be used in counting collisions). */
            LinkedList<CollisionWindow> allWindows = new LinkedList<CollisionWindow>() {
                {
                    for (Double mz : mzs) {
                        // CPU for memory trade-off: don't re-compute the window bounds over and over and over and over and over.
                        try {
                            add(new CollisionWindow(mz, windowHalfWidth, mzMap.ionMZToMZSources(mz).size()));
                        } catch (NoSuchElementException e) {
                            LOGGER.error("Caught no such element exception for mz %f: %s", mz, e.getMessage());
                            throw e;
                        }
                    }
                }
            };

            // Sweep line time!  The window ranges are the interesting points.  We just accumulate overlap counts as we go.
            LinkedList<CollisionWindow> workingSet = new LinkedList<>();
            List<CollisionWindow> finished = new LinkedList<>();

            while (allWindows.size() > 0) {
                CollisionWindow thisWindow = allWindows.pop();
                // Remove any windows from the working set that don't overlap with the next window.
                while (workingSet.size() > 0 && workingSet.peekFirst().getMaxMZ() < thisWindow.getMinMZ()) {
                    finished.add(workingSet.pop());
                }

                for (CollisionWindow w : workingSet) {
                    /* Add the size of the new overlapping window's structure count to each of the windows in the working set,
                     * which represents the number of possible confused structures that fall within the overlapping region.
                     * We exclude the window itself as it should already have counted the colliding structures it represents. */
                    w.getAccumulator().add(thisWindow.getStructureCount());

                    /* Reciprocally, add the structure counts of all windows with which the current window overlaps to it. */
                    thisWindow.getAccumulator().add(w.getStructureCount());
                }

                // Now that accumulation is complete, we can safely add the current window.
                workingSet.add(thisWindow);
            }

            // All the interesting events are done, so drop the remaining windows into the finished set.
            finished.addAll(workingSet);

            Map<Long, Long> collisionHistogram = histogram(
                    finished.stream().map(w -> w.getAccumulator().longValue()));
            List<Long> sortedCollisions = new ArrayList<>(collisionHistogram.keySet());
            Collections.sort(sortedCollisions);
            for (Long collision : sortedCollisions) {
                tsvWriter.append(new HashMap<String, Long>() {
                    {
                        put("collisions", collision);
                        put("count", collisionHistogram.get(collision));
                    }
                });
            }
        }
    } finally {
        if (tsvWriter != null) {
            tsvWriter.close();
        }
    }
}