Example usage for java.util.Set.size()

Introduction

This page lists example usages of java.util.Set.size(), collected from the source files shown below.

Prototype

int size();

Document

Returns the number of elements in this set (its cardinality).
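
A minimal, self-contained sketch (not taken from any of the source files on this page; the class name SetSizeDemo is purely illustrative) shows what size() reports as elements are added and removed:

import java.util.HashSet;
import java.util.Set;

// Illustrative sketch only, not one of the project examples below.
public class SetSizeDemo {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>();
        System.out.println(names.size());    // 0 - an empty set has cardinality 0

        names.add("alice");
        names.add("bob");
        names.add("alice");                   // duplicate element, the set is unchanged
        System.out.println(names.size());     // 2 - duplicates do not increase the size

        names.remove("bob");
        System.out.println(names.size());     // 1

        // When only emptiness matters, isEmpty() reads better than size() == 0
        System.out.println(names.isEmpty());  // false
    }
}

Note that size() returns an int; as documented for java.util.Collection, a set containing more than Integer.MAX_VALUE elements reports Integer.MAX_VALUE.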

Usage

From source file:com.espertech.esper.epl.core.ResultSetProcessorSimple.java

/**
 * Applies the select-clause to the given events returning the selected events. The number of events stays the
 * same, i.e. this method does not filter, it just transforms the result set.
 * @param exprProcessor - processes each input event and returns output event
 * @param orderByProcessor - for sorting output events according to the order-by clause
 * @param events - input events
 * @param isNewData - indicates whether we are dealing with new data (istream) or old data (rstream)
 * @param isSynthesize - set to true to indicate that synthetic events are required for an iterator result set
 * @param result is the result event list to populate
 * @param optSortKeys is the result sort key list to populate, for sorting
 * @param exprEvaluatorContext context for expression evaluation
 */
protected static void getSelectEventsNoHaving(SelectExprProcessor exprProcessor,
        OrderByProcessor orderByProcessor, Set<MultiKey<EventBean>> events, boolean isNewData,
        boolean isSynthesize, List<EventBean> result, List<Object> optSortKeys,
        ExprEvaluatorContext exprEvaluatorContext) {
    int length = (events != null) ? events.size() : 0;
    if (length == 0) {
        return;
    }

    for (MultiKey<EventBean> key : events) {
        EventBean[] eventsPerStream = key.getArray();
        result.add(exprProcessor.process(eventsPerStream, isNewData, isSynthesize, exprEvaluatorContext));
        if (orderByProcessor != null) {
            optSortKeys.add(orderByProcessor.getSortKey(eventsPerStream, isNewData, exprEvaluatorContext));
        }
    }
}

From source file:com.buaa.cfs.utils.NetUtils.java

/**
 * This is used to get all the resolutions that were added using {@link NetUtils#addStaticResolution(String,
 * String)}. The return value is a List, each element of which is a String array of the form
 * String[0]=hostname, String[1]=resolved-hostname
 *
 * @return the list of resolutions
 */
public static List<String[]> getAllStaticResolutions() {
    synchronized (hostToResolved) {
        Set<Entry<String, String>> entries = hostToResolved.entrySet();
        if (entries.size() == 0) {
            return null;
        }
        List<String[]> l = new ArrayList<String[]>(entries.size());
        for (Entry<String, String> e : entries) {
            l.add(new String[] { e.getKey(), e.getValue() });
        }
        return l;
    }
}

From source file:edu.wpi.checksims.algorithm.similaritymatrix.SimilarityMatrix.java

/**
 * Generate a similarity matrix from a given set of submissions.
 *
 * @param inputSubmissions Submissions to generate from
 * @param results Results to build from. Must contain results for every possible unordered pair of input submissions
 * @return Similarity Matrix built from given results
 * @throws InternalAlgorithmError Thrown on missing results, or results containing a submission not in the input
 */
public static SimilarityMatrix generateMatrix(Set<Submission> inputSubmissions, Set<AlgorithmResults> results)
        throws InternalAlgorithmError {
    checkNotNull(inputSubmissions);
    checkNotNull(results);
    checkArgument(!inputSubmissions.isEmpty(), "Must provide at least 1 submission to build matrix from");
    checkArgument(!results.isEmpty(), "Must provide at least 1 AlgorithmResults to build matrix from!");

    // Generate the matrix we'll use
    MatrixEntry[][] matrix = new MatrixEntry[inputSubmissions.size()][inputSubmissions.size()];

    // Order the submissions
    List<Submission> orderedSubmissions = Ordering.natural().immutableSortedCopy(inputSubmissions);

    // Generate the matrix

    // Start with the diagonal, filling with 100% similarity
    for (int i = 0; i < orderedSubmissions.size(); i++) {
        Submission s = orderedSubmissions.get(i);

        matrix[i][i] = new MatrixEntry(s, s, s.getNumTokens());
    }

    // Now go through all the results, and build two MatrixEntry objects for each
    for (AlgorithmResults result : results) {
        int aIndex = orderedSubmissions.indexOf(result.a);
        int bIndex = orderedSubmissions.indexOf(result.b);

        if (aIndex == -1) {
            throw new InternalAlgorithmError(
                    "Processed Algorithm Result with submission not in given input submissions with name \""
                            + result.a.getName() + "\"");
        } else if (bIndex == -1) {
            throw new InternalAlgorithmError(
                    "Processed Algorithm Result with submission not in given input submissions with name \""
                            + result.b.getName() + "\"");
        }

        matrix[aIndex][bIndex] = new MatrixEntry(result.a, result.b, result.identicalTokensA);
        matrix[bIndex][aIndex] = new MatrixEntry(result.b, result.a, result.identicalTokensB);
    }

    // Verification pass: Go through and ensure that the entire array was populated
    for (int x = 0; x < orderedSubmissions.size(); x++) {
        for (int y = 0; y < orderedSubmissions.size(); y++) {
            if (matrix[x][y] == null) {
                throw new InternalAlgorithmError("Missing Algorithm Results for comparison of submissions \""
                        + orderedSubmissions.get(x).getName() + "\" and \""
                        + orderedSubmissions.get(y).getName() + "\"");
            }
        }
    }

    return new SimilarityMatrix(matrix, orderedSubmissions, orderedSubmissions, results);
}

From source file:br.bireme.tb.URLS.java

public static void generateFileStructure(final String url, final String rootDir) throws IOException {
    if (url == null) {
        throw new NullPointerException("url");
    }
    if (rootDir == null) {
        throw new NullPointerException("rootDir");
    }
    if (url.trim().endsWith(".def")) {
        throw new NullPointerException("initial url file can not be a def file.");
    }
    final File root = new File(rootDir);

    if (root.exists() && (!Utils.deleteFile(root))) {
        final String msg = "Directory [" + root.getAbsolutePath() + "] creation error.";
        Logger.getLogger(Logger.GLOBAL_LOGGER_NAME).severe(msg);
        throw new IOException(msg);
    }
    if (!root.mkdirs()) {
        final String msg = "Directory [" + root.getAbsolutePath() + "] creation error.";
        Logger.getLogger(Logger.GLOBAL_LOGGER_NAME).severe(msg);
        throw new IOException(msg);
    }

    System.out.println("Searching cvs files\n");
    final Set<String> files = generateCells(url, root);
    System.out.println("Total cell files created: " + files.size());

    try {
        createAllSitemap(files, root);
    } catch (IOException ioe) {
        Logger.getLogger(Logger.GLOBAL_LOGGER_NAME).log(Level.SEVERE, "Sitemap file creation error.", ioe);
    }

    try (final BufferedWriter writer = new BufferedWriter(new FileWriter(new File(root, "index.html")))) {
        writer.append("<!DOCTYPE html>\n");
        writer.append("<html>\n");
        writer.append(" <head>\n");
        writer.append(" <meta charset=\"UTF-8\">\n");
        writer.append(" </head>\n");
        writer.append(" <body>\n");
        writer.append(" <h1>Fichas de Qualificao</h1>\n");
        writer.append(" <ul>\n");
        for (String path : files) {
            writer.append(" <li>\n");
            writer.append(" <a href=\"" + path + "\">" + path + "</a>\n");
            writer.append(" </li>\n");
        }
        writer.append(" </ul>\n");
        writer.append(" </body>\n");
        writer.append("</html>\n");
    } catch (IOException ioe) {
        Logger.getLogger(Logger.GLOBAL_LOGGER_NAME).log(Level.SEVERE, "Index file creation error.", ioe);
    }

    System.out.println("Files saved at: " + root.getAbsolutePath());
}

From source file:com.espertech.esper.epl.core.ResultSetProcessorSimple.java

/**
 * Applies the select-clause to the given events returning the selected events. The number of events stays the
 * same, i.e. this method does not filter, it just transforms the result set.
 * @param exprProcessor - processes each input event and returns output event
 * @param events - input events
 * @param isNewData - indicates whether we are dealing with new data (istream) or old data (rstream)
 * @param isSynthesize - set to true to indicate that synthetic events are required for an iterator result set
 * @return output events, one for each input event
 */
protected static EventBean[] getSelectEventsNoHaving(SelectExprProcessor exprProcessor,
        Set<MultiKey<EventBean>> events, boolean isNewData, boolean isSynthesize,
        ExprEvaluatorContext exprEvaluatorContext) {
    if ((events == null) || (events.isEmpty())) {
        return null;
    }
    int length = events.size();

    EventBean[] result = new EventBean[length];

    int count = 0;
    for (MultiKey<EventBean> key : events) {
        EventBean[] eventsPerStream = key.getArray();
        result[count] = exprProcessor.process(eventsPerStream, isNewData, isSynthesize, exprEvaluatorContext);
        count++;
    }

    return result;
}

From source file:de.tudarmstadt.ukp.dkpro.tc.core.util.ReportUtils.java

/**
 * Converts a map containing a confusion matrix into a table.
 *
 * @param aggregateMap
 *            a map created with {@link ReportUtils#updateAggregateMatrix(Map, File)}
 * @see ReportUtils#updateAggregateMatrix(Map, File)
 * @return a table with the matrix
 */
public static FlexTable<String> createOverallConfusionMatrix(Map<List<String>, Double> aggregateMap) {
    FlexTable<String> cMTable = FlexTable.forClass(String.class);
    cMTable.setSortRows(false);

    Set<String> labelsPred = new TreeSet<String>();
    Set<String> labelsAct = new TreeSet<String>();

    // sorting rows/columns
    for (List<String> key : aggregateMap.keySet()) {
        labelsPred.add(key.get(0).substring(0, key.get(0).indexOf(Constants.CM_PREDICTED)));
        labelsAct.add(key.get(1).substring(0, key.get(1).indexOf(Constants.CM_ACTUAL)));
    }

    List<String> labelsPredL = new ArrayList<String>(labelsPred);
    List<String> labelsActL = new ArrayList<String>(labelsAct);

    // create temporary matrix
    double[][] tempM = new double[labelsAct.size()][labelsPred.size()];
    for (List<String> key : aggregateMap.keySet()) {
        int c = labelsPredL.indexOf(key.get(0).substring(0, key.get(0).indexOf(Constants.CM_PREDICTED)));
        int r = labelsActL.indexOf(key.get(1).substring(0, key.get(1).indexOf(Constants.CM_ACTUAL)));
        tempM[r][c] = aggregateMap.get(key);
    }

    // convert to FlexTable
    for (int i = 0; i < tempM.length; i++) {
        LinkedHashMap<String, String> row = new LinkedHashMap<String, String>();
        for (int r = 0; r < tempM[0].length; r++) {
            row.put(labelsPredL.get(r) + " " + Constants.CM_PREDICTED, String.valueOf(tempM[i][r]));
        }
        cMTable.addRow(labelsActL.get(i) + " " + Constants.CM_ACTUAL, row);
    }

    return cMTable;
}

From source file:com.taobao.android.tpatch.utils.JarSplitUtils.java

public static void splitZipToFolder(File inputFile, File outputFolder, Set<String> includeEnties)
        throws IOException {
    if (!outputFolder.exists()) {
        outputFolder.mkdirs();
    }
    if (null == includeEnties || includeEnties.size() < 1) {
        return;
    }
    final byte[] buffer = new byte[8192];
    FileInputStream fis = new FileInputStream(inputFile);
    ZipInputStream zis = new ZipInputStream(fis);

    try {
        // loop on the entries of the jar file package and put them in the final jar
        ZipEntry entry;
        while ((entry = zis.getNextEntry()) != null) {
            // skip directory entries
            if (entry.isDirectory()) {
                continue;
            }
            String name = entry.getName();
            if (!includeEnties.contains(name)) {
                continue;
            }
            File destFile = new File(outputFolder, name);
            destFile.getParentFile().mkdirs();
            // read the content of the entry from the input stream, and write it into the archive.
            int count;
            FileOutputStream fout = FileUtils.openOutputStream(destFile);
            while ((count = zis.read(buffer)) != -1) {
                fout.write(buffer, 0, count);
            }
            fout.close();
            zis.closeEntry();
        }
    } finally {
        zis.close();
    }
    fis.close();
}

From source file:edu.wpi.checksims.algorithm.smithwaterman.SmithWatermanAlgorithm.java

/**
 * Get the closest coordinate to the origin from a given set.
 *
 * @param coordinates Coordinates to search within
 * @return Closest coordinate to the origin (0,0)
 */
static Coordinate getFirstMatchCoordinate(Set<Coordinate> coordinates) {
    checkNotNull(coordinates);
    checkArgument(!coordinates.isEmpty(), "Cannot get first match coordinate as match set is empty!");

    if (coordinates.size() == 1) {
        return Iterables.get(coordinates, 0);
    }

    Coordinate candidate = Iterables.get(coordinates, 0);

    // Search for a coordinate closer to the origin
    for (Coordinate coord : coordinates) {
        if (coord.getX() <= candidate.getX() && coord.getY() <= candidate.getY()) {
            candidate = coord;
        }
    }

    return candidate;
}

From source file:com.espertech.esper.epl.core.ResultSetProcessorSimple.java

/**
 * Applies the select-clause to the given events returning the selected events. The number of events stays the
 * same, i.e. this method does not filter, it just transforms the result set.
 * @param exprProcessor - processes each input event and returns output event
 * @param orderByProcessor - for sorting output events according to the order-by clause
 * @param events - input events
 * @param isNewData - indicates whether we are dealing with new data (istream) or old data (rstream)
 * @param isSynthesize - set to true to indicate that synthetic events are required for an iterator result set
 * @param exprEvaluatorContext context for expression evaluation
 * @return output events, one for each input event
 */
protected static EventBean[] getSelectEventsNoHaving(SelectExprProcessor exprProcessor,
        OrderByProcessor orderByProcessor, Set<MultiKey<EventBean>> events, boolean isNewData,
        boolean isSynthesize, ExprEvaluatorContext exprEvaluatorContext) {
    if ((events == null) || (events.isEmpty())) {
        return null;
    }
    int length = events.size();

    EventBean[] result = new EventBean[length];
    EventBean[][] eventGenerators = null;
    if (orderByProcessor != null) {
        eventGenerators = new EventBean[length][];
    }

    int count = 0;
    for (MultiKey<EventBean> key : events) {
        EventBean[] eventsPerStream = key.getArray();
        result[count] = exprProcessor.process(eventsPerStream, isNewData, isSynthesize, exprEvaluatorContext);
        if (orderByProcessor != null) {
            eventGenerators[count] = eventsPerStream;
        }
        count++;
    }

    if (orderByProcessor != null) {
        return orderByProcessor.sort(result, eventGenerators, isNewData, exprEvaluatorContext);
    } else {
        return result;
    }
}

From source file:com.espertech.esper.epl.join.plan.NStreamOuterQueryPlanBuilder.java

/**
 * Recursively builds a substream-per-stream ordered tree graph using the
 * join information supplied for outer joins and from the query graph (where clause).
 * <p>
 * Required streams are considered first and their lookup is placed first in the list
 * to gain performance.
 * @param streamNum is the root stream number that supplies the incoming event to build the tree for
 * @param queryGraph contains where-clause stream relationship info
 * @param completedStreams is a temporary holder for streams already considered
 * @param substreamsPerStream is the ordered, tree-like structure to be filled
 * @param streamCallStack the query plan call stack of streams available via cursor
 * @param dependencyGraph - dependencies between historical streams
 * @throws ExprValidationException if the query planning failed
 */
protected static void recursiveBuildInnerJoin(int streamNum, Stack<Integer> streamCallStack,
        QueryGraph queryGraph, Set<Integer> completedStreams, LinkedHashMap<Integer, int[]> substreamsPerStream,
        DependencyGraph dependencyGraph) throws ExprValidationException {
    // add this stream to the set of completed streams
    completedStreams.add(streamNum);

    // check if the dependencies have been satisfied
    if (dependencyGraph.hasDependency(streamNum)) {
        Set<Integer> dependencies = dependencyGraph.getDependenciesForStream(streamNum);
        for (Integer dependentStream : dependencies) {
            if (!streamCallStack.contains(dependentStream)) {
                throw new ExprValidationException(
                        "Historical stream " + streamNum + " parameter dependency originating in stream "
                                + dependentStream + " cannot or may not be satisfied by the join");
            }
        }
    }

    // Determine the streams we can navigate to from this stream
    Set<Integer> navigableStreams = queryGraph.getNavigableStreams(streamNum);

    // remove streams with a dependency on other streams not yet processed
    Integer[] navigableStreamArr = navigableStreams.toArray(new Integer[navigableStreams.size()]);
    for (int navigableStream : navigableStreamArr) {
        if (dependencyGraph.hasUnsatisfiedDependency(navigableStream, completedStreams)) {
            navigableStreams.remove(navigableStream);
        }
    }

    // remove those already done
    navigableStreams.removeAll(completedStreams);

    // if we are a leaf node, we are done
    if (navigableStreams.isEmpty()) {
        substreamsPerStream.put(streamNum, new int[0]);
        return;
    }

    // First the outer (required) streams to this stream, then the inner (optional) streams
    int[] substreams = new int[navigableStreams.size()];
    substreamsPerStream.put(streamNum, substreams);
    int count = 0;
    for (int stream : navigableStreams) {
        substreams[count++] = stream;
        completedStreams.add(stream);
    }

    for (int stream : navigableStreams) {
        streamCallStack.push(stream);
        recursiveBuildInnerJoin(stream, streamCallStack, queryGraph, completedStreams, substreamsPerStream,
                dependencyGraph);
        streamCallStack.pop();
    }
}