Example usage for com.google.common.collect Sets newTreeSet

Introduction

On this page you can find example usages of com.google.common.collect.Sets.newTreeSet(), collected from open source projects.

Prototype

public static <E extends Comparable> TreeSet<E> newTreeSet() 

Document

Creates a mutable, empty TreeSet instance sorted by the natural sort ordering of its elements.
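
As a quick orientation before the project examples below, here is a minimal, self-contained sketch of the method in isolation. Because this overload takes no Comparator, the element type has to implement Comparable:

import com.google.common.collect.Sets;
import java.util.TreeSet;

public class NewTreeSetExample {
    public static void main(String[] args) {
        // Elements are kept in natural (here: lexicographic) order.
        TreeSet<String> names = Sets.newTreeSet();
        names.add("mango");
        names.add("apple");
        names.add("banana");
        System.out.println(names); // [apple, banana, mango]

        // The returned set is mutable, so it can be modified freely.
        names.remove("banana");
        System.out.println(names.first()); // apple
    }
}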

Usage

From source file: fr.inria.oak.paxquery.algebra.optimizer.rules.PushProjections.java

private static BaseLogicalOperator pushProjections(BaseLogicalOperator op,
        Set<ProjectColumn> columnsRequiredAbove) {
    // 1. The set of columns that the logical operator should have. It is built from the set of
    // columnsRequiredAbove and the columns referred to by any predicate that the logical operator
    // may have.
    Set<ProjectColumn> columnsRequiredBelow = getRequiredInputColumns(op, columnsRequiredAbove);
    Set<ProjectColumn> columnsOperator = getOperatorColumns(op, columnsRequiredAbove, columnsRequiredBelow);
    // Update columns required above
    for (ProjectColumn column : columnsRequiredAbove) {
        if (!column.nestedColumns.isEmpty()) {
            for (ProjectColumn opColumn : columnsOperator) {
                if (column.pos == opColumn.pos) {
                    Set<ProjectColumn> nestedFields = Sets.newTreeSet();
                    for (ProjectColumn nestedField : opColumn.nestedColumns) {
                        nestedFields.add(nestedField.copy());
                    }
                    column.nestedColumns = nestedFields;
                }
            }
        }
    }

    // if (LOG.isDebugEnabled()) {
    System.out.println("op : " + op);
    System.out.println("columnsRequiredAbove : " + columnsRequiredAbove);
    System.out.println("columnsRequiredBelow : " + columnsRequiredBelow);
    System.out.println("columnsOperator : " + columnsOperator);
    // }

    // Children created by the method as we push projections.
    if (op instanceof BaseUnaryOperator) {
        BaseLogicalOperator newChild = pushProjections(op.getChildren().get(0), columnsRequiredBelow);
        LogicalPlan.connect(op, newChild);
    } else if (op instanceof BaseBinaryOperator) {
        BaseBinaryOperator binaryOp = (BaseBinaryOperator) op;
        Pair<Set<ProjectColumn>, Set<ProjectColumn>> splitFields = splitFieldRequiredForBinaryOperator(binaryOp,
                columnsRequiredBelow);
        BaseLogicalOperator newLeftChild = pushProjections(op.getChildren().get(0), splitFields.left);
        BaseLogicalOperator newRightChild = pushProjections(op.getChildren().get(1), splitFields.right);
        LogicalPlan.connect(op, newLeftChild, newRightChild);
    }

    // 2. Create a project operator on top, if necessary.
    BaseLogicalOperator returnOp = op;
    Set<Integer> columnPositionsRequiredAbove = getTopLevelSet(columnsRequiredAbove);
    Set<Integer> opFieldsPositions = getTopLevelSet(columnsOperator);
    if (returnOp.getNRSMD().colNo > columnPositionsRequiredAbove.size()) {
        int projCols[] = new int[columnPositionsRequiredAbove.size()];
        int here = 0;
        int above = 0;
        for (int i = 0; i < op.getNRSMD().colNo; i++) {
            if (columnPositionsRequiredAbove.contains(i)) {
                projCols[above++] = here++;
            } else if (opFieldsPositions.contains(i)) {
                here++;
            }
        }
        Projection proj = new Projection(returnOp, projCols);
        returnOp = proj;
    }

    // 3. Update the operator using the position mapping
    ColumnsMapping updatedColumns = obtainMapping(columnsRequiredBelow);
    updateOperatorColumns(op, updatedColumns);
    op.buildOwnDetails();

    return returnOp;
}

From source file: com.google.devtools.build.lib.buildtool.InstrumentationFilterSupport.java

private static void optimizeFilterSet(SortedSet<String> packageFilters) {
    Iterator<String> iterator = packageFilters.iterator();
    if (iterator.hasNext()) {
        // Find common parent filters to reduce the number of filter expressions. In practice this
        // still produces a nicely constrained instrumentation filter while making the final
        // filter value much more user-friendly - especially in the case of /my/package/... wildcards.
        Set<String> parentFilters = Sets.newTreeSet();
        String filterString = iterator.next();
        PathFragment parent = PathFragment.create(filterString).getParentDirectory();
        while (iterator.hasNext()) {
            String current = iterator.next();
            if (parent != null && parent.getPathString().length() > 0 && !current.startsWith(filterString)
                    && current.startsWith(parent.getPathString())) {
                parentFilters.add(parent.getPathString());
            } else {
                filterString = current;
                parent = PathFragment.create(filterString).getParentDirectory();
            }
        }
        packageFilters.addAll(parentFilters);

        // Optimize away nested filters.
        iterator = packageFilters.iterator();
        String prev = iterator.next();
        while (iterator.hasNext()) {
            String current = iterator.next();
            if (current.startsWith(prev)) {
                iterator.remove();
            } else {
                prev = current;
            }
        }
    }
}
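
The method above leans on the TreeSet's lexicographic iteration order: a parent path sorts immediately before the paths nested under it, so both the parent detection and the nested-filter removal can be done in a single forward pass. Here is a minimal, standalone sketch of the same prefix-collapse idea, independent of Bazel's PathFragment; the helper name collapsePrefixes is made up for illustration:

import com.google.common.collect.Sets;

import java.util.Iterator;
import java.util.SortedSet;

public class PrefixCollapseSketch {
    // Removes every entry that has an earlier entry as its prefix.
    static void collapsePrefixes(SortedSet<String> filters) {
        Iterator<String> it = filters.iterator();
        if (!it.hasNext()) {
            return;
        }
        String prev = it.next();
        while (it.hasNext()) {
            String current = it.next();
            if (current.startsWith(prev)) {
                it.remove(); // nested under prev, hence redundant
            } else {
                prev = current;
            }
        }
    }

    public static void main(String[] args) {
        SortedSet<String> filters = Sets.newTreeSet();
        filters.add("//foo/bar");
        filters.add("//foo");
        filters.add("//baz/qux");
        collapsePrefixes(filters);
        System.out.println(filters); // [//baz/qux, //foo]
    }
}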

From source file: org.glowroot.local.ui.LayoutService.java

private static Layout buildLayout(String version, ConfigService configService,
        List<PluginDescriptor> pluginDescriptors, @Nullable HeapDumps heapDumps,
        long fixedAggregateIntervalSeconds, long fixedAggregateRollupSeconds, long fixedGaugeIntervalSeconds,
        long fixedGaugeRollupSeconds) {
    // use linked hash set to maintain ordering in case there is no default transaction type
    List<String> transactionTypes = Lists.newArrayList(configService.getAllTransactionTypes());
    String defaultTransactionType = configService.getDefaultTransactionType();
    List<String> orderedTransactionTypes = Lists.newArrayList();
    if (transactionTypes.isEmpty()) {
        defaultTransactionType = "NO TRANSACTION TYPES DEFINED";
    } else {
        if (!transactionTypes.contains(defaultTransactionType)) {
            defaultTransactionType = transactionTypes.iterator().next();
        }
        transactionTypes.remove(defaultTransactionType);
    }
    // add default transaction type first
    orderedTransactionTypes.add(defaultTransactionType);
    // add the rest alphabetical
    orderedTransactionTypes.addAll(Ordering.from(String.CASE_INSENSITIVE_ORDER).sortedCopy(transactionTypes));
    Set<String> transactionCustomAttributes = Sets.newTreeSet();
    for (PluginDescriptor pluginDescriptor : pluginDescriptors) {
        transactionCustomAttributes.addAll(pluginDescriptor.transactionCustomAttributes());
    }
    UserInterfaceConfig userInterfaceConfig = configService.getUserInterfaceConfig();
    return Layout.builder().jvmHeapDump(heapDumps != null).footerMessage("version " + version)
            .adminPasswordEnabled(userInterfaceConfig.adminPasswordEnabled())
            .readOnlyPasswordEnabled(userInterfaceConfig.readOnlyPasswordEnabled())
            .anonymousAccess(userInterfaceConfig.anonymousAccess())
            .addAllTransactionTypes(orderedTransactionTypes).defaultTransactionType(defaultTransactionType)
            .addAllTransactionCustomAttributes(transactionCustomAttributes)
            .fixedAggregateIntervalSeconds(fixedAggregateIntervalSeconds)
            .fixedAggregateRollupSeconds(fixedAggregateRollupSeconds)
            .fixedGaugeIntervalSeconds(fixedGaugeIntervalSeconds)
            .fixedGaugeRollupSeconds(fixedGaugeRollupSeconds).build();
}

From source file: com.google.gdt.eclipse.designer.util.ui.ResourceSelectionDialog.java

public ResourceSelectionDialog(Shell parentShell, IResourcesProvider provider,
        ModuleDescription moduleDescription, String title) throws Exception {
    super(parentShell, Activator.getDefault());
    m_provider = provider;
    m_title = title;
    // prepare resources
    m_root = new ResourceFolder(null, null);
    {
        addWarFolder(moduleDescription);
    }
    {
        final Set<String> visitedModules = Sets.newTreeSet();
        final Set<String> visitedPackages = Sets.newTreeSet();
        ModuleVisitor.accept(moduleDescription, new ModuleVisitor() {
            private ResourceFolder m_moduleFolder;

            @Override
            public boolean visitModule(ModuleElement module) {
                String moduleName = module.getName();
                if (visitedModules.contains(moduleName)) {
                    return false;
                }
                //
                m_moduleFolder = new ResourceFolder(m_root, moduleName);
                m_root.add(m_moduleFolder);
                //
                visitedModules.add(moduleName);
                return true;
            }

            @Override
            public void visitPublicPackage(ModuleElement module, String packageName) throws Exception {
                if (!visitedPackages.contains(packageName)) {
                    visitedPackages.add(packageName);
                    String path = packageName.replace('.', '/');
                    for (String file : m_provider.listFiles(path)) {
                        m_moduleFolder.add(file, path + "/" + file);
                    }
                }
            }
        });
    }
}
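
A small aside on the two visited-sets: Set.add already returns false when the element is present, so the contains-then-add sequence in visitPublicPackage could be collapsed into one call, along the lines of the sketch below. Also, since the sets are only used for membership tests, the sorted iteration of newTreeSet is not strictly needed here; Sets.newHashSet would work equally well.

if (visitedPackages.add(packageName)) {
    String path = packageName.replace('.', '/');
    for (String file : m_provider.listFiles(path)) {
        m_moduleFolder.add(file, path + "/" + file);
    }
}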

From source file: com.opengamma.bbg.referencedata.cache.AbstractInvalidFieldCachingReferenceDataProvider.java

/**
 * Examines and groups the request using the known invalid fields.
 *
 * @param request  the request, not null
 * @param invalidFieldsByIdentifier  the invalid fields, keyed by identifier, not null
 * @return the map of field-set to identifier-set, not null
 */
protected Map<Set<String>, Set<String>> buildUnderlyingRequestGroups(ReferenceDataProviderGetRequest request,
        Map<String, Set<String>> invalidFieldsByIdentifier) {
    Map<Set<String>, Set<String>> result = Maps.newHashMap();
    for (String identifier : request.getIdentifiers()) {
        // select known invalid fields for the identifier
        Set<String> invalidFields = invalidFieldsByIdentifier.get(identifier);

        // calculate the missing fields that must be queried from the underlying
        Set<String> missingFields = null;
        if (invalidFields == null) {
            missingFields = Sets.newHashSet(request.getFields());
        } else {
            missingFields = Sets.difference(request.getFields(), invalidFields);
        }

        // build the grouped result map, keyed from field-set to identifier-set
        Set<String> resultIdentifiers = result.get(missingFields);
        if (resultIdentifiers == null) {
            resultIdentifiers = Sets.newTreeSet();
            result.put(missingFields, resultIdentifiers);
        }
        resultIdentifiers.add(identifier);
    }
    return result;
}
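
The get-then-put sequence that fills the result map is the classic hand-rolled multimap idiom. On Java 8 and later it can be compressed with computeIfAbsent, exactly as the AbridgedTimeRangeWriter example further down this page does; a sketch of the equivalent form:

Set<String> resultIdentifiers =
        result.computeIfAbsent(missingFields, k -> Sets.newTreeSet());
resultIdentifiers.add(identifier);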

From source file: uk.ac.ebi.spot.rdf.model.baseline.ExperimentalFactors.java

public SortedSet<AssayGroupFactor> getFilteredAssayGroupFactors(final Set<Factor> filterFactors) {

    SortedSet<AssayGroupFactor> result = Sets.newTreeSet();

    for (String groupId : orderedFactorGroupsByAssayGroupId.keySet()) {
        List<Factor> remainingFactors;

        if (CollectionUtils.isNotEmpty(filterFactors)) {
            remainingFactors = orderedFactorGroupsByAssayGroupId.get(groupId).remove(filterFactors);
        } else {
            remainingFactors = Lists.newArrayList(orderedFactorGroupsByAssayGroupId.get(groupId).iterator());
        }
        if (remainingFactors.size() == 1) {
            result.add(new AssayGroupFactor(groupId, remainingFactors.get(0)));
        }
    }

    return result;
}

From source file: org.commoncrawl.mapred.ec2.postprocess.linkCollector.LinkGraphDataEmitterJob.java

private static SortedSet<Long> scanForValidSegments(FileSystem fs) throws IOException {
    SortedSet<Long> completeSegmentIds = Sets.newTreeSet();

    for (FileStatus fileStatus : fs.globStatus(new Path(VALID_SEGMENTS_PATH + "[0-9]*"))) {
        completeSegmentIds.add(Long.parseLong(fileStatus.getPath().getName()));
    }
    return completeSegmentIds;
}

From source file: org.apache.lens.cube.parse.AbridgedTimeRangeWriter.java

/**
 * parts is a collection of FactPartition objects. A FactPartition can be viewed as two boolean conditions, one
 * specified by its containingPart object, and another specified by itself in the form (partCol = partSpec).
 * <p/>
 * Collection of FactPartition objects can be viewed as an OR clause on all the FactPartition objects -- which by
 * itself is a binary AND clause.
 * <p/>
 * So Collection&lt;FactPartition&gt; is nothing but (a AND b) OR (c AND d) OR (e AND f) ...
 * <p/>
 * This function tries to reduce such a big clause by using Boolean arithmetic. The big thing it aims to reduce is the
 * following class of clauses:
 * <p/>
 * (a AND c) OR (a AND d) OR (b AND c) OR (b AND d) => ((a OR b) AND (c OR d))
 * <p/>
 * Equivalent return value for such a reduction would be an entry in the returned map from set(a,b) to set(c,d).
 * Assuming the argument was set(a(containing=c), a(containing=d), b(containing=c), b(containing=d))
 *
 * @param parts
 * @return
 */
private Map<Set<FactPartition>, Set<FactPartition>> groupPartitions(Collection<FactPartition> parts) {
    Map<FactPartition, Set<FactPartition>> partitionSetMap = new HashMap<FactPartition, Set<FactPartition>>();
    for (FactPartition part : parts) {
        partitionSetMap.computeIfAbsent(part.getContainingPart(), k -> Sets.newTreeSet())
                .add(part.withoutContaining());
    }
    Map<Set<FactPartition>, Set<FactPartition>> setSetOppositeMap = Maps.newHashMap();
    for (Map.Entry<FactPartition, Set<FactPartition>> entry : partitionSetMap.entrySet()) {
        setSetOppositeMap.computeIfAbsent(entry.getValue(), k -> Sets.newTreeSet());
        if (entry.getKey() != null) {
            setSetOppositeMap.get(entry.getValue()).add(entry.getKey());
        }
    }
    // inverse again
    return setSetOppositeMap.entrySet().stream().collect(toMap(Map.Entry::getValue, Map.Entry::getKey));
}

From source file: com.greplin.lucene.filter.PhraseFilter.java

@Override
public DocIdSet getDocIdSet(final IndexReader reader) throws IOException {
    List<IndexReader> subReaders = IndexReaders.gatherSubReaders(reader);
    PhraseFilterMatchList[] results = new PhraseFilterMatchList[subReaders.size()];
    int matchCount = 0;
    int readerNumber = 0;

    for (IndexReader subReader : subReaders) {
        SortedSet<TermWithFrequency> termsOrderedByFrequency = Sets.newTreeSet();
        for (int i = 0; i < this.terms.length; i++) {
            Term t = this.terms[i];
            termsOrderedByFrequency.add(new TermWithFrequency(t, subReader.docFreq(t), i));
        }

        PhraseFilterMatchList matches = null;
        TermPositions termPositions = subReader.termPositions();
        try {
            for (TermWithFrequency term : termsOrderedByFrequency) {
                if (term.docFreq == 0) {
                    break;
                }

                termPositions.seek(term.term);

                if (matches == null) {
                    // If this is the first term, collect all matches that intersect
                    // with the provided initial document set.
                    Intersection intersection = this.intersectionProvider.get(reader);

                    matches = new PhraseFilterMatchList(term.docFreq);
                    while (intersection.advanceToNextIntersection(termPositions)) {
                        int freq = termPositions.freq();
                        PhraseFilterIntList list = new PhraseFilterIntList(freq);
                        for (int i = 0; i < freq; i++) {
                            list.add(termPositions.nextPosition() - term.offset);
                        }
                        matches.add(termPositions.doc(), list);
                    }
                } else {
                    // Otherwise, intersect with the existing matches.
                    matches.intersect(termPositions, term.offset);
                }

                if (matches.getCount() == 0) {
                    break;
                }
            }
        } finally {
            termPositions.close();
        }

        if (matches != null) {
            results[readerNumber] = matches;
            matchCount += matches.getCount();
        }
        readerNumber++;
    }

    final int bitsPerIntPowerLogTwo = 5; // 2^5 = 32
    if (matchCount > reader.maxDoc() >> bitsPerIntPowerLogTwo) {
        FixedBitSet result = new FixedBitSet(reader.maxDoc());
        int readerOffset = 0;
        for (int readerIndex = 0; readerIndex < results.length; readerIndex++) {
            PhraseFilterMatchList matches = results[readerIndex];
            if (matches != null) {
                int count = matches.getCount();
                int[] docIds = matches.getDocIds();
                for (int i = 0; i < count; i++) {
                    result.set(docIds[i] + readerOffset);
                }
            }
            readerOffset += subReaders.get(readerIndex).maxDoc();
        }
        return result;
    } else if (matchCount == 0) {
        return DocIdSets.EMPTY;
    } else {
        int[] result = new int[matchCount];
        int base = 0;
        int readerOffset = 0;
        for (int readerIndex = 0; readerIndex < results.length; readerIndex++) {
            PhraseFilterMatchList matches = results[readerIndex];
            if (matches != null) {
                int count = matches.getCount();
                int[] docIds = matches.getDocIds();
                for (int i = 0; i < count; i++) {
                    result[base + i] = docIds[i] + readerOffset;
                }
                base += count;
            }
            readerOffset += subReaders.get(readerIndex).maxDoc();
        }
        return new SortedIntArrayDocIdSet(result);
    }
}
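
One detail worth noting in this example: since newTreeSet() is the comparator-free overload, TermWithFrequency must implement Comparable, and ordering by document frequency lets the loop start from the rarest term so the candidate match list shrinks as early as possible. The class itself is not shown on this page, so the following is only a plausible sketch; the exact fields and tie-breaking are assumptions:

import org.apache.lucene.index.Term;

class TermWithFrequency implements Comparable<TermWithFrequency> {
    final Term term;
    final int docFreq;
    final int offset;

    TermWithFrequency(Term term, int docFreq, int offset) {
        this.term = term;
        this.docFreq = docFreq;
        this.offset = offset;
    }

    @Override
    public int compareTo(TermWithFrequency o) {
        // Rarest terms first; break ties by offset so that terms with
        // equal frequency are not collapsed by the TreeSet.
        int cmp = Integer.compare(docFreq, o.docFreq);
        return cmp != 0 ? cmp : Integer.compare(offset, o.offset);
    }
}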

From source file: org.estatio.fixturescripts.CreateRetroInvoices.java

@Programmatic
public SortedSet<LocalDate> findDueDatesForLease(LocalDate startDueDate, LocalDate nextDueDate, Lease lease) {
    final SortedSet<LocalDate> dates = Sets.newTreeSet();
    for (LeaseItem leaseItem : lease.getItems()) {
        dates.addAll(findDueDatesForLeaseItem(startDueDate, nextDueDate, leaseItem));
    }
    return dates;
}