Example usage for java.util Collection removeAll

List of usage examples for java.util Collection removeAll

Introduction

On this page you can find example usages of java.util.Collection.removeAll, collected from open-source projects.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes all of this collection's elements that are also contained in the specified collection (optional operation).
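
Before the project examples, here is a minimal, self-contained sketch of the call using hypothetical data. Note that removeAll mutates the receiver in place, returns true only if the receiver changed, and, being an optional operation, may throw UnsupportedOperationException on fixed-size or immutable collections such as the list returned by Arrays.asList.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class RemoveAllDemo {
    public static void main(String[] args) {
        // A mutable receiver is required; removeAll is an optional operation.
        Collection<String> colors = new ArrayList<String>(
                Arrays.asList("red", "green", "blue", "green"));

        // Every element also contained in the argument is removed, duplicates
        // included; the return value reports whether the receiver changed.
        boolean changed = colors.removeAll(Arrays.asList("green", "yellow"));

        System.out.println(changed); // true
        System.out.println(colors);  // [red, blue]
    }
}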

Usage

From source file:org.topazproject.otm.impl.SessionImpl.java

private <T> void write(Id id, T o, boolean delete) throws OtmException {
    ClassMetadata cm = sessionFactory.getInstanceMetadata(id.getClassMetadata(), getEntityMode(), o);
    TripleStore store = sessionFactory.getTripleStore();
    SearchStore ss = sessionFactory.getSearchStore();
    boolean bf = (cm.getBlobField() != null);
    boolean tp = (cm.getRdfMappers().size() + cm.getAllTypes().size()) > 0;

    if (delete) {
        if (log.isDebugEnabled())
            log.debug("deleting from store: " + id);

        if (interceptor != null)
            interceptor.onPreDelete(this, cm, id.getId(), o);
        states.remove(o);
        if (bf) {
            Blob blob = getBlob(cm, id.toString(), o);
            blob.delete();
            updateBlobSearch(ss, blob, cm, id, o);
        }
        if (tp) {
            updateTripleSearch(ss, cm, cm.getRdfMappers(), id, o, true);
            store.delete(cm, cm.getRdfMappers(), id.getId(), o, getTripleStoreCon());
        }
        if (interceptor != null)
            interceptor.onPostDelete(this, cm, id.getId(), o);
    } else if (isPristineProxy(id, o)) {
        if (log.isDebugEnabled())
            log.debug("Update skipped for " + id + ". This is a proxy object and is not even loaded.");
    } else {
        Interceptor.Updates updates = states.update(o, cm, this);
        Collection<RdfMapper> fields;
        boolean firstTime = (updates == null);
        if (firstTime)
            fields = cm.getRdfMappers();
        else
            fields = updates.pmapChanged ? cm.getRdfMappers() : updates.rdfMappers;

        int nFields = fields.size();
        if (log.isDebugEnabled()) {
            if (firstTime)
                log.debug("Saving " + id + " to store.");
            else if (nFields == cm.getRdfMappers().size())
                log.debug("Full update for " + id + ".");
            else if (nFields == 0)
                log.debug("Update skipped for " + id + ". No changes to the object state.");
            else {
                Collection<RdfMapper> skips = new ArrayList<RdfMapper>(cm.getRdfMappers());
                skips.removeAll(fields);
                StringBuilder buf = new StringBuilder(100);
                char sep = ' ';
                for (RdfMapper m : skips) {
                    buf.append(sep).append(m.getName());
                    sep = ',';
                }
                log.debug("Partial update for " + id + ". Skipped:" + buf);
            }
        }

        boolean wroteSomething = false;
        if (tp && (firstTime || (nFields > 0))) {
            updateTripleSearch(ss, cm, fields, id, o, true);
            store.delete(cm, fields, id.getId(), o, getTripleStoreCon());
            store.insert(cm, fields, id.getId(), o, getTripleStoreCon());
            updateTripleSearch(ss, cm, fields, id, o, false);
            wroteSomething = true;
        }
        if (bf) {
            Blob blob = getBlob(cm, id.toString(), o);
            switch (states.digestUpdate(o, cm, this)) {
            case delete:
                blob.delete();
                break;
            case update:
            case insert:
                PropertyBinder binder = cm.getBlobField().getBinder(getEntityMode());
                byte[] b = binder.getStreamer().getBytes(binder, o);
                blob.writeAll(b);
                break;
            }

            if (blob.mark() == Blob.ChangeState.NONE)
                bf = false;
            else
                updateBlobSearch(ss, blob, cm, id, o);

            if (updates != null)
                updates.blobChanged = bf;
            if (bf)
                wroteSomething = true;
        }
        if ((interceptor != null) && wroteSomething)
            interceptor.onPostWrite(this, cm, id.getId(), o, updates);
    }
}

From source file:com.evolveum.midpoint.repo.sql.helpers.LookupTableHelper.java

public <T extends ObjectType> Collection<? extends ItemDelta> filterLookupTableModifications(Class<T> type,
        Collection<? extends ItemDelta> modifications) {
    Collection<ItemDelta> tableDelta = new ArrayList<>();
    if (!LookupTableType.class.equals(type)) {
        return tableDelta;
    }

    ItemPath tablePath = new ItemPath(LookupTableType.F_ROW);
    for (ItemDelta delta : modifications) {
        ItemPath path = delta.getPath();
        if (path.isEmpty()) {
            throw new UnsupportedOperationException(
                    "Lookup table cannot be modified via empty-path modification");
        } else if (path.equivalent(tablePath)) {
            tableDelta.add(delta);
        } else if (path.isSuperPath(tablePath)) {
            // todo - what about modifications with path like table[id] or table[id]/xxx where xxx=key|value|label?
            throw new UnsupportedOperationException(
                    "Lookup table row can be modified only by specifying path=table");
        }
    }

    modifications.removeAll(tableDelta);

    return tableDelta;
}
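
A side note on the signature: the parameter type is Collection<?>, so any collection can be passed as the argument; that is also why the example above can call removeAll on a wildcard-typed Collection<? extends ItemDelta>. A minimal sketch with hypothetical data (assumes the usual java.util imports):

Collection<? extends Number> numbers = new ArrayList<Integer>(Arrays.asList(1, 2, 3, 4));
numbers.removeAll(Arrays.asList(2, 4)); // compiles: the parameter is Collection<?>
System.out.println(numbers);            // [1, 3]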

From source file:be.ac.ua.comp.scarletnebula.core.CloudProvider.java

/**
 * @return A collection of all keys that exist on the provider side but not
 *         locally for this provider.
 * @throws InternalException
 * @throws CloudException
 */
public Collection<String> getUnknownKeys() throws InternalException, CloudException {
    final ShellKeySupport shellKeySupport = providerImpl.getIdentityServices().getShellKeySupport();

    final Collection<String> keys = shellKeySupport.list();
    final Collection<String> knownKeys = KeyManager.getKeyNames(getName());
    keys.removeAll(knownKeys);
    return keys;
}
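
removeAll probes its argument with contains() once per element of the receiver, so when both collections are large, an argument with O(1) lookup keeps the difference linear. A hedged sketch of the same keys-minus-knownKeys idea, with hypothetical data (assumes the usual java.util imports):

Collection<String> keys = new ArrayList<String>(Arrays.asList("k1", "k2", "k3"));
Collection<String> knownKeys = Arrays.asList("k2");

// Wrapping the argument in a HashSet makes each contains() check O(1).
keys.removeAll(new HashSet<String>(knownKeys));
System.out.println(keys); // [k1, k3]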

From source file:org.kuali.kra.lookup.KraDocSearchCriteriaDTOLookupableHelperServiceImpl.java

protected void filterOutProtocolNotificationDocument(Collection resultTable,
        List<DocumentSearchResult> docSearchResults) {
    List<ResultRow> notificationResultRows = new ArrayList<ResultRow>();
    List<DocumentSearchResult> notificationSearchResults = new ArrayList<DocumentSearchResult>();
    String titleForComparison = null;
    LOG.info("begin filter notification " + docSearchResults.size());
    int index = 0;
    for (DocumentSearchResult docSearchResult : docSearchResults) {
        if ("Notification".equals(docSearchResult.getResultContainer("docTypeLabel").getUserDisplayValue())) {
            titleForComparison = docSearchResult.getResultContainer(DOCUMENT_TITLE_FIELD).getUserDisplayValue();
            if (StringUtils.isNotBlank(titleForComparison)
                    && isProtocolActionNotification(titleForComparison)) {
                notificationResultRows.add(((List<ResultRow>) resultTable).get(index));
                notificationSearchResults.add(docSearchResult);
            }
        }
        index++;
    }
    resultTable.removeAll(notificationResultRows);
    docSearchResults.removeAll(notificationSearchResults);
    LOG.info("end filter notification " + docSearchResults.size());
}

From source file:edu.purdue.cc.bionet.ui.ClusteringDisplayPanel.java

public void setSampleGroups(Collection<SampleGroup> sampleGroups) {
    Component frame = this;
    while (!(frame instanceof Frame) && frame != null) {
        frame = frame.getParent();
    }
    ClusterSelectionDialog dialog = new ClusterSelectionDialog((Frame) frame,
            Settings.getLanguage().get("Choose Clustering Method"), this.getSampleGroups() != null);

    if (dialog.getReturnValue() == null) {
        return;
    }
    super.setSampleGroups(sampleGroups);
    Collection<Molecule> molecules;
    try {
        ClusterSelectorTreePanel clusterPanel = (ClusterSelectorTreePanel) this.selectorPanel.getComponent(0);
        molecules = clusterPanel.getCheckedMolecules();
    } catch (ArrayIndexOutOfBoundsException e) {
        molecules = this.molecules;
    } catch (ClassCastException e) {
        molecules = this.molecules;
        Logger.getLogger(this.getClass()).error(e, e);
    }
    // clear the panels and set new layouts based on the new groups.
    this.selectorPanel.removeAll();
    this.clusterGraphPanel.removeAll();
    int rows = (int) Math.ceil(Math.sqrt(sampleGroups.size()));
    // use floating-point division here; integer division would make Math.ceil a no-op
    int cols = (int) Math.ceil((double) sampleGroups.size() / rows);
    GridLayout layout = (GridLayout) this.selectorPanel.getLayout();
    layout.setRows(rows);
    layout.setColumns(cols);
    layout = (GridLayout) this.clusterGraphPanel.getLayout();
    layout.setRows(rows);
    layout.setColumns(cols);

    // start the clusterer(s) in parallel.
    Map<Thread, RunnableClusterer> clusterers = new HashMap<Thread, RunnableClusterer>();
    for (SampleGroup group : sampleGroups) {
        RunnableClusterer clusterer = new RunnableClusterer(dialog.getReturnValue());
        SampleGroup filteredGroup = new SampleGroup(group);
        filteredGroup.retainAll(this.sampleSelectorTree.getSamples());
        clusterer.setDataset(this.getDataset(molecules, filteredGroup));
        Thread thread = new Thread(clusterer);
        thread.start();
        clusterers.put(thread, clusterer);
    }

    // iterate through each group and join the threads
    Iterator<SampleGroup> groupIter = sampleGroups.iterator();
    Collection<ClusterSelectorTreePanel> clusterTreeList = new ArrayList<ClusterSelectorTreePanel>();
    this.clusterGraphPanel.removeAll();
    for (Map.Entry<Thread, RunnableClusterer> clusterer : clusterers.entrySet()) {
        try {
            clusterer.getKey().join();
            Dataset[] result = clusterer.getValue().getResult();
            Collection<Collection<Molecule>> clusters = new TreeSet<Collection<Molecule>>(
                    new DatasetComparator());
            Collection<Molecule> unclustered = new ArrayList<Molecule>(this.molecules);
            for (Dataset dataset : result) {
                Collection<Molecule> cluster = this.getMoleculesForDataset(dataset);
                clusters.add(cluster);
                unclustered.removeAll(cluster);
            }

            // create a new clustertree and add the appropriate listeners
            ClusterSelectorTreePanel clusterTree = new ClusterSelectorTreePanel(clusters);
            if (unclustered.size() > 0) {
                clusterTree.add(unclustered,
                        String.format(Settings.getLanguage().get("Unclustered") + " (%d)", unclustered.size()),
                        false);
            }
            for (ClusterSelectorTreePanel tree : clusterTreeList) {
                tree.addTreeCheckingListener(clusterTree);
                clusterTree.addTreeCheckingListener(tree);
            }
            clusterTreeList.add(clusterTree);

            this.selectorPanel.add(clusterTree);
            ClusterGraph graph = new ClusterGraph(this.sampleSelectorTree, clusterTree, groupIter.next());
            this.clusterGraphPanel.add(graph);
            graph.setMeanGraph(clusterTree.getRoot());
        } catch (InterruptedException e) {
            Logger.getLogger(this.getClass()).debug(e, e);
        }
    }
    this.removeSampleGroupsMenuItem.setEnabled(sampleGroups.size() > 1);
    this.validate();
}

From source file:org.apache.hadoop.mapred.CapacityTaskScheduler.java

@Override
public synchronized Collection<JobInProgress> getJobs(String queueName) {
    Collection<JobInProgress> jobCollection = new ArrayList<JobInProgress>();
    CapacitySchedulerQueue queue = queueInfoMap.get(queueName);
    Collection<JobInProgress> runningJobs = queue.getRunningJobs();
    jobCollection.addAll(queue.getInitializingJobs());
    if (runningJobs != null) {
        jobCollection.addAll(runningJobs);
    }
    Collection<JobInProgress> waitingJobs = queue.getWaitingJobs();
    Collection<JobInProgress> tempCollection = new ArrayList<JobInProgress>();
    if (waitingJobs != null) {
        tempCollection.addAll(waitingJobs);
    }
    if (runningJobs != null) {
        // removeAll throws NullPointerException for a null argument,
        // so guard it the same way as the addAll above.
        tempCollection.removeAll(runningJobs);
    }
    if (!tempCollection.isEmpty()) {
        jobCollection.addAll(tempCollection);
    }
    return jobCollection;
}

From source file:org.apache.kylin.cube.CubeCapabilityChecker.java

public static CapabilityResult check(CubeInstance cube, SQLDigest digest) {
    CapabilityResult result = new CapabilityResult();
    result.capable = false;

    // match joins is ensured at model select

    // dimensions & measures
    Collection<TblColRef> dimensionColumns = getDimensionColumns(digest);
    Collection<FunctionDesc> aggrFunctions = digest.aggregations;
    Collection<TblColRef> unmatchedDimensions = unmatchedDimensions(dimensionColumns, cube);
    Collection<FunctionDesc> unmatchedAggregations = unmatchedAggregations(aggrFunctions, cube);

    // try custom measure types
    tryCustomMeasureTypes(unmatchedDimensions, unmatchedAggregations, digest, cube, result);

    //more tricks
    String rootFactTable = cube.getRootFactTable();
    if (rootFactTable.equals(digest.factTable)) {
        //for query-on-facttable
        //1. dimension as measure

        if (!unmatchedAggregations.isEmpty()) {
            tryDimensionAsMeasures(unmatchedAggregations, result,
                    cube.getDescriptor().listDimensionColumnsIncludingDerived());
        }
    } else {
        //for non query-on-facttable 
        if (cube.getSegments().get(0).getSnapshots().containsKey(digest.factTable)) {

            Set<TblColRef> dimCols = Sets
                    .newHashSet(cube.getModel().findFirstTable(digest.factTable).getColumns());

            //1. all aggregations on lookup table can be done. For distinct count, mark them all DimensionAsMeasures
            // so that the measure has a chance to be upgraded to DimCountDistinctMeasureType in org.apache.kylin.metadata.model.FunctionDesc#reInitMeasureType
            if (!unmatchedAggregations.isEmpty()) {
                Iterator<FunctionDesc> itr = unmatchedAggregations.iterator();
                while (itr.hasNext()) {
                    FunctionDesc functionDesc = itr.next();
                    if (dimCols.containsAll(functionDesc.getParameter().getColRefs())) {
                        itr.remove();
                    }
                }
            }
            tryDimensionAsMeasures(Lists.newArrayList(aggrFunctions), result, dimCols);

            //2. more "dimensions" contributed by snapshot
            if (!unmatchedDimensions.isEmpty()) {
                unmatchedDimensions.removeAll(dimCols);
            }
        } else {
            logger.info("cube {} does not touch lookup table {} at all", cube.getName(), digest.factTable);
        }
    }

    if (!unmatchedDimensions.isEmpty()) {
        logger.info("Exclude cube " + cube.getName() + " because unmatched dimensions: " + unmatchedDimensions);
        return result;
    }

    if (!unmatchedAggregations.isEmpty()) {
        logger.info(
                "Exclude cube " + cube.getName() + " because unmatched aggregations: " + unmatchedAggregations);
        return result;
    }

    if (cube.getStorageType() == IStorageAware.ID_HBASE
            && MassInTupleFilter.containsMassInTupleFilter(digest.filter)) {
        logger.info("Exclude cube " + cube.getName()
                + " because only v2 storage + v2 query engine supports massin");
        return result;
    }

    if (digest.limitPrecedesAggr) {
        logger.info("Exclude cube " + cube.getName() + " because there's limit preceding aggregation");
        return result;
    }

    if (digest.isRawQuery && rootFactTable.equals(digest.factTable)) {
        result.influences.add(new CapabilityInfluence() {
            @Override
            public double suggestCostMultiplier() {
                return 100;
            }
        });
    }

    // cost will be minded by caller
    result.capable = true;
    return result;
}
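
The iterator-and-remove loop in the middle of this example is the predicate-based cousin of removeAll: it drops every aggregation whose columns are covered by dimCols. On Java 8+ the same loop could be written with Collection.removeIf; this sketch reuses the example's own identifiers, so it is not runnable on its own:

// Equivalent to the while (itr.hasNext()) { ... itr.remove(); } loop above:
unmatchedAggregations.removeIf(
        functionDesc -> dimCols.containsAll(functionDesc.getParameter().getColRefs()));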

From source file:org.apache.cassandra.service.StorageService.java

public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String table) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range, InetAddress> pendingRanges = HashMultimap.create();
    Map<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints();

    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) {
        if (logger_.isDebugEnabled())
            logger_.debug("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", table);
        tm.setPendingRanges(table, pendingRanges);
        return;
    }

    Multimap<InetAddress, Range> addressRanges = strategy.getAddressRanges();

    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();

    // get all ranges that will be affected by leaving nodes
    Set<Range> affectedRanges = new HashSet<Range>();
    for (InetAddress endpoint : leavingEndpoints)
        affectedRanges.addAll(addressRanges.get(endpoint));

    // for each of those ranges, find what new nodes will be responsible for the range when
    // all leaving nodes are gone.
    for (Range range : affectedRanges) {
        Collection<InetAddress> currentEndpoints = strategy.calculateNaturalEndpoints(range.right, tm);
        Collection<InetAddress> newEndpoints = strategy.calculateNaturalEndpoints(range.right, allLeftMetadata);
        newEndpoints.removeAll(currentEndpoints);
        pendingRanges.putAll(range, newEndpoints);
    }

    // At this stage pendingRanges has been updated according to leave operations. We can
    // now continue the calculation by checking bootstrapping nodes.

    // For each of the bootstrapping nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    for (Map.Entry<Token, InetAddress> entry : bootstrapTokens.entrySet()) {
        InetAddress endpoint = entry.getValue();

        allLeftMetadata.updateNormalToken(entry.getKey(), endpoint);
        for (Range range : strategy.getAddressRanges(allLeftMetadata).get(endpoint))
            pendingRanges.put(range, endpoint);
        allLeftMetadata.removeEndpoint(endpoint);
    }

    // At this stage pendingRanges has been updated according to leaving and bootstrapping nodes.
    // We can now finish the calculation by checking moving nodes.

    // For each of the moving nodes, we do the same thing we did for bootstrapping:
    // simply add and remove them one by one to allLeftMetadata and check in between what their ranges would be.
    for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) {
        InetAddress endpoint = moving.right; // address of the moving node

        //  moving.left is a new token of the endpoint
        allLeftMetadata.updateNormalToken(moving.left, endpoint);

        for (Range range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) {
            pendingRanges.put(range, endpoint);
        }

        allLeftMetadata.removeEndpoint(endpoint);
    }

    tm.setPendingRanges(table, pendingRanges);

    if (logger_.isDebugEnabled())
        logger_.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}

From source file:org.nuclos.client.ui.collect.result.ResultController.java

/**
 * command: select columns
 * Lets the user select the columns to show in the result list.
 */
public void cmdSelectColumns(final ChoiceEntityFieldList fields) {
    final SelectColumnsController ctl = new SelectColumnsController(clctctl.getTab());
    // capture the selection as it is now, so that the newly added columns can be
    // computed by removeAll after the user's choice has been applied below
    final List<CollectableEntityField> lstSelectedOld = new ArrayList<CollectableEntityField>(
            fields.getSelectedFields());
    final ResultPanel<Clct> panel = getResultPanel();
    final JTable tbl = panel.getResultTable();

    final Map<String, Integer> mpWidths = panel.getVisibleColumnWidth(fields.getSelectedFields());
    ctl.setModel(fields);
    final boolean bOK = ctl.run(getSpringLocaleDelegate().getMessage("SelectColumnsController.1",
            "Anzuzeigende Spalten ausw\u00e4hlen"));

    if (bOK) {
        UIUtils.runCommand(clctctl.getTab(), new CommonRunnable() {
            @Override
            public void run() throws CommonBusinessException {
                final int iSelectedRow = tbl.getSelectedRow();
                fields.set(ctl.getAvailableObjects(), ctl.getSelectedObjects(),
                        clctctl.getResultController().getCollectableEntityFieldComparator());
                final List<? extends CollectableEntityField> lstSelectedNew = fields.getSelectedFields();
                ((CollectableTableModel<?>) tbl.getModel()).setColumns(lstSelectedNew);
                panel.setupTableCellRenderers(tbl);
                Collection<CollectableEntityField> collNewlySelected = new ArrayList<CollectableEntityField>(
                        lstSelectedNew);
                // remove the old selection captured above; removing fields.getSelectedFields()
                // here would subtract the new selection from itself and always yield an empty result
                collNewlySelected.removeAll(lstSelectedOld);
                if (!collNewlySelected.isEmpty()) {
                    if (!clctctl.getSearchStrategy().getCollectablesInResultAreAlwaysComplete()) {
                        // refresh the result:
                        clctctl.getResultController().getSearchResultStrategy().refreshResult();
                    }
                }

                // reselect the previously selected row (which gets lost by refreshing the model)
                if (iSelectedRow != -1) {
                    tbl.setRowSelectionInterval(iSelectedRow, iSelectedRow);
                }

                isIgnorePreferencesUpdate = true;
                panel.restoreColumnWidths(ctl.getSelectedObjects(), mpWidths);
                isIgnorePreferencesUpdate = false;

                // Set the newly added columns to optimal width
                for (CollectableEntityField clctef : collNewlySelected) {
                    TableUtils.setOptimalColumnWidth(tbl,
                            tbl.getColumnModel().getColumnIndex(clctef.getLabel()));
                }
            }
        });
    }
}