Example usage for org.apache.commons.collections CollectionUtils subtract

Introduction

This page collects example usages of org.apache.commons.collections.CollectionUtils.subtract from open-source projects.

Prototype

public static Collection subtract(final Collection a, final Collection b) 

Document

Returns a new Collection containing a - b. The cardinality of each element e in the returned Collection will be the cardinality of e in a minus the cardinality of e in b, or zero, whichever is greater.
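
A minimal, self-contained sketch of the semantics (assuming commons-collections 3.x on the classpath): subtract works by cardinality, so a duplicate in a survives unless it is matched one-for-one in b.

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;

public class SubtractDemo {
    public static void main(String[] args) {
        List<String> a = Arrays.asList("x", "x", "y", "z");
        List<String> b = Arrays.asList("x", "z");

        // Cardinality-based difference, not a pure set difference: one "x"
        // survives because a contains it twice and b only once.
        Collection result = CollectionUtils.subtract(a, b);
        System.out.println(result); // [x, y] with commons-collections 3.x
    }
}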

Usage

From source file:org.apache.cayenne.modeler.action.ImportEOModelAction.java

/**
 * Adds a DataMap to the project.
 */
protected void addDataMap(DataMap map, DataMap currentMap) {

    ProjectController mediator = getProjectController();

    if (currentMap != null) {
        // merge with existing map... have to memorize map state before and after
        // to do the right events

        Collection originalOE = new ArrayList(currentMap.getObjEntities());
        Collection originalDE = new ArrayList(currentMap.getDbEntities());
        Collection originalQueries = new ArrayList(currentMap.getQueryDescriptors());

        currentMap.mergeWithDataMap(map);
        map = currentMap;

        // postprocess changes
        Collection newOE = new ArrayList(currentMap.getObjEntities());
        Collection newDE = new ArrayList(currentMap.getDbEntities());
        Collection newQueries = new ArrayList(currentMap.getQueryDescriptors());

        EntityEvent entityEvent = new EntityEvent(Application.getFrame(), null);
        QueryEvent queryEvent = new QueryEvent(Application.getFrame(), null);

        Collection addedOE = CollectionUtils.subtract(newOE, originalOE);
        Iterator it = addedOE.iterator();
        while (it.hasNext()) {
            Entity e = (Entity) it.next();
            entityEvent.setEntity(e);
            entityEvent.setId(MapEvent.ADD);
            mediator.fireObjEntityEvent(entityEvent);
        }

        Collection removedOE = CollectionUtils.subtract(originalOE, newOE);
        it = removedOE.iterator();
        while (it.hasNext()) {
            Entity e = (Entity) it.next();
            entityEvent.setEntity(e);
            entityEvent.setId(MapEvent.REMOVE);
            mediator.fireObjEntityEvent(entityEvent);
        }

        Collection addedDE = CollectionUtils.subtract(newDE, originalDE);
        it = addedDE.iterator();
        while (it.hasNext()) {
            Entity e = (Entity) it.next();
            entityEvent.setEntity(e);
            entityEvent.setId(MapEvent.ADD);
            mediator.fireDbEntityEvent(entityEvent);
        }

        Collection removedDE = CollectionUtils.subtract(originalDE, newDE);
        it = removedDE.iterator();
        while (it.hasNext()) {
            Entity e = (Entity) it.next();
            entityEvent.setEntity(e);
            entityEvent.setId(MapEvent.REMOVE);
            mediator.fireDbEntityEvent(entityEvent);
        }

        // queries
        Collection addedQueries = CollectionUtils.subtract(newQueries, originalQueries);
        it = addedQueries.iterator();
        while (it.hasNext()) {
            QueryDescriptor q = (QueryDescriptor) it.next();
            queryEvent.setQuery(q);
            queryEvent.setId(MapEvent.ADD);
            mediator.fireQueryEvent(queryEvent);
        }

        Collection removedQueries = CollectionUtils.subtract(originalQueries, newQueries);
        it = removedQueries.iterator();
        while (it.hasNext()) {
            QueryDescriptor q = (QueryDescriptor) it.next();
            queryEvent.setQuery(q);
            queryEvent.setId(MapEvent.REMOVE);
            mediator.fireQueryEvent(queryEvent);
        }

        mediator.fireDataMapDisplayEvent(new DataMapDisplayEvent(Application.getFrame(), map,
                (DataChannelDescriptor) mediator.getProject().getRootNode(), mediator.getCurrentDataNode()));
    } else {
        // fix DataMap name, as there maybe a map with the same name already
        ConfigurationNode root = (DataChannelDescriptor) mediator.getProject().getRootNode();
        map.setName(NameBuilder.builder(map, root).baseName(map.getName()).name());

        // side effect of this operation is that if a node was created, this DataMap
        // will be linked with it...
        mediator.addDataMap(Application.getFrame(), map);
    }
}
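
The example above snapshots the live entity collections into new ArrayLists before merging, because subtract needs a stable pre-merge copy to diff against. A condensed sketch of that snapshot-diff idiom, with hypothetical data standing in for the Cayenne model:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;

public class SnapshotDiffDemo {
    public static void main(String[] args) {
        // Hypothetical stand-in for a model collection that a merge rewrites in place.
        List<String> entities = new ArrayList<>(Arrays.asList("Artist", "Painting"));

        // Snapshot the "before" state, since the merge mutates the live collection.
        Collection<String> before = new ArrayList<>(entities);
        entities.remove("Painting");               // hypothetical merge effects
        entities.add("Gallery");
        Collection<String> after = new ArrayList<>(entities);

        System.out.println("added: " + CollectionUtils.subtract(after, before));   // [Gallery]
        System.out.println("removed: " + CollectionUtils.subtract(before, after)); // [Painting]
    }
}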

From source file:org.apache.drill.exec.store.parquet2.DrillParquetReader.java

@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {

    try {
        this.operatorContext = context;
        schema = footer.getFileMetaData().getSchema();
        MessageType projection = null;

        if (isStarQuery()) {
            projection = schema;
        } else {
            columnsNotFound = new ArrayList<SchemaPath>();
            projection = getProjection(schema, getColumns(), columnsNotFound);
            if (projection == null) {
                projection = schema;
            }
            if (columnsNotFound != null && columnsNotFound.size() > 0) {
                nullFilledVectors = new ArrayList<>();
                for (SchemaPath col : columnsNotFound) {
                    nullFilledVectors
                            .add((NullableIntVector) output.addField(
                                    MaterializedField.create(col,
                                            org.apache.drill.common.types.Types
                                                    .optional(TypeProtos.MinorType.INT)),
                                    (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(
                                            TypeProtos.MinorType.INT, TypeProtos.DataMode.OPTIONAL)));
                }
                if (columnsNotFound.size() == getColumns().size()) {
                    noColumnsFound = true;
                }
            }
        }

        logger.debug("Requesting schema {}", projection);

        ColumnIOFactory factory = new ColumnIOFactory(false);
        MessageColumnIO columnIO = factory.getColumnIO(projection, schema);
        Map<ColumnPath, ColumnChunkMetaData> paths = new HashMap<>();

        for (ColumnChunkMetaData md : footer.getBlocks().get(entry.getRowGroupIndex()).getColumns()) {
            paths.put(md.getPath(), md);
        }

        Path filePath = new Path(entry.getPath());

        BlockMetaData blockMetaData = footer.getBlocks().get(entry.getRowGroupIndex());

        recordCount = (int) blockMetaData.getRowCount();

        pageReadStore = new ColumnChunkIncReadStore(recordCount,
                new DirectCodecFactory(fileSystem.getConf(), operatorContext.getAllocator()),
                operatorContext.getAllocator(), fileSystem, filePath);

        for (String[] path : schema.getPaths()) {
            Type type = schema.getType(path);
            if (type.isPrimitive()) {
                ColumnChunkMetaData md = paths.get(ColumnPath.get(path));
                pageReadStore.addColumn(schema.getColumnDescription(path), md);
            }
        }

        if (!noColumnsFound) {
            writer = new VectorContainerWriter(output);
            // Discard the columns not found in the schema when creating the
            // DrillParquetRecordMaterializer, since they have already been added to the output.
            final Collection<SchemaPath> columns = columnsNotFound == null || columnsNotFound.size() == 0
                    ? getColumns()
                    : CollectionUtils.subtract(getColumns(), columnsNotFound);
            recordMaterializer = new DrillParquetRecordMaterializer(output, writer, projection, columns,
                    fragmentContext.getOptions());
            primitiveVectors = writer.getMapVector().getPrimitiveVectors();
            recordReader = columnIO.getRecordReader(pageReadStore, recordMaterializer);
        }
    } catch (Exception e) {
        handleAndRaise("Failure in setting up reader", e);
    }
}
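
In commons-collections 3.x, subtract returns a raw Collection, so the assignment to Collection<SchemaPath> above is an unchecked operation. A small sketch of both options; the typed variant assumes commons-collections4 is also on the classpath:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class SubtractGenericsDemo {
    public static void main(String[] args) {
        List<String> requested = Arrays.asList("a", "b", "c");
        List<String> notFound = Arrays.asList("b");

        // commons-collections 3.x: raw return type, so the assignment is unchecked.
        @SuppressWarnings("unchecked")
        Collection<String> v3 = org.apache.commons.collections.CollectionUtils.subtract(requested, notFound);

        // commons-collections4: typed overload, no cast or suppression needed.
        Collection<String> v4 = org.apache.commons.collections4.CollectionUtils.subtract(requested, notFound);

        System.out.println(v3); // [a, c]
        System.out.println(v4); // [a, c]
    }
}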

From source file:org.apache.eagle.alert.coordinator.trigger.DynamicPolicyLoader.java

/**
 * On the first run, cachedPolicies is empty, so all existing policies are reported as addedPolicies.
 */
@SuppressWarnings("unchecked")
@Override
public void run() {
    // catch every exception to avoid a zombie thread
    try {
        final Stopwatch watch = Stopwatch.createStarted();
        LOG.info("Starting to load policies");
        List<PolicyDefinition> current = client.listPolicies();
        Map<String, PolicyDefinition> currPolicies = new HashMap<>();
        current.forEach(pe -> currPolicies.put(pe.getName(), pe));

        Collection<String> addedPolicies = CollectionUtils.subtract(currPolicies.keySet(),
                cachedPolicies.keySet());
        Collection<String> removedPolicies = CollectionUtils.subtract(cachedPolicies.keySet(),
                currPolicies.keySet());
        Collection<String> potentiallyModifiedPolicies = CollectionUtils.intersection(currPolicies.keySet(),
                cachedPolicies.keySet());

        List<String> reallyModifiedPolicies = new ArrayList<>();
        for (String updatedPolicy : potentiallyModifiedPolicies) {
            if (currPolicies.get(updatedPolicy) != null
                    && !currPolicies.get(updatedPolicy).equals(cachedPolicies.get(updatedPolicy))) {
                reallyModifiedPolicies.add(updatedPolicy);
            }
        }

        boolean policyChanged = !addedPolicies.isEmpty() || !removedPolicies.isEmpty()
                || !reallyModifiedPolicies.isEmpty();

        if (!policyChanged) {
            LOG.info("No policy (totally {}) changed since last round", current.size());
            return;
        }

        synchronized (this) {
            for (PolicyChangeListener listener : listeners) {
                listener.onPolicyChange(current, addedPolicies, removedPolicies, reallyModifiedPolicies);
            }
        }

        watch.stop();

        LOG.info("Finished loading {} policies, added: {}, removed: {}, modified: {}, taken: {} ms",
                current.size(), addedPolicies.size(), removedPolicies.size(),
                reallyModifiedPolicies.size(), watch.elapsed(TimeUnit.MILLISECONDS));
        // reset cached policies
        cachedPolicies = currPolicies;
    } catch (Throwable t) {
        LOG.warn("Error loading policy, but continue to run", t);
    }
}

From source file:org.apache.eagle.alert.engine.evaluator.nodata.NoDataPolicyHandler.java

@SuppressWarnings("rawtypes")
private void compareAndEmit(Set wisb, Set wiri, StreamEvent event) {
    // compare with wisbValues if wisbValues are already there for dynamic type
    Collection noDataValues = CollectionUtils.subtract(wisb, wiri);
    LOG.debug("nodatavalues:" + noDataValues + ", wisb: " + wisb + ", wiri: " + wiri);
    if (noDataValues != null && noDataValues.size() > 0) {
        LOG.info("No data alert is triggered with no data values {} and wisb {}", noDataValues, wisbValues);
        AlertStreamEvent alertEvent = createAlertEvent(event.getTimestamp(), event.getData());
        collector.emit(alertEvent);
    }
}
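
Here wisb is the set of values that should be present and wiri is the set actually observed, so subtract(wisb, wiri) yields exactly the values that went silent. A minimal sketch of the same detection with hypothetical host names:

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.apache.commons.collections.CollectionUtils;

public class NoDataDemo {
    public static void main(String[] args) {
        // Hypothetical host lists: what should report vs. what actually reported.
        Set<String> wisb = new HashSet<>(Arrays.asList("host1", "host2", "host3"));
        Set<String> wiri = new HashSet<>(Arrays.asList("host1", "host3"));

        Collection silent = CollectionUtils.subtract(wisb, wiri);
        if (!silent.isEmpty()) {
            System.out.println("no-data alert for: " + silent); // [host2]
        }
    }
}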

From source file:org.apache.eagle.alert.engine.evaluator.nodata.NoDataPolicyTimeBatchHandler.java

@SuppressWarnings("rawtypes")
public void compareAndEmit(Set wisb, Set wiri, StreamEvent event) {
    // compare with wisbValues if wisbValues are already there for dynamic type
    Collection noDataValues = CollectionUtils.subtract(wisb, wiri);
    LOG.debug("nodatavalues:" + noDataValues + ", wisb: " + wisb + ", wiri: " + wiri);
    if (noDataValues != null && noDataValues.size() > 0) {
        LOG.info("No data alert is triggered with no data values {} and wisb {}", noDataValues, wisb);

        String is = policyDef.getOutputStreams().get(0);
        StreamDefinition sd = sds.get(is);
        int timestampIndex = sd.getColumnIndex("timestamp");
        int hostIndex = sd.getColumnIndex("host");
        int originalStreamNameIndex = sd.getColumnIndex("originalStreamName");

        for (Object one : noDataValues) {
            Object[] triggerEvent = new Object[sd.getColumns().size()];
            for (int i = 0; i < sd.getColumns().size(); i++) {
                if (i == timestampIndex) {
                    triggerEvent[i] = System.currentTimeMillis();
                } else if (i == hostIndex) {
                    triggerEvent[hostIndex] = ((List) one).get(0);
                } else if (i == originalStreamNameIndex) {
                    triggerEvent[originalStreamNameIndex] = event.getStreamId();
                } else if (sd.getColumns().size() < i) {
                    LOG.error("strema event data have different lenght compare to column definition!");
                } else {
                    triggerEvent[i] = sd.getColumns().get(i).getDefaultValue();
                }
            }
            AlertStreamEvent alertEvent = createAlertEvent(sd, event.getTimestamp(), triggerEvent);
            LOG.info(String.format("Nodata alert %s generated and will be emitted",
                    Joiner.on(",").join(triggerEvent)));
            collector.emit(alertEvent);
        }

    }
}

From source file:org.apache.eagle.alert.engine.runner.AlertBolt.java

@SuppressWarnings("unchecked")
@Override
public synchronized void onAlertBoltSpecChange(AlertBoltSpec spec, Map<String, StreamDefinition> sds) {
    List<PolicyDefinition> newPolicies = spec.getBoltPoliciesMap().get(boltId);
    if (newPolicies == null) {
        LOG.info("no new policy with AlertBoltSpec {} for this bolt {}", spec, boltId);
        return;
    }

    Map<String, PolicyDefinition> newPoliciesMap = new HashMap<>();
    newPolicies.forEach(p -> newPoliciesMap.put(p.getName(), p));
    MapComparator<String, PolicyDefinition> comparator = new MapComparator<>(newPoliciesMap, cachedPolicies);
    comparator.compare();

    MapComparator<String, StreamDefinition> streamComparator = new MapComparator<>(sds, sdf);
    streamComparator.compare();

    List<StreamDefinition> addOrUpdatedStreams = streamComparator.getAdded();
    addOrUpdatedStreams.addAll(streamComparator.getModified());
    List<PolicyDefinition> cachedPoliciesTemp = new ArrayList<>(cachedPolicies.values());
    addOrUpdatedStreams.forEach(s -> {
        cachedPoliciesTemp.stream()
                .filter(p -> p.getInputStreams().contains(s.getStreamId())
                        || p.getOutputStreams().contains(s.getStreamId()))
                .forEach(p -> {
                    boolean alreadyTracked = comparator.getModified().stream()
                            .anyMatch(x -> x.getName().equals(p.getName()))
                            || comparator.getAdded().stream()
                                    .anyMatch(x -> x.getName().equals(p.getName()));
                    if (!alreadyTracked) {
                        comparator.getModified().add(p);
                    }
                });
    });

    policyGroupEvaluator.onPolicyChange(spec.getVersion(), comparator.getAdded(), comparator.getRemoved(),
            comparator.getModified(), sds);

    // update alert output collector
    Set<PublishPartition> newPublishPartitions = new HashSet<>();
    spec.getPublishPartitions().forEach(p -> {
        if (newPolicies.stream().anyMatch(o -> o.getName().equals(p.getPolicyId()))) {
            newPublishPartitions.add(p);
        }
    });

    Collection<PublishPartition> addedPublishPartitions = CollectionUtils.subtract(newPublishPartitions,
            cachedPublishPartitions);
    Collection<PublishPartition> removedPublishPartitions = CollectionUtils.subtract(cachedPublishPartitions,
            newPublishPartitions);
    Collection<PublishPartition> modifiedPublishPartitions = CollectionUtils.intersection(newPublishPartitions,
            cachedPublishPartitions);

    LOG.debug("added PublishPartition " + addedPublishPartitions);
    LOG.debug("removed PublishPartition " + removedPublishPartitions);
    LOG.debug("modified PublishPartition " + modifiedPublishPartitions);

    alertOutputCollector.onAlertBoltSpecChange(addedPublishPartitions, removedPublishPartitions,
            modifiedPublishPartitions);

    // switch
    cachedPolicies = newPoliciesMap;
    cachedPublishPartitions = newPublishPartitions;
    sdf = sds;
    specVersion = spec.getVersion();
    this.spec = spec;
}

From source file:org.apache.eagle.alert.engine.runner.MapComparator.java

@SuppressWarnings("unchecked")
public void compare() {
    Set<K> keys1 = map1.keySet();
    Set<K> keys2 = map2.keySet();
    Collection<K> addedKeys = CollectionUtils.subtract(keys1, keys2);
    Collection<K> removedKeys = CollectionUtils.subtract(keys2, keys1);
    Collection<K> modifiedKeys = CollectionUtils.intersection(keys1, keys2);

    addedKeys.forEach(k -> added.add(map1.get(k)));
    removedKeys.forEach(k -> removed.add(map2.get(k)));
    modifiedKeys.forEach(k -> {
        if (!map1.get(k).equals(map2.get(k))) {
            modified.add(map1.get(k));
        }
    });
}
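
MapComparator distills the diff pattern used throughout these bolts: added = subtract(new keys, old keys), removed = subtract(old keys, new keys), and modified = the intersection filtered by equals. A standalone sketch of that three-way diff over plain maps, with hypothetical keys and values:

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.collections.CollectionUtils;

public class ThreeWayDiffDemo {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Map<String, String> current = new HashMap<>();
        current.put("p1", "v2");   // present in both, value changed
        current.put("p3", "v1");   // only in current -> added
        Map<String, String> cached = new HashMap<>();
        cached.put("p1", "v1");
        cached.put("p2", "v1");    // only in cached -> removed

        Collection<String> added = CollectionUtils.subtract(current.keySet(), cached.keySet());
        Collection<String> removed = CollectionUtils.subtract(cached.keySet(), current.keySet());
        Collection<String> modified = CollectionUtils.intersection(current.keySet(), cached.keySet());

        // Keep only the common keys whose values actually changed.
        modified.removeIf(k -> current.get(k).equals(cached.get(k)));

        System.out.println("added: " + added);       // [p3]
        System.out.println("removed: " + removed);   // [p2]
        System.out.println("modified: " + modified); // [p1]
    }
}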

From source file:org.apache.eagle.alert.engine.runner.StreamRouterBolt.java

/**
 * Compares the new spec with the metadata snapshot cache to generate a diff (added, removed, modified) between versions.
 *
 * @param spec
 */
@SuppressWarnings("unchecked")
@Override
public synchronized void onStreamRouteBoltSpecChange(RouterSpec spec, Map<String, StreamDefinition> sds) {
    sanityCheck(spec);

    // figure out added, removed, modified StreamSortSpec
    Map<StreamPartition, StreamSortSpec> newSSS = spec.makeSSS();

    Set<StreamPartition> newStreamIds = newSSS.keySet();
    Set<StreamPartition> cachedStreamIds = cachedSSS.keySet();
    Collection<StreamPartition> addedStreamIds = CollectionUtils.subtract(newStreamIds, cachedStreamIds);
    Collection<StreamPartition> removedStreamIds = CollectionUtils.subtract(cachedStreamIds, newStreamIds);
    Collection<StreamPartition> modifiedStreamIds = CollectionUtils.intersection(newStreamIds, cachedStreamIds);

    Map<StreamPartition, StreamSortSpec> added = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> removed = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> modified = new HashMap<>();
    addedStreamIds.forEach(s -> added.put(s, newSSS.get(s)));
    removedStreamIds.forEach(s -> removed.put(s, cachedSSS.get(s)));
    modifiedStreamIds.forEach(s -> {
        if (!newSSS.get(s).equals(cachedSSS.get(s))) { // this means StreamSortSpec is changed for one specific streamId
            modified.put(s, newSSS.get(s));
        }
    });
    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamSortSpec " + added);
        LOG.debug("removed StreamSortSpec " + removed);
        LOG.debug("modified StreamSortSpec " + modified);
    }
    router.onStreamSortSpecChange(added, removed, modified);
    // switch cache
    cachedSSS = newSSS;

    // figure out added, removed, modified StreamRouterSpec
    Map<StreamPartition, List<StreamRouterSpec>> newSRS = spec.makeSRS();

    Set<StreamPartition> newStreamPartitions = newSRS.keySet();
    Set<StreamPartition> cachedStreamPartitions = cachedSRS.keySet();

    Collection<StreamPartition> addedStreamPartitions = CollectionUtils.subtract(newStreamPartitions,
            cachedStreamPartitions);
    Collection<StreamPartition> removedStreamPartitions = CollectionUtils.subtract(cachedStreamPartitions,
            newStreamPartitions);
    Collection<StreamPartition> modifiedStreamPartitions = CollectionUtils.intersection(newStreamPartitions,
            cachedStreamPartitions);

    Collection<StreamRouterSpec> addedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> removedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> modifiedRouterSpecs = new ArrayList<>();
    addedStreamPartitions.forEach(s -> addedRouterSpecs.addAll(newSRS.get(s)));
    removedStreamPartitions.forEach(s -> removedRouterSpecs.addAll(cachedSRS.get(s)));
    modifiedStreamPartitions.forEach(s -> {
        if (!CollectionUtils.isEqualCollection(newSRS.get(s), cachedSRS.get(s))) { // this means StreamRouterSpec is changed for one specific StreamPartition
            modifiedRouterSpecs.addAll(newSRS.get(s));
        }
    });

    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamRouterSpec " + addedRouterSpecs);
        LOG.debug("removed StreamRouterSpec " + removedRouterSpecs);
        LOG.debug("modified StreamRouterSpec " + modifiedRouterSpecs);
    }

    routeCollector.onStreamRouterSpecChange(addedRouterSpecs, removedRouterSpecs, modifiedRouterSpecs, sds);
    // switch cache
    cachedSRS = newSRS;
    sdf = sds;
    specVersion = spec.getVersion();
}

From source file:org.apache.eagle.alert.engine.spark.function.AlertBoltFunction.java

private void onAlertBoltSpecChange(String boltId, AlertBoltSpec spec, Map<String, StreamDefinition> sds,
        PolicyGroupEvaluatorImpl policyGroupEvaluator, AlertBoltOutputCollectorWrapper alertOutputCollector,
        PolicyState policyState, PublishState publishState) {
    List<PolicyDefinition> newPolicies = spec.getBoltPoliciesMap().get(boltId);
    LOG.debug("newPolicies {}", newPolicies);
    if (newPolicies == null) {
        LOG.info("no new policy with AlertBoltSpec {} for this bolt {}", spec, boltId);
        return;
    }

    Map<String, PolicyDefinition> newPoliciesMap = new HashMap<>();
    newPolicies.forEach(p -> newPoliciesMap.put(p.getName(), p));
    LOG.debug("newPoliciesMap {}", newPoliciesMap);
    MapComparator<String, PolicyDefinition> comparator = new MapComparator<>(newPoliciesMap,
            policyState.getCachedPolicyByBoltId(boltId));
    comparator.compare();
    LOG.debug("getAdded {}", comparator.getAdded());
    LOG.debug("getRemoved {}", comparator.getRemoved());
    LOG.debug("getModified {}", comparator.getModified());
    policyGroupEvaluator.onPolicyChange(null, comparator.getAdded(), comparator.getRemoved(),
            comparator.getModified(), sds);

    policyState.store(boltId, newPoliciesMap, policyGroupEvaluator.getPolicyDefinitionMap(),
            policyGroupEvaluator.getPolicyStreamHandlerMap());

    // update alert output collector
    Set<PublishPartition> newPublishPartitions = new HashSet<>();
    spec.getPublishPartitions().forEach(p -> {
        if (newPolicies.stream().anyMatch(o -> o.getName().equals(p.getPolicyId()))) {
            newPublishPartitions.add(p);
        }
    });

    Set<PublishPartition> cachedPublishPartitions = publishState.getCachedPublishPartitionsByBoltId(boltId);
    Collection<PublishPartition> addedPublishPartitions = CollectionUtils.subtract(newPublishPartitions,
            cachedPublishPartitions);
    Collection<PublishPartition> removedPublishPartitions = CollectionUtils.subtract(cachedPublishPartitions,
            newPublishPartitions);
    Collection<PublishPartition> modifiedPublishPartitions = CollectionUtils.intersection(newPublishPartitions,
            cachedPublishPartitions);

    LOG.debug("added PublishPartition " + addedPublishPartitions);
    LOG.debug("removed PublishPartition " + removedPublishPartitions);
    LOG.debug("modified PublishPartition " + modifiedPublishPartitions);

    alertOutputCollector.onAlertBoltSpecChange(addedPublishPartitions, removedPublishPartitions,
            modifiedPublishPartitions);

    publishState.storePublishPartitions(boltId, alertOutputCollector.getPublishPartitions());

}

From source file:org.apache.eagle.alert.engine.spark.function.StreamRouteBoltFunction.java

public void onStreamRouteBoltSpecChange(RouterSpec spec, Map<String, StreamDefinition> sds,
        StreamRouterImpl router, StreamRouterBoltOutputCollector routeCollector,
        final Map<StreamPartition, StreamSortSpec> cachedSSS,
        final Map<StreamPartition, List<StreamRouterSpec>> cachedSRS, int partitionNum) {
    //sanityCheck(spec);

    // figure out added, removed, modified StreamSortSpec
    Map<StreamPartition, StreamSortSpec> newSSS = spec.makeSSS();

    Set<StreamPartition> newStreamIds = newSSS.keySet();
    Set<StreamPartition> cachedStreamIds = cachedSSS.keySet();
    Collection<StreamPartition> addedStreamIds = CollectionUtils.subtract(newStreamIds, cachedStreamIds);
    Collection<StreamPartition> removedStreamIds = CollectionUtils.subtract(cachedStreamIds, newStreamIds);
    Collection<StreamPartition> modifiedStreamIds = CollectionUtils.intersection(newStreamIds, cachedStreamIds);

    Map<StreamPartition, StreamSortSpec> added = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> removed = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> modified = new HashMap<>();
    addedStreamIds.forEach(s -> added.put(s, newSSS.get(s)));
    removedStreamIds.forEach(s -> removed.put(s, cachedSSS.get(s)));
    modifiedStreamIds.forEach(s -> {
        if (!newSSS.get(s).equals(cachedSSS.get(s))) { // this means StreamSortSpec is changed for one specific streamId
            modified.put(s, newSSS.get(s));
        }
    });
    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamSortSpec " + added);
        LOG.debug("removed StreamSortSpec " + removed);
        LOG.debug("modified StreamSortSpec " + modified);
    }
    router.onStreamSortSpecChange(added, removed, modified);
    // switch cache
    this.cachedSSS = newSSS;

    // figure out added, removed, modified StreamRouterSpec
    Map<StreamPartition, List<StreamRouterSpec>> newSRS = spec.makeSRS();
    Set<StreamPartition> newStreamPartitions = newSRS.keySet();
    Set<StreamPartition> cachedStreamPartitions = cachedSRS.keySet();

    Collection<StreamPartition> addedStreamPartitions = CollectionUtils.subtract(newStreamPartitions,
            cachedStreamPartitions);
    Collection<StreamPartition> removedStreamPartitions = CollectionUtils.subtract(cachedStreamPartitions,
            newStreamPartitions);
    Collection<StreamPartition> modifiedStreamPartitions = CollectionUtils.intersection(newStreamPartitions,
            cachedStreamPartitions);

    Collection<StreamRouterSpec> addedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> removedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> modifiedRouterSpecs = new ArrayList<>();
    addedStreamPartitions.forEach(s -> addedRouterSpecs.addAll(newSRS.get(s)));
    removedStreamPartitions.forEach(s -> removedRouterSpecs.addAll(cachedSRS.get(s)));
    modifiedStreamPartitions.forEach(s -> {
        if (!CollectionUtils.isEqualCollection(newSRS.get(s), cachedSRS.get(s))) { // this means StreamRouterSpec is changed for one specific StreamPartition
            modifiedRouterSpecs.addAll(newSRS.get(s));
        }
    });

    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamRouterSpec " + addedRouterSpecs);
        LOG.debug("removed StreamRouterSpec " + removedRouterSpecs);
        LOG.debug("modified StreamRouterSpec " + modifiedRouterSpecs);
    }

    routeCollector.onStreamRouterSpecChange(addedRouterSpecs, removedRouterSpecs, modifiedRouterSpecs, sds);
    // switch cache
    this.cachedSRS = newSRS;
    routeState.store(routeCollector, cachedSSS, cachedSRS, partitionNum);
}