List of usage examples for org.apache.commons.collections.CollectionUtils#isEqualCollection
public static boolean isEqualCollection(final Collection a, final Collection b)
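Before the project examples below, a minimal standalone sketch of the method's contract (the class name and sample data here are illustrative, not taken from any of the projects): isEqualCollection returns true exactly when the two collections contain the same elements with the same cardinalities, regardless of iteration order or collection type.

import java.util.Arrays;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;

public class IsEqualCollectionDemo {
    public static void main(String[] args) {
        List<String> a = Arrays.asList("x", "y", "y");
        List<String> b = Arrays.asList("y", "x", "y");
        List<String> c = Arrays.asList("x", "y");

        // true: same elements with the same counts, order ignored
        System.out.println(CollectionUtils.isEqualCollection(a, b));
        // false: "y" occurs twice in a but only once in c
        System.out.println(CollectionUtils.isEqualCollection(a, c));
    }
}

This cardinality-based comparison is why the examples below use it for fields whose element order is irrelevant, instead of relying on List.equals or Set.equals.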
From source file:org.apache.eagle.alert.engine.coordinator.StreamDefinition.java
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (!(obj instanceof StreamDefinition)) {
        return false;
    }
    return Objects.equals(this.streamId, ((StreamDefinition) obj).streamId)
        && Objects.equals(this.description, ((StreamDefinition) obj).description)
        && Objects.equals(this.validate, ((StreamDefinition) obj).validate)
        && Objects.equals(this.timeseries, ((StreamDefinition) obj).timeseries)
        && Objects.equals(this.dataSource, ((StreamDefinition) obj).dataSource)
        && Objects.equals(this.siteId, ((StreamDefinition) obj).siteId)
        && CollectionUtils.isEqualCollection(this.columns, ((StreamDefinition) obj).columns);
}
From source file:org.apache.eagle.alert.engine.coordinator.StreamPartition.java
@Override
public boolean equals(Object other) {
    if (other == this) {
        return true;
    }
    if (!(other instanceof StreamPartition)) {
        return false;
    }
    StreamPartition sp = (StreamPartition) other;
    return Objects.equals(streamId, sp.streamId)
        && Objects.equals(type, sp.type)
        && CollectionUtils.isEqualCollection(columns, sp.columns)
        && Objects.equals(sortSpec, sp.sortSpec);
}
From source file:org.apache.eagle.alert.engine.runner.StreamRouterBolt.java
/**
 * Compares with the metadata snapshot cache to generate a diff (added, removed, modified) between versions.
 *
 * @param spec
 */
@SuppressWarnings("unchecked")
@Override
public synchronized void onStreamRouteBoltSpecChange(RouterSpec spec, Map<String, StreamDefinition> sds) {
    sanityCheck(spec);

    // figure out added, removed, modified StreamSortSpec
    Map<StreamPartition, StreamSortSpec> newSSS = spec.makeSSS();

    Set<StreamPartition> newStreamIds = newSSS.keySet();
    Set<StreamPartition> cachedStreamIds = cachedSSS.keySet();
    Collection<StreamPartition> addedStreamIds = CollectionUtils.subtract(newStreamIds, cachedStreamIds);
    Collection<StreamPartition> removedStreamIds = CollectionUtils.subtract(cachedStreamIds, newStreamIds);
    Collection<StreamPartition> modifiedStreamIds = CollectionUtils.intersection(newStreamIds, cachedStreamIds);

    Map<StreamPartition, StreamSortSpec> added = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> removed = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> modified = new HashMap<>();
    addedStreamIds.forEach(s -> added.put(s, newSSS.get(s)));
    removedStreamIds.forEach(s -> removed.put(s, cachedSSS.get(s)));
    modifiedStreamIds.forEach(s -> {
        if (!newSSS.get(s).equals(cachedSSS.get(s))) {
            // this means StreamSortSpec is changed for one specific streamId
            modified.put(s, newSSS.get(s));
        }
    });
    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamSortSpec " + added);
        LOG.debug("removed StreamSortSpec " + removed);
        LOG.debug("modified StreamSortSpec " + modified);
    }
    router.onStreamSortSpecChange(added, removed, modified);
    // switch cache
    cachedSSS = newSSS;

    // figure out added, removed, modified StreamRouterSpec
    Map<StreamPartition, List<StreamRouterSpec>> newSRS = spec.makeSRS();

    Set<StreamPartition> newStreamPartitions = newSRS.keySet();
    Set<StreamPartition> cachedStreamPartitions = cachedSRS.keySet();
    Collection<StreamPartition> addedStreamPartitions = CollectionUtils.subtract(newStreamPartitions, cachedStreamPartitions);
    Collection<StreamPartition> removedStreamPartitions = CollectionUtils.subtract(cachedStreamPartitions, newStreamPartitions);
    Collection<StreamPartition> modifiedStreamPartitions = CollectionUtils.intersection(newStreamPartitions, cachedStreamPartitions);

    Collection<StreamRouterSpec> addedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> removedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> modifiedRouterSpecs = new ArrayList<>();
    addedStreamPartitions.forEach(s -> addedRouterSpecs.addAll(newSRS.get(s)));
    removedStreamPartitions.forEach(s -> removedRouterSpecs.addAll(cachedSRS.get(s)));
    modifiedStreamPartitions.forEach(s -> {
        if (!CollectionUtils.isEqualCollection(newSRS.get(s), cachedSRS.get(s))) {
            // this means StreamRouterSpec is changed for one specific StreamPartition
            modifiedRouterSpecs.addAll(newSRS.get(s));
        }
    });
    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamRouterSpec " + addedRouterSpecs);
        LOG.debug("removed StreamRouterSpec " + removedRouterSpecs);
        LOG.debug("modified StreamRouterSpec " + modifiedRouterSpecs);
    }
    routeCollector.onStreamRouterSpecChange(addedRouterSpecs, removedRouterSpecs, modifiedRouterSpecs, sds);
    // switch cache
    cachedSRS = newSRS;
    sdf = sds;
    specVersion = spec.getVersion();
}
From source file:org.apache.eagle.alert.engine.spark.function.StreamRouteBoltFunction.java
public void onStreamRouteBoltSpecChange(RouterSpec spec, Map<String, StreamDefinition> sds,
        StreamRouterImpl router, StreamRouterBoltOutputCollector routeCollector,
        final Map<StreamPartition, StreamSortSpec> cachedSSS,
        final Map<StreamPartition, List<StreamRouterSpec>> cachedSRS, int partitionNum) {
    //sanityCheck(spec);

    // figure out added, removed, modified StreamSortSpec
    Map<StreamPartition, StreamSortSpec> newSSS = spec.makeSSS();

    Set<StreamPartition> newStreamIds = newSSS.keySet();
    Set<StreamPartition> cachedStreamIds = cachedSSS.keySet();
    Collection<StreamPartition> addedStreamIds = CollectionUtils.subtract(newStreamIds, cachedStreamIds);
    Collection<StreamPartition> removedStreamIds = CollectionUtils.subtract(cachedStreamIds, newStreamIds);
    Collection<StreamPartition> modifiedStreamIds = CollectionUtils.intersection(newStreamIds, cachedStreamIds);

    Map<StreamPartition, StreamSortSpec> added = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> removed = new HashMap<>();
    Map<StreamPartition, StreamSortSpec> modified = new HashMap<>();
    addedStreamIds.forEach(s -> added.put(s, newSSS.get(s)));
    removedStreamIds.forEach(s -> removed.put(s, cachedSSS.get(s)));
    modifiedStreamIds.forEach(s -> {
        if (!newSSS.get(s).equals(cachedSSS.get(s))) {
            // this means StreamSortSpec is changed for one specific streamId
            modified.put(s, newSSS.get(s));
        }
    });
    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamSortSpec " + added);
        LOG.debug("removed StreamSortSpec " + removed);
        LOG.debug("modified StreamSortSpec " + modified);
    }
    router.onStreamSortSpecChange(added, removed, modified);
    // switch cache
    this.cachedSSS = newSSS;

    // figure out added, removed, modified StreamRouterSpec
    Map<StreamPartition, List<StreamRouterSpec>> newSRS = spec.makeSRS();

    Set<StreamPartition> newStreamPartitions = newSRS.keySet();
    Set<StreamPartition> cachedStreamPartitions = cachedSRS.keySet();
    Collection<StreamPartition> addedStreamPartitions = CollectionUtils.subtract(newStreamPartitions, cachedStreamPartitions);
    Collection<StreamPartition> removedStreamPartitions = CollectionUtils.subtract(cachedStreamPartitions, newStreamPartitions);
    Collection<StreamPartition> modifiedStreamPartitions = CollectionUtils.intersection(newStreamPartitions, cachedStreamPartitions);

    Collection<StreamRouterSpec> addedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> removedRouterSpecs = new ArrayList<>();
    Collection<StreamRouterSpec> modifiedRouterSpecs = new ArrayList<>();
    addedStreamPartitions.forEach(s -> addedRouterSpecs.addAll(newSRS.get(s)));
    removedStreamPartitions.forEach(s -> removedRouterSpecs.addAll(cachedSRS.get(s)));
    modifiedStreamPartitions.forEach(s -> {
        if (!CollectionUtils.isEqualCollection(newSRS.get(s), cachedSRS.get(s))) {
            // this means StreamRouterSpec is changed for one specific StreamPartition
            modifiedRouterSpecs.addAll(newSRS.get(s));
        }
    });
    if (LOG.isDebugEnabled()) {
        LOG.debug("added StreamRouterSpec " + addedRouterSpecs);
        LOG.debug("removed StreamRouterSpec " + removedRouterSpecs);
        LOG.debug("modified StreamRouterSpec " + modifiedRouterSpecs);
    }
    routeCollector.onStreamRouterSpecChange(addedRouterSpecs, removedRouterSpecs, modifiedRouterSpecs, sds);
    // switch cache
    this.cachedSRS = newSRS;
    routeState.store(routeCollector, cachedSSS, cachedSRS, partitionNum);
}
From source file:org.apache.eagle.alert.metadata.impl.JdbcImplTest.java
@Test
public void test_clearScheduleState() {
    int maxCapacity = 4;
    List<String> reservedOnes = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        ScheduleState state = new ScheduleState();
        String versionId = "state-" + (System.currentTimeMillis() + i);
        state.setVersion(versionId);
        state.setGenerateTime(String.valueOf(new Date().getTime()));
        dao.addScheduleState(state);
        if (i >= 10 - maxCapacity) {
            reservedOnes.add(versionId);
        }
    }
    dao.clearScheduleState(maxCapacity);
    List<ScheduleState> scheduleStates = dao.listScheduleStates();
    Assert.assertTrue(scheduleStates.size() == maxCapacity);
    List<String> targetOnes = new ArrayList<>();
    scheduleStates.stream().forEach(state -> targetOnes.add(state.getVersion()));
    LOG.info("reservedOne={}", reservedOnes);
    LOG.info("targetOne={}", targetOnes);
    Assert.assertTrue(CollectionUtils.isEqualCollection(reservedOnes, targetOnes));
}
From source file:org.apache.eagle.alert.metadata.impl.JdbcImplTest.java
@Test
public void testUpdate() throws SQLException {
    OpResult updateResult;

    // update
    Publishment publishment = new Publishment();
    publishment.setName("pub-");
    publishment.setType("type1");
    updateResult = dao.addPublishment(publishment);
    Assert.assertTrue(updateResult.code == OpResult.SUCCESS);
    publishment.setType("type2");
    updateResult = dao.addPublishment(publishment);
    Assert.assertTrue(updateResult.code == OpResult.SUCCESS);
    Assert.assertTrue(dao.listPublishment().get(0).getType().equals("type2"));

    // remove
    updateResult = dao.removePublishment("pub-");
    Assert.assertTrue(updateResult.code == OpResult.SUCCESS);
    Assert.assertTrue(dao.listPublishment().size() == 0);

    // update alert event
    AlertPublishEvent alert = new AlertPublishEvent();
    String alertId = UUID.randomUUID().toString();
    alert.setAlertTimestamp(System.currentTimeMillis());
    alert.setAlertId(alertId);
    alert.setPolicyId("policyId");
    alert.setPolicyValue(
        "from HDFS_AUDIT_LOG_ENRICHED_STREAM_SANDBOX[str:contains(src,'/tmp/test') and ((cmd=='rename' and str:contains(dst, '.Trash')) or cmd=='delete')] select * insert into hdfs_audit_log_enriched_stream_out");
    Map<String, Object> alertData = new HashMap<>();
    alertData.put("siteId", "sandbox");
    alertData.put("policyId", "sample");
    alert.setAlertData(alertData);
    List<String> appIds = new ArrayList<>();
    appIds.add("app1");
    appIds.add("app2");
    alert.setAppIds(appIds);
    updateResult = dao.addAlertPublishEvent(alert);
    Assert.assertTrue(updateResult.code == OpResult.SUCCESS);
    AlertPublishEvent event = dao.getAlertPublishEvent(alertId);
    Assert.assertTrue(CollectionUtils.isEqualCollection(appIds, event.getAppIds()));
    Assert.assertTrue(alertData.equals(event.getAlertData()));
}
From source file:org.apache.eagle.metadata.model.PolicyEntity.java
@Override
public boolean equals(Object that) {
    if (that == this) {
        return true;
    }
    if (!(that instanceof PolicyEntity)) {
        return false;
    }
    PolicyEntity another = (PolicyEntity) that;
    return Objects.equals(another.name, this.name)
        && Objects.equals(another.definition, this.definition)
        && CollectionUtils.isEqualCollection(another.getAlertPublishmentIds(), alertPublishmentIds);
}
From source file:org.apache.eagle.metadata.service.TestPolicyEntityServiceMemoryImpl.java
@Test
public void test() {
    // define a prototype policy without site info
    PolicyDefinition policyDefinition = new PolicyDefinition();
    policyDefinition.setName("policy1");
    PolicyDefinition.Definition definition = new PolicyDefinition.Definition("siddhi",
        "from STREAM select * insert into out");
    policyDefinition.setDefinition(definition);
    policyDefinition.setInputStreams(Arrays.asList("STREAM"));
    policyDefinition.setOutputStreams(Arrays.asList("out"));

    // define publisher list
    List<String> alertPublisherIds = Arrays.asList("slack");

    // create
    PolicyEntity policyEntity = new PolicyEntity();
    policyEntity.setDefinition(policyDefinition);
    policyEntity.setAlertPublishmentIds(alertPublisherIds);
    PolicyEntity res = policyEntityService.createOrUpdatePolicyProto(policyEntity);
    Assert.assertTrue(res.getDefinition().equals(policyDefinition));
    Assert.assertTrue(CollectionUtils.isEqualCollection(res.getAlertPublishmentIds(), alertPublisherIds));
    Collection<PolicyEntity> policies = policyEntityService.getAllPolicyProto();
    Assert.assertTrue(policies.size() == 1);
    PolicyEntity entity = policyEntityService.getByUUIDorName(policies.iterator().next().getUuid(), null);
    Assert.assertTrue(entity.equals(policies.iterator().next()));
    entity = policyEntityService.getByUUIDorName(null, "[null]policy1");
    Assert.assertTrue(entity.equals(policies.iterator().next()));

    // test update
    entity.getDefinition().setName("policy2");
    PolicyEntity updatedEntity = policyEntityService.update(entity);
    Assert.assertTrue(updatedEntity.getDefinition().getName().equals("policy2"));

    // test delete
    //Assert.assertTrue(policyEntityService.deletePolicyProtoByUUID(entity.getUuid()));
    policyEntityService.deletePolicyProtoByUUID(entity.getUuid());
    Assert.assertTrue(policyEntityService.getAllPolicyProto().size() == 0);
}
From source file:org.apache.eagle.metadata.store.jdbc.PolicyEntityServiceJDBCImplTest.java
@Test
public void test() {
    // define a prototype policy without site info
    PolicyDefinition policyDefinition = new PolicyDefinition();
    policyDefinition.setName("policy1");
    PolicyDefinition.Definition definition = new PolicyDefinition.Definition("siddhi",
        "from STREAM select * insert into out");
    policyDefinition.setDefinition(definition);
    policyDefinition.setInputStreams(Arrays.asList("STREAM"));
    policyDefinition.setOutputStreams(Arrays.asList("out"));

    // define publisher list
    List<String> alertPublisherIds = Arrays.asList("slack");

    PolicyEntity policyEntity = new PolicyEntity();
    policyEntity.setDefinition(policyDefinition);
    policyEntity.setAlertPublishmentIds(alertPublisherIds);
    PolicyEntity res = policyEntityService.createOrUpdatePolicyProto(policyEntity);
    Assert.assertTrue(res != null);
    Assert.assertTrue(res.getDefinition().equals(policyDefinition));
    Assert.assertTrue(CollectionUtils.isEqualCollection(res.getAlertPublishmentIds(), alertPublisherIds));
    Collection<PolicyEntity> policies = policyEntityService.getAllPolicyProto();
    Assert.assertTrue(policies.size() == 1);
    PolicyEntity entity = policyEntityService.getByUUIDorName(policies.iterator().next().getUuid(), null);
    Assert.assertTrue(entity.equals(policies.iterator().next()));
    entity = policyEntityService.getByUUIDorName(null, "[null]policy1");
    Assert.assertTrue(entity.equals(policies.iterator().next()));

    // test update
    entity.getDefinition().setName("policy2");
    PolicyEntity updatedEntity = policyEntityService.update(entity);
    Assert.assertTrue(updatedEntity.getDefinition().getName().equals("policy2"));

    // test delete
    //Assert.assertTrue(policyEntityService.deletePolicyProtoByUUID(entity.getUuid()));
    policyEntityService.deletePolicyProtoByUUID(entity.getUuid());
    Assert.assertTrue(policyEntityService.getAllPolicyProto().size() == 0);
}
From source file:org.apache.flink.runtime.checkpoint.StateObjectCollection.java
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    StateObjectCollection<?> that = (StateObjectCollection<?>) o;
    // simple equals can cause troubles here because of how equals works e.g. between lists and sets.
    return CollectionUtils.isEqualCollection(stateObjects, that.stateObjects);
}