Example usage for java.util Set equals

List of usage examples for java.util Set equals

Introduction

On this page you can find example usages of java.util.Set.equals().

Prototype

boolean equals(Object o);

Document

Compares the specified object with this set for equality. Returns true if the specified object is also a set, the two sets have the same size, and every member of the specified set is contained in this set.
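
Set.equals compares by content only: two sets are equal when they contain the same elements, regardless of iteration order or concrete implementation. A minimal, self-contained sketch of that behaviour (class and variable names are illustrative only):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;

public class SetEqualsDemo {
    public static void main(String[] args) {
        Set<String> hashSet = new HashSet<String>(Arrays.asList("a", "b", "c"));
        Set<String> treeSet = new TreeSet<String>(Arrays.asList("c", "b", "a"));

        // true: same elements, even though implementations and iteration orders differ
        System.out.println(hashSet.equals(treeSet));

        // false: one element differs
        System.out.println(hashSet.equals(new HashSet<String>(Arrays.asList("a", "b", "x"))));
    }
}

Because equality is defined purely by the elements, equal sets also have equal hash codes, which is what makes mixed-implementation comparisons such as the ones in the examples below reliable.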

Usage

From source file:net.sourceforge.seqware.pipeline.deciders.BasicDecider.java

/**
 * Compares the files attached to the workflow run (workflowRunAcc) with those
 * found in the database (filesToRun) and reports how the two sets relate:
 * SAME_FILES if they contain exactly the same file paths,
 * PAST_SUBSET_OR_INTERSECTION if the workflow run's files are a subset of, or
 * merely overlap with, filesToRun, PAST_SUPERSET if the workflow run contains
 * files that filesToRun does not, and DISJOINT_SETS if the two share no files.
 */
protected FILE_STATUS compareWorkflowRunFiles(int workflowRunAcc, Collection<String> filesToRun) {
    List<String> ranOnList = getListOfFiles(workflowRunAcc);
    Log.info("Files to run: " + StringUtils.join(filesToRun, ','));
    Log.info("Files has run: " + StringUtils.join(ranOnList, ','));

    // use set operations to be more explicit about our cases
    Set<String> setToRun = new HashSet<String>(filesToRun);
    Set<String> setHasRun = new HashSet<String>(ranOnList);
    if (setToRun.equals(setHasRun)) {
        return FILE_STATUS.SAME_FILES;
    }
    if (SetOperations.isSubset(setHasRun, setToRun)) {
        return FILE_STATUS.PAST_SUBSET_OR_INTERSECTION;
    }
    if (SetOperations.isSuperset(setHasRun, setToRun)) {
        return FILE_STATUS.PAST_SUPERSET;
    }
    if (SetOperations.intersection(setToRun, setHasRun).size() > 0) {
        return FILE_STATUS.PAST_SUBSET_OR_INTERSECTION;
    }
    return FILE_STATUS.DISJOINT_SETS;
}
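
The decision logic above can also be expressed with the standard java.util API alone; the following is a minimal sketch under that assumption, using Set.containsAll in place of SeqWare's SetOperations helper and a hypothetical Relation enum standing in for FILE_STATUS:

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

public class FileSetComparison {

    // hypothetical stand-in for BasicDecider.FILE_STATUS
    enum Relation { SAME, SUBSET_OR_INTERSECTION, SUPERSET, DISJOINT }

    static Relation compare(Collection<String> filesToRun, Collection<String> filesHaveRun) {
        Set<String> setToRun = new HashSet<String>(filesToRun);
        Set<String> setHasRun = new HashSet<String>(filesHaveRun);
        if (setToRun.equals(setHasRun)) {
            return Relation.SAME;                    // identical contents
        }
        if (setToRun.containsAll(setHasRun)) {
            return Relation.SUBSET_OR_INTERSECTION;  // the run used a subset of filesToRun
        }
        if (setHasRun.containsAll(setToRun)) {
            return Relation.SUPERSET;                // the run used extra files
        }
        Set<String> overlap = new HashSet<String>(setToRun);
        overlap.retainAll(setHasRun);
        return overlap.isEmpty() ? Relation.DISJOINT : Relation.SUBSET_OR_INTERSECTION;
    }
}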

From source file:com.evolveum.midpoint.model.impl.lens.projector.PolicyRuleProcessor.java

private <F extends FocusType> boolean shouldSituationBeUpdated(EvaluatedAssignment<F> evaluatedAssignment,
        List<EvaluatedPolicyRuleTriggerType> triggers) {
    Set<String> currentSituations = new HashSet<>(evaluatedAssignment.getAssignmentType().getPolicySituation());
    Set<EvaluatedPolicyRuleTriggerType> currentTriggers = new HashSet<>(
            evaluatedAssignment.getAssignmentType().getTrigger());
    // if the current situations are different from the ones in the old assignment => update
    // (provided that the situations in the assignment were _not_ changed directly via a delta!!!) TODO check this
    if (!currentSituations.equals(new HashSet<>(evaluatedAssignment.getPolicySituations()))) {
        LOGGER.trace("computed policy situations are different from the current ones");
        return true;
    }
    if (!currentTriggers.equals(new HashSet<>(triggers))) {
        LOGGER.trace("computed policy rules triggers are different from the current ones");
        return true;
    }
    return false;
}
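
Copying both collections into HashSets before calling equals, as above, compares them by content while ignoring order and duplicates; comparing the underlying lists directly would also require identical ordering. A small illustration (the situation values are made up):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class ListVsSetEquality {
    public static void main(String[] args) {
        List<String> current = Arrays.asList("exclusion", "approval");
        List<String> computed = Arrays.asList("approval", "exclusion");

        // false: List.equals is order-sensitive
        System.out.println(current.equals(computed));

        // true: Set.equals only looks at the contents
        System.out.println(new HashSet<String>(current).equals(new HashSet<String>(computed)));
    }
}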

From source file:com.tesora.dve.sql.schema.PETable.java

protected void updateExistingTriggers(SchemaContext sc, UserTable ut) throws PEException {
    HashMap<String, UserTrigger> persistent = new HashMap<String, UserTrigger>();
    HashMap<String, PETrigger> trans = new HashMap<String, PETrigger>();
    for (PETableTriggerEventInfo trig : triggers.values()) {
        for (PETrigger pet : trig.get()) {
            trans.put(pet.getName().getUnqualified().getUnquotedName().get(), pet);
        }
    }
    for (UserTrigger trig : ut.getTriggers())
        persistent.put(trig.getName(), trig);
    // anything that exists in persistent but not in trans has been deleted
    // anything that exists in trans but not persistent has been added
    Set<String> persTrigNames = persistent.keySet();
    Set<String> transTrigNames = trans.keySet();
    if (persTrigNames.equals(transTrigNames))
        return; // nothing to do
    if (persTrigNames.size() < transTrigNames.size()) {
        // added
        transTrigNames.removeAll(persTrigNames);
        for (String s : transTrigNames) {
            ut.getTriggers().add(trans.get(s).persistTree(sc));
        }
    } else {
        // dropped
        persTrigNames.removeAll(transTrigNames);
        for (String s : persTrigNames) {
            ut.getTriggers().remove(trans.get(s).persistTree(sc));
        }
    }
}
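
Note that persTrigNames and transTrigNames are keySet() views, so equals compares the maps' current keys, and the removeAll calls in the added/dropped branches also remove the corresponding entries from the backing maps. A minimal sketch of both behaviours, with made-up trigger names:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class KeySetViewDemo {
    public static void main(String[] args) {
        Map<String, Integer> persistent = new HashMap<String, Integer>();
        persistent.put("trg_a", 1);
        persistent.put("trg_b", 2);
        Map<String, Integer> trans = new HashMap<String, Integer>();
        trans.put("trg_a", 10);

        Set<String> persNames = persistent.keySet();
        Set<String> transNames = trans.keySet();

        // false: the key sets differ, so there is work to do
        System.out.println(persNames.equals(transNames));

        // removeAll on the view also removes the matching entries from the backing map
        persNames.removeAll(transNames);
        System.out.println(persistent); // {trg_b=2}
    }
}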

From source file:org.eclipse.ecr.core.storage.sql.jdbc.dialect.DialectPostgreSQL.java

@Override
public void performAdditionalStatements(Connection connection) throws SQLException {
    // Warn the user if the BROWSE permissions have changed
    Set<String> dbPermissions = new HashSet<String>();
    String sql = "SELECT * FROM aclr_permission";
    Statement s = connection.createStatement();
    ResultSet rs = s.executeQuery(sql);
    while (rs.next()) {
        dbPermissions.add(rs.getString(1));
    }
    rs.close();
    s.close();
    Set<String> confPermissions = new HashSet<String>();
    SecurityService securityService = NXCore.getSecurityService();
    for (String perm : securityService.getPermissionsToCheck(SecurityConstants.BROWSE)) {
        confPermissions.add(perm);
    }
    if (!dbPermissions.equals(confPermissions)) {
        log.error("Security permission for BROWSE has changed, you need to rebuild the optimized read acls:"
                + "DROP TABLE aclr_permission; DROP TABLE aclr; then restart.");
    }
}

From source file:org.apache.geode.internal.cache.partitioned.PersistentPartitionedRegionTestBase.java

protected void waitForBuckets(VM vm, final Set<Integer> expectedBuckets, final String regionName) {
    SerializableCallable getBuckets = new SerializableCallable("get buckets") {

        public Object call() throws Exception {
            Cache cache = getCache();
            final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
            Wait.waitForCriterion(new WaitCriterion() {

                public boolean done() {
                    return expectedBuckets.equals(getActualBuckets());
                }

                public String description() {
                    return "Buckets on vm " + getActualBuckets() + " never became equal to expected "
                            + expectedBuckets;
                }

                public TreeSet<Integer> getActualBuckets() {
                    return new TreeSet<Integer>(region.getDataStore().getAllLocalBucketIds());
                }
            }, 30 * 1000, 100, true);

            return null;
        }
    };

    vm.invoke(getBuckets);
}

From source file:org.openconcerto.sql.model.graph.DatabaseGraph.java

/**
 * Builds the map of tables.
 * 
 * @param toRefreshSpec the roots and tables to refresh.
 * @return roots and tables loaded from file.
 * @throws SQLException if an error occurs.
 */
private synchronized Map<String, Set<String>> mapTables(final ToRefreshSpec toRefreshSpec) throws SQLException {
    assert Thread.holdsLock(this.base.getTreeMutex()) : "Cannot graph a changing object";
    Map<String, Set<String>> res = new TablesMap();

    final Set<SQLTable> currentTables = this.getAllTables();
    final ToRefreshActual toRefresh = toRefreshSpec.getActual(this.base, currentTables);
    // clear graph and add tables (vertices)
    {
        final Set<SQLTable> newTablesInScope = toRefresh.getNewTablesInScope();
        final Set<SQLTable> oldTablesInScope = toRefresh.getOldTablesInScope();
        // refresh all ?
        final boolean clearGraph = oldTablesInScope.equals(currentTables);

        // clear cache
        synchronized (this) {
            if (clearGraph) {
                this.foreignLink.clear();
                this.foreignLinks.clear();
            } else {
                for (final Iterator<Entry<List<SQLField>, Link>> iter = this.foreignLink.entrySet()
                        .iterator(); iter.hasNext();) {
                    final Entry<List<SQLField>, Link> e = iter.next();
                    // don't use e.getValue() since it can be null
                    final SQLTable linkTable = e.getKey().get(0).getTable();
                    if (oldTablesInScope.contains(linkTable))
                        iter.remove();
                }
                for (final Iterator<Entry<SQLTable, Set<Link>>> iter = this.foreignLinks.entrySet()
                        .iterator(); iter.hasNext();) {
                    final Entry<SQLTable, Set<Link>> e = iter.next();
                    final SQLTable linkTable = e.getKey().getTable();
                    if (oldTablesInScope.contains(linkTable))
                        iter.remove();
                }
            }
        }

        if (clearGraph) {
            this.getGraphP().removeAllVertices(oldTablesInScope);
            assert this.getGraphP().vertexSet().size() == 0 && this.getGraphP().edgeSet().size() == 0;
        } else {
            // Removing a vertex also removes edges, so check that we also refresh referent
            // tables otherwise they won't have any foreign links anymore which is wrong if
            // removedTable was just renamed
            // Also the cache is only cleared for tables in scope, meaning that the cache for
            // those referent tables will be incoherent with the actual graph
            final Collection<SQLTable> removedTables = org.openconcerto.utils.CollectionUtils
                    .subtract(oldTablesInScope, newTablesInScope);
            for (final SQLTable removedTable : removedTables) {
                final Set<SQLTable> referentTables = getReferentTables(removedTable);
                // MAYBE add option to refresh needed tables instead of failing
                if (!oldTablesInScope.containsAll(referentTables)) {
                    throw new IllegalStateException(
                            removedTable + " has been removed but some of its referents won't be refreshed : "
                                    + org.openconcerto.utils.CollectionUtils.subtract(referentTables,
                                            oldTablesInScope));
                }
            }
            this.getGraphP().removeAllVertices(removedTables);

            // remove links that will be refreshed.
            final Set<Link> linksToRemove = new HashSet<Link>();
            for (final SQLTable t : org.openconcerto.utils.CollectionUtils.intersection(oldTablesInScope,
                    newTablesInScope)) {
                linksToRemove.addAll(this.getGraphP().outgoingEdgesOf(t));
            }
            this.getGraphP().removeAllEdges(linksToRemove);
        }

        // add new tables (existing ones too, but that's fine since the graph's vertex set is a set)
        Graphs.addAllVertices(this.getGraphP(), newTablesInScope);
    }
    final TablesMap fromXML = toRefresh.getFromXML();
    final TablesMap fromJDBC = toRefresh.getFromJDBC();
    if (fromXML.size() > 0) {
        final DBItemFileCache dir = this.getFileCache();
        try {
            if (dir != null) {
                Log.get().config("for mapping " + this + " trying xmls in " + dir);
                final long t1 = System.currentTimeMillis();
                res = this.mapFromXML(fromXML);
                // remove what was loaded
                fromXML.removeAll(res);
                final long t2 = System.currentTimeMillis();
                Log.get().config("XML took " + (t2 - t1) + "ms for mapping the graph of " + this.base.getName()
                        + "." + res);
            }
        } catch (Exception e) {
            SQLBase.logCacheError(dir, e);
            this.deleteGraphFiles();
        }
        // add to JDBC what wasn't loaded
        fromJDBC.merge(fromXML);
    }
    if (!fromJDBC.isEmpty()) {
        final long t1 = System.currentTimeMillis();
        for (final Entry<String, Set<String>> e : fromJDBC.entrySet()) {
            final String rootName = e.getKey();
            final Set<String> tableNames = e.getValue();
            final DBRoot r = this.base.getRoot(rootName);
            // first try to map the whole root at once
            if (!this.map(r, tableNames)) {
                // if this isn't supported use standard JDBC
                for (final String table : tableNames) {
                    this.map(r, table, null);
                }
            }
            this.save(r);
        }
        final long t2 = System.currentTimeMillis();
        Log.get()
                .config("JDBC took " + (t2 - t1) + "ms for mapping the graph of " + this.base + "." + fromJDBC);
    }
    return res;
}

From source file:org.openlmis.functional.CreateUpdateCHW.java

@Test(groups = { "webserviceSmoke" })
public void testChwFeedWithValidParentFacilityCode() throws IOException, SQLException {
    HttpClient client = new HttpClient();
    client.createContext();
    Agent agentJson = readObjectFromFile(FULL_JSON_TXT_FILE_NAME, Agent.class);
    agentJson.setAgentCode(DEFAULT_AGENT_CODE);
    agentJson.setAgentName(DEFAULT_AGENT_NAME);
    agentJson.setParentFacilityCode(DEFAULT_PARENT_FACILITY_CODE);
    agentJson.setPhoneNumber(PHONE_NUMBER);
    agentJson.setActive(ACTIVE_STATUS);

    ResponseEntity responseEntity = client.SendJSON(getJsonStringFor(agentJson), CREATE_URL, POST,
            commTrackUser, "Admin123");
    assertTrue("Showing response as : " + responseEntity.getResponse(),
            responseEntity.getResponse().contains("{\"success\":\"CHW created successfully\"}"));

    assertEquals(dbWrapper.getRequisitionGroupId(DEFAULT_PARENT_FACILITY_CODE),
            dbWrapper.getRequisitionGroupId(DEFAULT_AGENT_CODE));
    List<Integer> listOfProgramsSupportedByParentFacility = dbWrapper
            .getAllProgramsOfFacility(DEFAULT_PARENT_FACILITY_CODE);
    List<Integer> listOfProgramsSupportedByVirtualFacility = dbWrapper
            .getAllProgramsOfFacility(DEFAULT_AGENT_CODE);
    Set<Integer> setOfProgramsSupportedByParentFacility = new HashSet<>();
    setOfProgramsSupportedByParentFacility.addAll(listOfProgramsSupportedByParentFacility);
    Set<Integer> setOfProgramsSupportedByVirtualFacility = new HashSet<>();
    setOfProgramsSupportedByVirtualFacility.addAll(listOfProgramsSupportedByVirtualFacility);
    assertTrue(setOfProgramsSupportedByParentFacility.equals(setOfProgramsSupportedByVirtualFacility));
    assertEquals(listOfProgramsSupportedByParentFacility.size(),
            listOfProgramsSupportedByVirtualFacility.size());
    for (Integer programId : listOfProgramsSupportedByParentFacility) {
        assertEquals(
                dbWrapper.getProgramFieldForProgramIdAndFacilityCode(programId, DEFAULT_PARENT_FACILITY_CODE,
                        "active"),
                dbWrapper.getProgramFieldForProgramIdAndFacilityCode(programId, DEFAULT_AGENT_CODE, "active"));
        assertEquals(
                dbWrapper.getProgramStartDateForProgramIdAndFacilityCode(programId,
                        DEFAULT_PARENT_FACILITY_CODE),
                dbWrapper.getProgramStartDateForProgramIdAndFacilityCode(programId, DEFAULT_AGENT_CODE));
    }
}
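
assertTrue(a.equals(b)) works, but on failure it only reports that the expression was false. Since the test class appears to use TestNG, the same check could be written with assertEquals, which delegates to Set.equals and prints both sets when they differ; a minimal sketch using the set names from the test above:

import static org.testng.Assert.assertEquals;

import java.util.Set;

public class ProgramSetAssertions {

    static void assertSameProgramsSupported(Set<Integer> setOfProgramsSupportedByParentFacility,
            Set<Integer> setOfProgramsSupportedByVirtualFacility) {
        // delegates to Set.equals, but reports both sets in the failure message
        assertEquals(setOfProgramsSupportedByVirtualFacility, setOfProgramsSupportedByParentFacility);
    }
}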

From source file:ubic.pubmedgate.resolve.ResolutionRDFModel.java

public Set<Resource> getUnMatchedConceptsOld(Set<Resource> concepts) {
    Set<Resource> result = new HashSet<Resource>();
    for (Resource concept : concepts) {
        Set<Resource> terms = getTermsFromConcepts(concept);
        Set<Resource> unMatchedTerms = getUnMatchedTerms(terms);
        // if all terms are unmatched
        if (terms.equals(unMatchedTerms))
            result.add(concept);
    }
    return result;
}
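
Here equals asks whether every term of the concept is unmatched. Because getUnMatchedTerms(terms) presumably returns a subset of terms, terms.equals(unMatchedTerms) is equivalent to that question; without relying on the subset assumption the same intent can be stated with containsAll. A tiny self-contained sketch with made-up term names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class AllTermsUnmatchedCheck {
    public static void main(String[] args) {
        Set<String> terms = new HashSet<String>(Arrays.asList("hippocampus", "CA1"));
        Set<String> unMatchedTerms = new HashSet<String>(Arrays.asList("CA1"));

        // false: not every term is unmatched, so the sets are not equal
        System.out.println(terms.equals(unMatchedTerms));

        // states the intent directly: are all terms contained in the unmatched set?
        System.out.println(unMatchedTerms.containsAll(terms));
    }
}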

From source file:com.headstrong.fusion.core.recovery.ProcessRecoveryContext.java

/**
 * Returns the unprocessed messages for the process.
 *
 * @param processContext context of the process whose unprocessed messages should be returned.
 * @return List of unprocessed messages.
 * @throws Exception 
 */
public List<Recoverable> getUnProcessedMessages(ProcessContext processContext) throws Exception {

    RecoveryDataStore recStore = this.getRecoveryDataStore();

    List<RecoveryMessage> recoveryMessages = null;

    // A multicast process needs to be handled differently, as the messages to recover
    // are no longer simply the difference between the reclog and the processed table.
    if (((CamelProcessContext) processContext).isMulticastProcess()) {

        List<RouterConfig> routerList = processContext.getProcessConfigurationModel().getRouters();

        recoveryMessages = new ArrayList<RecoveryMessage>();
        // Get the messages which have reached the reclog but not the
        // processed table at all.
        recoveryMessages
                .addAll(recStore.getUnProcessedMulticastRecoveryMessages(processContext.getProcessId(), null));

        for (RouterConfig routerConfig : routerList) {
            if (routerConfig instanceof MultiCastRouterConfig) {
                recoveryMessages.addAll(recStore.getUnProcessedMulticastRecoveryMessages(
                        processContext.getProcessId(), routerConfig.getId()));
            }
        }

    } else {
        // list of unprocessed messages persisted in the recovery store.
        recoveryMessages = recStore.getUnProcessedRecoverMessages(processContext.getProcessId());
    }

    // For each recovery message; the assumption is that the table will contain
    // a single record for each message id.
    List<Recoverable> messagesUndelivered = new ArrayList<Recoverable>();
    if (recoveryMessages != null) {

        for (RecoveryMessage recoveryMessage : recoveryMessages) {

            if (((CamelProcessContext) processContext).isMulticastProcess()) {
                List<RouterConfig> routerList = processContext.getProcessConfigurationModel().getRouters();

                Set<String> processedEndpointSet = ((MulticastRecoveryMessage) recoveryMessage)
                        .getEndPointSet();
                Set<String> actualSet = null;

                // Finding the partially processed ones among the messages returned by query 2.
                for (RouterConfig routerConfig : routerList) {
                    if (routerConfig instanceof MultiCastRouterConfig) {
                        if (processedEndpointSet.size() > 0) {
                            actualSet = new HashSet<String>();
                            for (EndPointSequence target : ((MultiCastRouterConfig) routerConfig)
                                    .getTargets()) {
                                actualSet.add(target.isDeadEnd() ? target.getEndPointId()
                                        : getDestinationEndpoint(processContext, target.getEndPointId()));
                            }
                        }
                    }
                }
                if (processedEndpointSet.size() > 0 && processedEndpointSet.equals(actualSet)) {
                    continue;
                }

            }

            // the message was not fully processed; rebuild it for redelivery
            Recoverable recoverable = new Recoverable();
            recoverable.setProcessId(recoveryMessage.getProcessId());
            recoverable.setMessageId(recoveryMessage.getMessageId());
            recoverable.setSavepoint(recoveryMessage.getSavePoint());
            recoverable.setSessionId(processContext.getProcessRunId());
            ByteArrayInputStream bis = new ByteArrayInputStream(recoveryMessage.getData());
            // FIXME :: this defeats the purpose of having
            // ProcessContext interface

            Exchange exchange = new DefaultExchange(((CamelProcessContext) processContext).getCamelContext());

            ExchangeConverter.read(exchange, bis);
            recoverable.setExchange(exchange);
            if (recoveryMessage instanceof MulticastRecoveryMessage) {
                Set<String> processedEndPoints = ((MulticastRecoveryMessage) recoveryMessage).getEndPointSet();
                if (processedEndPoints != null && !processedEndPoints.isEmpty()) {
                    String separatedProcessedEndpoints = StringUtils.join(processedEndPoints.toArray(), ",");
                    exchange.getIn().setHeader("processedEndpoints", separatedProcessedEndpoints);
                }
            }
            messagesUndelivered.add(recoverable);

            // #Redmine Bug 664: ensure that during recovery the undelivered messages
            // retain the right session id for a complete run. Prior to this fix, the
            // old session id was kept in the message_reclog table.
            recoveryMessage.setSessionId(processContext.getProcessRunId());
            recoveryMessage.setTimeStamp(new Date());
            recStore.saveRecoveryMessage(recoveryMessage);
        }
    }
    return messagesUndelivered;
}

From source file:org.openlmis.functional.CreateUpdateCHW.java

@Test(groups = { "webservice" })
public void testVerifyFieldsAfterChangeInParentFacilityCode() throws IOException, SQLException {
    String typeId = "typeId";
    String geographicZoneId = "geographicZoneId";
    String parentFacilityId = "parentFacilityId";
    String agentCode = "ABCDE";
    String firstParentFacility = DEFAULT_PARENT_FACILITY_CODE;
    String updateParentFacility = "F11";
    String id = "id";

    HttpClient client = new HttpClient();
    client.createContext();
    Agent agentJson = readObjectFromFile(FULL_JSON_TXT_FILE_NAME, Agent.class);
    agentJson.setAgentCode(agentCode);
    agentJson.setAgentName(DEFAULT_AGENT_NAME);
    agentJson.setParentFacilityCode(firstParentFacility);
    agentJson.setPhoneNumber(PHONE_NUMBER);
    agentJson.setActive(ACTIVE_STATUS);

    ResponseEntity responseEntity = client.SendJSON(getJsonStringFor(agentJson), CREATE_URL, POST,
            commTrackUser, "Admin123");
    assertTrue("Showing response as : " + responseEntity.getResponse(),
            responseEntity.getResponse().contains("{\"success\":\"CHW created successfully\"}"));

    assertEquals(dbWrapper.getAttributeFromTable("facilities", typeId, "code", firstParentFacility),
            dbWrapper.getAttributeFromTable("facilities", typeId, "code", agentCode));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", geographicZoneId, "code", firstParentFacility),
            dbWrapper.getAttributeFromTable("facilities", geographicZoneId, "code", agentCode));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", id, "code", firstParentFacility),
            dbWrapper.getAttributeFromTable("facilities", parentFacilityId, "code", agentCode));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", "name", "code", agentCode), DEFAULT_AGENT_NAME);
    assertNotEquals(dbWrapper.getAttributeFromTable("facilities", "id", "code", agentCode),
            dbWrapper.getAttributeFromTable("facilities", "id", "code", firstParentFacility));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", "code", "code", agentCode), agentCode);
    assertNull(dbWrapper.getAttributeFromTable("facilities", "description", "code", agentCode));
    assertNull(dbWrapper.getAttributeFromTable("facilities", "gln", "code", agentCode));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", "mainPhone", "code", agentCode), PHONE_NUMBER);
    assertNull(dbWrapper.getAttributeFromTable("facilities", "fax", "code", agentCode));
    assertNull(dbWrapper.getAttributeFromTable("facilities", "address1", "code", agentCode));
    assertNull(dbWrapper.getAttributeFromTable("facilities", "address2", "code", agentCode));
    assertNull(dbWrapper.getAttributeFromTable("facilities", "catchmentPopulation", "code", agentCode));
    assertNull(dbWrapper.getAttributeFromTable("facilities", "operatedById", "code", agentCode));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", "active", "code", agentCode), "t");
    assertEquals(dbWrapper.getAttributeFromTable("facilities", "enabled", "code", agentCode), TRUE_FLAG);
    assertEquals(dbWrapper.getAttributeFromTable("facilities", "virtualFacility", "code", agentCode),
            TRUE_FLAG);
    assertEquals(dbWrapper.getRequisitionGroupId(firstParentFacility),
            dbWrapper.getRequisitionGroupId(agentCode));

    agentJson.setParentFacilityCode(updateParentFacility);

    ResponseEntity responseEntityUpdated = client.SendJSON(getJsonStringFor(agentJson),
            UPDATE_URL + agentCode + JSON_EXTENSION, PUT, commTrackUser, "Admin123");
    assertTrue("Showing response as : " + responseEntityUpdated.getResponse(),
            responseEntityUpdated.getResponse().contains("{\"success\":\"CHW updated successfully\"}"));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", typeId, "code", updateParentFacility),
            dbWrapper.getAttributeFromTable("facilities", typeId, "code", agentCode));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", geographicZoneId, "code", updateParentFacility),
            dbWrapper.getAttributeFromTable("facilities", geographicZoneId, "code", agentCode));
    assertEquals(dbWrapper.getAttributeFromTable("facilities", id, "code", updateParentFacility),
            dbWrapper.getAttributeFromTable("facilities", parentFacilityId, "code", agentCode));
    assertEquals(dbWrapper.getRequisitionGroupId(updateParentFacility),
            dbWrapper.getRequisitionGroupId(agentCode));

    List<Integer> listOfProgramsSupportedByParentFacility = dbWrapper
            .getAllProgramsOfFacility(updateParentFacility);
    List<Integer> listOfProgramsSupportedByVirtualFacility = dbWrapper.getAllProgramsOfFacility(agentCode);
    Set<Integer> setOfProgramsSupportedByParentFacility = new HashSet<>();
    setOfProgramsSupportedByParentFacility.addAll(listOfProgramsSupportedByParentFacility);
    Set<Integer> setOfProgramsSupportedByVirtualFacility = new HashSet<>();
    setOfProgramsSupportedByVirtualFacility.addAll(listOfProgramsSupportedByVirtualFacility);
    assertTrue(setOfProgramsSupportedByParentFacility.equals(setOfProgramsSupportedByVirtualFacility));
    assertEquals(listOfProgramsSupportedByParentFacility.size(),
            listOfProgramsSupportedByVirtualFacility.size());
    for (Integer programId : listOfProgramsSupportedByParentFacility) {
        assertEquals(
                dbWrapper.getProgramFieldForProgramIdAndFacilityCode(programId, updateParentFacility, "active"),
                dbWrapper.getProgramFieldForProgramIdAndFacilityCode(programId, agentCode, "active"));
        assertEquals(dbWrapper.getProgramStartDateForProgramIdAndFacilityCode(programId, updateParentFacility),
                dbWrapper.getProgramStartDateForProgramIdAndFacilityCode(programId, agentCode));
    }
}