Example usage for java.util Set clear

List of usage examples for java.util Set clear

Introduction

On this page you can find example usage of java.util.Set.clear().

Prototype

void clear();

Source Link

Document

Removes all of the elements from this set (optional operation).

Usage

From source file:com.google.gwt.emultest.java.util.TreeSetTest.java

/**
 * Exercises {@code java.util.Set.clear()}: a set that held at least one
 * element must report empty after being cleared.
 *
 * @see java.util.Set#clear()
 */
public void testClear() {
    // The _throwsUnsupportedOperationException variant of this test verifies
    // the unsupported case, so bail out early when clear() isn't supported.
    if (!isClearSupported) {
        return;
    }
    Set<E> set = createSet();
    set.add(getKeys()[0]);
    assertFalse(set.isEmpty());
    set.clear();
    _assertEmpty(set);
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils.java

@Test(timeout = 30000)
public void testNormalizeNodeLabelExpression() throws IOException {
    // Mock a queue (with mutable accessible labels) and a scheduler that
    // always hands that queue back, regardless of the requested queue name.
    YarnScheduler scheduler = mock(YarnScheduler.class);
    Set<String> queueAccessibleNodeLabels = Sets.newHashSet();
    QueueInfo queueInfo = mock(QueueInfo.class);
    when(queueInfo.getQueueName()).thenReturn("queue");
    when(queueInfo.getAccessibleNodeLabels()).thenReturn(queueAccessibleNodeLabels);
    // Default label is deliberately padded with whitespace; the assertions
    // below expect normalization to trim it to "x".
    when(queueInfo.getDefaultNodeLabelExpression()).thenReturn(" x ");
    when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean())).thenReturn(queueInfo);

    Resource maxResource = Resources.createResource(
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);

    // queue has labels, success cases
    try {
        // set queue accessible node labels to [x, y]
        queueAccessibleNodeLabels.clear();
        queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
        rmContext.getNodeLabelManager()
                .addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x"), NodeLabel.newInstance("y")));
        Resource resource = Resources.createResource(0,
                YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
        ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY,
                resource, 1);
        // Request carries no label: the queue default (" x ") is applied and trimmed.
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        Assert.assertTrue(resReq.getNodeLabelExpression().equals("x"));

        // Explicitly set, padded label should also come back trimmed.
        resReq.setNodeLabelExpression(" y ");
        SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue", scheduler, rmContext);
        Assert.assertTrue(resReq.getNodeLabelExpression().equals("y"));
    } catch (InvalidResourceRequestException e) {
        e.printStackTrace();
        fail("Should be valid when request labels is a subset of queue labels");
    } finally {
        // Remove the cluster-level labels so later tests start from a clean state.
        rmContext.getNodeLabelManager().removeFromClusterNodeLabels(Arrays.asList("x", "y"));
    }
}

From source file:org.broadleafcommerce.common.extensibility.jpa.MergePersistenceUnitManager.java

@Override
@SuppressWarnings({ "unchecked", "ToArrayCallWithZeroLengthArrayArgument" })
public void preparePersistenceUnitInfos() {
    //Need to use reflection to try and execute the logic in the DefaultPersistenceUnitManager
    //SpringSource added a block of code in version 3.1 to "protect" the user from having more than one PU with
    //the same name.  Of course, in our case, this happens before a merge occurs.  They have added
    //a block of code to throw an exception if more than one PU has the same name.  We want to
    //use the logic of the DefaultPersistenceUnitManager without the exception in the case of
    //a duplicate name. This will require reflection in order to do what we need.
    try {
        // Pull the superclass' private bookkeeping fields out reflectively so
        // its preparation logic can be re-driven from here.
        Set<String> persistenceUnitInfoNames = null;
        Map<String, PersistenceUnitInfo> persistenceUnitInfos = null;
        ResourcePatternResolver resourcePatternResolver = null;
        Field[] fields = getClass().getSuperclass().getDeclaredFields();
        for (Field field : fields) {
            if ("persistenceUnitInfoNames".equals(field.getName())) {
                field.setAccessible(true);
                persistenceUnitInfoNames = (Set<String>) field.get(this);
            } else if ("persistenceUnitInfos".equals(field.getName())) {
                field.setAccessible(true);
                persistenceUnitInfos = (Map<String, PersistenceUnitInfo>) field.get(this);
            } else if ("resourcePatternResolver".equals(field.getName())) {
                field.setAccessible(true);
                resourcePatternResolver = (ResourcePatternResolver) field.get(this);
            }
        }

        // Reset the superclass' state before re-reading the persistence units.
        persistenceUnitInfoNames.clear();
        persistenceUnitInfos.clear();

        Method readPersistenceUnitInfos = getClass().getSuperclass()
                .getDeclaredMethod("readPersistenceUnitInfos");
        readPersistenceUnitInfos.setAccessible(true);

        //In Spring 3.0 this returns an array
        //In Spring 3.1 this returns a List
        Object pInfosObject = readPersistenceUnitInfos.invoke(this);
        Object[] puis;
        if (pInfosObject.getClass().isArray()) {
            puis = (Object[]) pInfosObject;
        } else {
            puis = ((Collection) pInfosObject).toArray();
        }

        for (Object pui : puis) {
            MutablePersistenceUnitInfo mPui = (MutablePersistenceUnitInfo) pui;
            if (mPui.getPersistenceUnitRootUrl() == null) {
                // Invoke the superclass' default-root-URL logic reflectively
                // (the method is not accessible directly).
                Method determineDefaultPersistenceUnitRootUrl = getClass().getSuperclass()
                        .getDeclaredMethod("determineDefaultPersistenceUnitRootUrl");
                determineDefaultPersistenceUnitRootUrl.setAccessible(true);
                mPui.setPersistenceUnitRootUrl((URL) determineDefaultPersistenceUnitRootUrl.invoke(this));
            }
            ConfigurationOnlyState state = ConfigurationOnlyState.getState();
            if ((state == null || !state.isConfigurationOnly()) && mPui.getNonJtaDataSource() == null) {
                mPui.setNonJtaDataSource(getDefaultDataSource());
            }
            // The init(...) overload invoked depends on whether a LoadTimeWeaver is configured.
            if (super.getLoadTimeWeaver() != null) {
                Method puiInitMethod = mPui.getClass().getDeclaredMethod("init", LoadTimeWeaver.class);
                puiInitMethod.setAccessible(true);
                puiInitMethod.invoke(pui, getLoadTimeWeaver());
            } else {
                Method puiInitMethod = mPui.getClass().getDeclaredMethod("init", ClassLoader.class);
                puiInitMethod.setAccessible(true);
                puiInitMethod.invoke(pui, resourcePatternResolver.getClassLoader());
            }
            postProcessPersistenceUnitInfo((MutablePersistenceUnitInfo) pui);
            String name = mPui.getPersistenceUnitName();
            // Unlike DefaultPersistenceUnitManager, duplicate names are tolerated
            // here (see the class-level rationale above); just record the PU.
            persistenceUnitInfoNames.add(name);

            persistenceUnitInfos.put(name, mPui);
        }
    } catch (Exception e) {
        throw new RuntimeException("An error occured reflectively invoking methods on " + "class: "
                + getClass().getSuperclass().getName(), e);
    }

    try {
        List<String> managedClassNames = new ArrayList<String>();

        boolean weaverRegistered = true;
        for (PersistenceUnitInfo pui : mergedPus.values()) {
            for (BroadleafClassTransformer transformer : classTransformers) {
                try {
                    if (!(transformer instanceof NullClassTransformer)
                            && pui.getPersistenceUnitName().equals("blPU")) {
                        pui.addTransformer(transformer);
                    }
                } catch (Exception e) {
                    // A failure to add a transformer is tolerated only when it is an
                    // IllegalStateException (LoadTimeWeaver not registered); anything
                    // else is rethrown.
                    Exception refined = ExceptionHelper.refineException(IllegalStateException.class,
                            RuntimeException.class, e);
                    if (refined instanceof IllegalStateException) {
                        LOG.warn(
                                "A BroadleafClassTransformer is configured for this persistence unit, but Spring "
                                        + "reported a problem (likely that a LoadTimeWeaver is not registered). As a result, "
                                        + "the Broadleaf Commerce ClassTransformer ("
                                        + transformer.getClass().getName() + ") is "
                                        + "not being registered with the persistence unit.");
                        weaverRegistered = false;
                    } else {
                        throw refined;
                    }
                }
            }
        }

        // Only validate transformation results if there was a LoadTimeWeaver registered in the first place
        if (weaverRegistered) {
            for (PersistenceUnitInfo pui : mergedPus.values()) {
                for (String managedClassName : pui.getManagedClassNames()) {
                    if (!managedClassNames.contains(managedClassName)) {
                        // Force-load this class so that we are able to ensure our instrumentation happens globally.
                        // If transformation is happening, it should be tracked in EntityMarkerClassTransformer
                        Class.forName(managedClassName, true, getClass().getClassLoader());
                        managedClassNames.add(managedClassName);
                    }
                }
            }

            // If a class happened to be loaded by the ClassLoader before we had a chance to set up our instrumentation,
            // it may not be in a consistent state. This verifies with the EntityMarkerClassTransformer that it
            // actually saw the classes loaded by the above process
            List<String> nonTransformedClasses = new ArrayList<String>();
            for (PersistenceUnitInfo pui : mergedPus.values()) {
                for (String managedClassName : pui.getManagedClassNames()) {
                    // We came across a class that is not a real persistence class (doesn't have the right annotations)
                    // but is still being transformed/loaded by
                    // the persistence unit. This might have unexpected results downstream, but it could also be benign
                    // so just output a warning
                    if (entityMarkerClassTransformer.getTransformedNonEntityClassNames()
                            .contains(managedClassName)) {
                        LOG.warn("The class " + managedClassName
                                + " is marked as a managed class within the MergePersistenceUnitManager"
                                + " but is not annotated with @Entity, @MappedSuperclass or @Embeddable."
                                + " This class is still referenced in a persistence.xml and is being transformed by"
                                + " PersistenceUnit ClassTransformers which may result in problems downstream"
                                + " and represents a potential misconfiguration. This class should be removed from"
                                + " your persistence.xml");
                    } else if (!entityMarkerClassTransformer.getTransformedEntityClassNames()
                            .contains(managedClassName)) {
                        // This means the class not in the 'warning' list, but it is also not in the list that we would
                        // expect it to be in of valid entity classes that were transformed. This means that we
                        // never got the chance to transform the class AT ALL even though it is a valid entity class
                        nonTransformedClasses.add(managedClassName);
                    }
                }
            }

            if (CollectionUtils.isNotEmpty(nonTransformedClasses)) {
                String message = "The classes\n" + Arrays.toString(nonTransformedClasses.toArray())
                        + "\nare managed classes within the MergePersistenceUnitManager"
                        + "\nbut were not detected as being transformed by the EntityMarkerClassTransformer. These"
                        + "\nclasses are likely loaded earlier in the application startup lifecyle by the servlet"
                        + "\ncontainer. Verify that an empty <absolute-ordering /> element is contained in your"
                        + "\nweb.xml to disable scanning for ServletContainerInitializer classes by your servlet"
                        + "\ncontainer which can trigger early class loading. If the problem persists, ensure that"
                        + "\nthere are no bean references to your entity class anywhere else in your Spring applicationContext"
                        + "\nand consult the documentation for your servlet container to determine if classes are loaded"
                        + "\nprior to the Spring context initialization. Finally, ensure that Session Persistence is"
                        + "\nalso disabled by your Servlet Container. To do this in Tomcat, add <Manager pathname=\"\" />"
                        + "\ninside of the <Context> element in context.xml in your app's META-INF folder or your server's conf folder";
                LOG.error(message);
                throw new IllegalStateException(message);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:com.xwtec.xwserver.util.json.JsonConfig.java

/**
 * Removes all property exclusions assigned to the target class.<br>
 * [Java -&gt; JSON]/*from  w  ww.  jav a 2s.  c o m*/
 * 
 * @param target a class used for searching property exclusions.
 */
public void unregisterPropertyExclusions(Class target) {
    if (target != null) {
        Set set = (Set) exclusionMap.get(target);
        if (set != null) {
            set.clear();
        }
    }
}

From source file:io.mindmaps.graql.internal.analytics.AnalyticsTest.java

@Test
public void testDegreeIsCorrectAssertionAboutAssertion()
        throws MindmapsValidationException, ExecutionException, InterruptedException {
    // Build the ontology: pets/owners, names for animals, and an ownership
    // relation that itself can play a role (an assertion about an assertion).
    RoleType pet = graph.putRoleType("pet");
    RoleType owner = graph.putRoleType("owner");
    RelationType mansBestFriend = graph.putRelationType("mans-best-friend").hasRole(pet).hasRole(owner);
    RoleType target = graph.putRoleType("target");
    RoleType value = graph.putRoleType("value");
    RelationType hasName = graph.putRelationType("has-name").hasRole(value).hasRole(target);
    EntityType person = graph.putEntityType("person").playsRole(owner);
    EntityType animal = graph.putEntityType("animal").playsRole(pet).playsRole(target);
    ResourceType<String> name = graph.putResourceType("name", ResourceType.DataType.STRING).playsRole(value);
    ResourceType<String> altName = graph.putResourceType("alternate-name", ResourceType.DataType.STRING)
            .playsRole(value);
    RoleType ownership = graph.putRoleType("ownership");
    RoleType ownershipResource = graph.putRoleType("ownership-resource");
    RelationType hasOwnershipResource = graph.putRelationType("has-ownership-resource").hasRole(ownership)
            .hasRole(ownershipResource);
    ResourceType<String> startDate = graph.putResourceType("start-date", ResourceType.DataType.STRING)
            .playsRole(ownershipResource);
    mansBestFriend.playsRole(ownership);

    // add data to the graph
    Entity coco = graph.putEntity("coco", animal);
    Entity dave = graph.putEntity("dave", person);
    Resource coconut = graph.putResource("coconut", name);
    Resource stinky = graph.putResource("stinky", altName);
    Relation daveOwnsCoco = graph.addRelation(mansBestFriend).putRolePlayer(owner, dave).putRolePlayer(pet,
            coco);
    graph.addRelation(hasName).putRolePlayer(target, coco).putRolePlayer(value, coconut);
    graph.addRelation(hasName).putRolePlayer(target, coco).putRolePlayer(value, stinky);
    Resource sd = graph.putResource("01/01/01", startDate);
    // ownsFrom attaches the start-date resource to the ownership relation itself.
    Relation ownsFrom = graph.addRelation(hasOwnershipResource).putRolePlayer(ownershipResource, sd)
            .putRolePlayer(ownership, daveOwnsCoco);

    // Expected degrees when the subgraph INCLUDES the assertion-on-assertion.
    Map<String, Long> referenceDegrees1 = new HashMap<>();
    referenceDegrees1.put(coco.getId(), 1L);
    referenceDegrees1.put(dave.getId(), 1L);
    referenceDegrees1.put(daveOwnsCoco.getId(), 3L);
    referenceDegrees1.put(sd.getId(), 1L);
    referenceDegrees1.put(ownsFrom.getId(), 2L);

    // Expected degrees when the subgraph EXCLUDES the assertion-on-assertion.
    Map<String, Long> referenceDegrees2 = new HashMap<>();
    referenceDegrees2.put(coco.getId(), 1L);
    referenceDegrees2.put(dave.getId(), 1L);
    referenceDegrees2.put(daveOwnsCoco.getId(), 2L);

    graph.commit();

    // Re-fetch the types after commit before building the subgraph.
    mansBestFriend = graph.getRelationType("mans-best-friend");
    person = graph.getEntityType("person");
    animal = graph.getEntityType("animal");
    startDate = graph.getResourceType("start-date");
    hasOwnershipResource = graph.getRelationType("has-ownership-resource");

    // create a subgraph with assertion on assertion
    Set<Type> ct = new HashSet<>();
    ct.add(animal);
    ct.add(person);
    ct.add(mansBestFriend);
    ct.add(startDate);
    ct.add(hasOwnershipResource);
    Analytics analytics = new Analytics(keyspace, ct);
    Map<Instance, Long> degrees = analytics.degrees();
    assertFalse(degrees.isEmpty());
    degrees.entrySet().forEach(entry -> {
        assertEquals(referenceDegrees1.get(entry.getKey().getId()), entry.getValue());
    });

    // create subgraph without assertion on assertion
    ct.clear();
    ct.add(animal);
    ct.add(person);
    ct.add(mansBestFriend);
    analytics = new Analytics(keyspace, ct);
    degrees = analytics.degrees();
    assertFalse(degrees.isEmpty());
    degrees.entrySet().forEach(entry -> {
        assertEquals(referenceDegrees2.get(entry.getKey().getId()), entry.getValue());
    });
}

From source file:org.apache.hadoop.hive.ql.optimizer.MapJoinProcessor.java

/**
 * Get the set of big table candidates. Only the tables in the returned set
 * can be used as the big table in the join operation.
 *
 * The join condition array is scanned from left to right:
 * <ul>
 * <li>Inner join: if no outer join has been seen yet, or the most recent
 * outer join was a right outer join, both sides become candidates unless
 * they sat in a bad position (left of the last right outer join).</li>
 * <li>Left outer join / left semi join: the left side becomes a candidate
 * only when the candidate set is still empty.</li>
 * <li>Right outer join: the right side always wins — previous candidates
 * are discarded and everything seen so far except the right side is marked
 * as a bad position.</li>
 * <li>Full outer join: nothing can be the big table; an empty set is
 * returned immediately (no mapjoin possible).</li>
 * </ul>
 *
 * @param condns join conditions in plan order
 * @return set of big table candidates (positions); empty when no mapjoin is possible
 */
public static Set<Integer> getBigTableCandidates(JoinCondDesc[] condns) {
    Set<Integer> bigTableCandidates = new HashSet<Integer>();

    // Has any outer join been seen so far?
    boolean seenOuterJoin = false;
    // Was the most recently seen outer join a right outer join?
    boolean lastSeenRightOuterJoin = false;
    // Every join position encountered so far.
    Set<Integer> seenPositions = new HashSet<Integer>();
    // Positions sitting on the left of the last right outer join ("bad" positions).
    Set<Integer> leftPositionsOfLastRightOuterJoin = new HashSet<Integer>();

    for (JoinCondDesc condn : condns) {
        int joinType = condn.getType();
        seenPositions.add(condn.getLeft());
        seenPositions.add(condn.getRight());

        if (joinType == JoinDesc.FULL_OUTER_JOIN) {
            // Flags are updated anyway so future refactorings don't miss them.
            seenOuterJoin = true;
            lastSeenRightOuterJoin = false;
            // Empty set - cannot convert to a mapjoin.
            return new HashSet<Integer>();
        }

        if (joinType == JoinDesc.LEFT_OUTER_JOIN || joinType == JoinDesc.LEFT_SEMI_JOIN) {
            seenOuterJoin = true;
            lastSeenRightOuterJoin = false;
            // Only seed the candidates with the left side when nothing has
            // been collected yet; existing candidates stay untouched.
            if (bigTableCandidates.isEmpty()) {
                bigTableCandidates.add(condn.getLeft());
            }
        } else if (joinType == JoinDesc.RIGHT_OUTER_JOIN) {
            seenOuterJoin = true;
            lastSeenRightOuterJoin = true;
            // Everything seen so far except the right side becomes a bad position.
            leftPositionsOfLastRightOuterJoin.clear();
            leftPositionsOfLastRightOuterJoin.addAll(seenPositions);
            leftPositionsOfLastRightOuterJoin.remove(condn.getRight());

            // The right side of a right outer join always wins.
            bigTableCandidates.clear();
            bigTableCandidates.add(condn.getRight());
        } else if (joinType == JoinDesc.INNER_JOIN) {
            if (!seenOuterJoin || lastSeenRightOuterJoin) {
                // Each side qualifies unless it was on the left of the last
                // right outer join.
                if (!leftPositionsOfLastRightOuterJoin.contains(condn.getLeft())) {
                    bigTableCandidates.add(condn.getLeft());
                }
                if (!leftPositionsOfLastRightOuterJoin.contains(condn.getRight())) {
                    bigTableCandidates.add(condn.getRight());
                }
            }
        }
    }

    return bigTableCandidates;
}

From source file:com.netflix.nicobar.core.module.ScriptModuleLoaderTest.java

@Test
public void testOldArchiveRejected() throws Exception {
    // Load module "A" with createTime 2000 as the baseline.
    long originalCreateTime = 2000;
    Set<ScriptArchive> updateArchives = new HashSet<ScriptArchive>();
    updateArchives.add(new TestDependecyScriptArchive(
            new ScriptModuleSpec.Builder("A").addCompilerPluginId("mockPlugin").build(), originalCreateTime));

    when(MOCK_COMPILER.shouldCompile(Mockito.any(ScriptArchive.class))).thenReturn(true);
    when(MOCK_COMPILER.compile(Mockito.any(ScriptArchive.class), Mockito.any(JBossModuleClassLoader.class),
            Mockito.any(Path.class))).thenReturn(Collections.<Class<?>>emptySet());

    ScriptModuleListener mockListener = createMockListener();
    ScriptModuleLoader moduleLoader = new ScriptModuleLoader.Builder()
            .addPluginSpec(new ScriptCompilerPluginSpec.Builder("mockPlugin")
                    .withPluginClassName(MockScriptCompilerPlugin.class.getName()).build())
            .addListener(mockListener).build();
    moduleLoader.updateScriptArchives(updateArchives);
    // Forget the interactions from the initial load; only the second update matters.
    reset(mockListener);

    // Re-submit "A" with an OLDER create time (1000 < 2000).
    updateArchives.clear();
    long updatedCreateTime = 1000;
    TestDependecyScriptArchive updatedArchive = new TestDependecyScriptArchive(
            new ScriptModuleSpec.Builder("A").addCompilerPluginId("mockPlugin").build(), updatedCreateTime);
    updateArchives.add(updatedArchive);

    moduleLoader.updateScriptArchives(updateArchives);

    // validate that the update was rejected due to a old timestamp
    verify(mockListener).archiveRejected(updatedArchive, ArchiveRejectedReason.HIGHER_REVISION_AVAILABLE, null);
    verifyNoMoreInteractions(mockListener);

    // validate the post-condition of the module database
    assertEquals(moduleLoader.getScriptModule("A").getCreateTime(), originalCreateTime);
}

From source file:de.alpharogroup.message.system.service.MessagesBusinessServiceTest.java

// NOTE: TestNG test is currently disabled (enabled = false).
@Test(enabled = false)
public void testSaveMessageWithRecipients() {
    // Build the initial mail message model.
    IMessageContentModel messageModel = new MessageContentModel();
    ISendInformationModel sendInformationModel = new SendInformationModel();
    messageModel.setContent("Hello guys,\n\nhow are you?\n\nCheers\n\nMichael");
    messageModel.setSubject("Hi guys");
    IBaseMessageModel model = new BaseMessageModel();
    model.setMessageContentModel(messageModel);
    model.setSendInformationModel(sendInformationModel);
    model.setMessageState(MessageState.UNREPLIED);
    model.setMessageType(MessageType.MAIL);

    final Users sender = getUser("Michael", "Knight", "michael.knight@gmail.com", "knight");
    final Set<Users> recipients = new HashSet<>();

    final Users recipient = getUser("Anton", "Einstein", "anton.einstein@gmail.com", "einstein");
    recipients.add(recipient);
    model.getSendInformationModel().setRecipients(recipients);
    model.getSendInformationModel().setSender(sender);
    model.getSendInformationModel().setSentDate(new Date(System.currentTimeMillis()));
    // Persist the message and verify it exists with the expected single recipient.
    final Messages message = messagesService.saveMessageWithRecipients(model);
    AssertJUnit.assertTrue(messagesService.exists(message.getId()));
    final Set<Users> r = messagesService.getRecipients(message);
    AssertJUnit.assertTrue(r != null && !r.isEmpty());
    AssertJUnit.assertTrue(r.iterator().next().equals(recipient));

    // Test the find reply messages...
    // Create a reply message...
    messageModel = new MessageContentModel();
    sendInformationModel = new SendInformationModel();
    messageModel.setContent("Hello Michael,\n\nim fine and you?\n\nCheers\n\nAnton");
    messageModel.setSubject("Re:Hi guys");
    model = new BaseMessageModel();
    model.setMessageContentModel(messageModel);
    model.setSendInformationModel(sendInformationModel);
    model.setMessageState(MessageState.UNREPLIED);
    model.setMessageType(MessageType.REPLY);
    // clear recipients
    recipients.clear();
    // its a reply so the sender is now the recipient...
    recipients.add(sender);
    model.getSendInformationModel().setRecipients(recipients);
    model.getSendInformationModel().setSender(recipient);
    model.getSendInformationModel().setSentDate(new Date(System.currentTimeMillis()));
    // Persist the reply, link it to its parent and look up replies for the original sender.
    final Messages replyMessage = messagesService.saveMessageWithRecipients(model);
    replyMessage.setParent(message);
    messagesService.merge(replyMessage);
    final List<Messages> replies = messagesService.findReplyMessages(recipient);
    System.out.println(replies);
}

From source file:org.alfresco.repo.node.getchildren.GetChildrenCannedQueryTest.java

/**
 * Verifies that child retrieval can be restricted by association type: each
 * case filters the same parent folder by a different association-type set and
 * checks the number of content children returned.
 */
public void testRestrictByAssocType() throws Exception {
    NodeRef parentNodeRef = getOrCreateParentTestFolder("GetChildrenCannedQueryTest-testFolder-" + TEST_RUN_ID);

    Set<QName> assocTypeQNames = new HashSet<QName>(3);
    Set<QName> childTypeQNames = new HashSet<QName>(3);

    // Case 1: cm:contains only -> one content child.
    assocTypeQNames.add(ContentModel.ASSOC_CONTAINS);
    childTypeQNames.add(ContentModel.TYPE_CONTENT);
    List<NodeRef> children = filterByAssocTypeAndCheck(parentNodeRef, assocTypeQNames, childTypeQNames);
    assertEquals(1, children.size());

    // Case 2: the test model's contains1 only -> one content child.
    assocTypeQNames.clear();
    assocTypeQNames.add(QName.createQName("http://www.alfresco.org/test/getchildrentest/1.0", "contains1"));
    childTypeQNames.clear();
    childTypeQNames.add(ContentModel.TYPE_CONTENT);
    children = filterByAssocTypeAndCheck(parentNodeRef, assocTypeQNames, childTypeQNames);
    assertEquals(1, children.size());

    // Case 3: both association types -> the two children combined.
    assocTypeQNames.clear();
    assocTypeQNames.add(QName.createQName("http://www.alfresco.org/test/getchildrentest/1.0", "contains1"));
    assocTypeQNames.add(ContentModel.ASSOC_CONTAINS);
    childTypeQNames.clear();
    childTypeQNames.add(ContentModel.TYPE_CONTENT);
    children = filterByAssocTypeAndCheck(parentNodeRef, assocTypeQNames, childTypeQNames);
    assertEquals(2, children.size());
}

From source file:edu.brown.costmodel.SingleSitedCostModel.java

/**
 * Invalidates a single QueryCacheEntry. Returns true if the query's
 * TransactionCacheEntry parent needs to be invalidated as well.
 *
 * @param txn_entry the transaction entry that owns {@code query_entry}
 * @param query_entry the query entry being invalidated
 * @param invalidate_removedTouchedPartitions accumulates the partitions (and
 *        weights) removed here so the caller can subtract them from the
 *        global histogram
 * @return true when the transaction has no examined queries left and should
 *         be invalidated too
 */
private boolean invalidateQueryCacheEntry(TransactionCacheEntry txn_entry, QueryCacheEntry query_entry,
        Histogram<Integer> invalidate_removedTouchedPartitions) {
    if (trace.get())
        LOG.trace("Invalidate:" + query_entry);
    boolean invalidate_txn = false;

    // Subtract this query's weight from the parent transaction's counters.
    if (query_entry.isUnknown()) {
        txn_entry.unknown_queries -= query_entry.weight;
    } else {
        txn_entry.examined_queries -= query_entry.weight;
    }
    if (query_entry.isSinglesited()) {
        txn_entry.singlesite_queries -= query_entry.weight;
    } else {
        txn_entry.multisite_queries -= query_entry.weight;
    }

    // DEBUG!
    if (txn_entry.singlesite_queries < 0 || txn_entry.multisite_queries < 0) {
        LOG.error("!!! NEGATIVE QUERY COUNTS - TRACE #" + txn_entry.getTransactionId() + " !!!");
        LOG.error(txn_entry.debug());
        LOG.error(StringUtil.SINGLE_LINE);
        for (QueryCacheEntry q : txn_entry.query_entries) {
            LOG.error(q);
        }
    }
    assert (txn_entry.examined_queries >= 0) : txn_entry + " has negative examined queries!\n"
            + txn_entry.debug();
    assert (txn_entry.unknown_queries >= 0) : txn_entry + " has negative unknown queries!\n"
            + txn_entry.debug();
    assert (txn_entry.singlesite_queries >= 0) : txn_entry + " has negative singlesited queries!\n"
            + txn_entry.debug();
    assert (txn_entry.multisite_queries >= 0) : txn_entry + " has negative multisited queries!\n"
            + txn_entry.debug();

    // Populate this histogram so that we know what to remove from the
    // global histogram
    invalidate_removedTouchedPartitions.putAll(query_entry.getAllPartitions(),
            query_entry.weight * txn_entry.weight);

    // Remove the partitions this query touches from the txn's touched
    // partitions histogram
    final String debugBefore = txn_entry.debug();
    try {
        txn_entry.touched_partitions.removeValues(query_entry.getAllPartitions(), query_entry.weight);
    } catch (Throwable ex) {
        // Capture the pre-removal state in the error so the inconsistency is diagnosable.
        LOG.error(debugBefore, ex);
        throw new RuntimeException(ex);
    }

    // If this transaction is out of queries, then we'll remove it
    // completely
    if (txn_entry.examined_queries == 0) {
        invalidate_txn = true;
    }

    // Make sure that we do this last so we can subtract values from the
    // TranasctionCacheEntry
    query_entry.invalid = true;
    query_entry.singlesited = true;
    query_entry.unknown = true;
    for (Set<Integer> q_partitions : query_entry.partitions.values()) {
        q_partitions.clear();
    } // FOR
    query_entry.all_partitions.clear();

    this.query_ctr.addAndGet(-1 * query_entry.weight);
    return (invalidate_txn);
}