Example usage for java.util Set containsAll

List of usage examples for java.util Set containsAll

Introduction

On this page you can find example usage for java.util Set containsAll.

Prototype

boolean containsAll(Collection<?> c);

Document

Returns true if this set contains all of the elements of the specified collection.
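
For reference, here is a minimal, self-contained sketch (not taken from any of the projects below) illustrating the subset semantics of containsAll:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ContainsAllDemo {
    public static void main(String[] args) {
        Set<String> knownLabels = new HashSet<>(Arrays.asList("gpu", "ssd", "fast-network"));
        Set<String> requested = new HashSet<>(Arrays.asList("gpu", "ssd"));

        // true: every element of 'requested' is also in 'knownLabels'
        System.out.println(knownLabels.containsAll(requested));

        requested.add("windows");
        // false: "windows" is not among the known labels
        System.out.println(knownLabels.containsAll(requested));
    }
}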

Usage

From source file:org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.java

protected void checkReplaceLabelsOnNode(Map<NodeId, Set<String>> replaceLabelsToNode) throws IOException {
    if (null == replaceLabelsToNode || replaceLabelsToNode.isEmpty()) {
        return;
    }

    // check that all labels being added already exist
    Set<String> knownLabels = labelCollections.keySet();
    for (Entry<NodeId, Set<String>> entry : replaceLabelsToNode.entrySet()) {
        NodeId nodeId = entry.getKey();
        Set<String> labels = entry.getValue();

        // Per YARN-2694, users are not allowed to add more than one label to the same host
        if (labels.size() > 1) {
            String msg = String.format("%d labels specified on host=%s"
                    + ", please note that we do not support specifying multiple"
                    + " labels on a single host for now.", labels.size(), nodeId.getHost());
            LOG.error(msg);
            throw new IOException(msg);
        }

        if (!knownLabels.containsAll(labels)) {
            String msg = "Not all labels being replaced contained by known " + "label collections, please check"
                    + ", new labels=[" + StringUtils.join(labels, ",") + "]";
            LOG.error(msg);
            throw new IOException(msg);
        }
    }
}

From source file:ezbake.data.mongo.EzMongoHandler.java

private void validateFormalVisibilityForInsert(EzSecurityToken security, Visibility vis, DBObject dbObject,
        boolean fromDriver) throws EzMongoBaseException, VisibilityParseException {

    if (fromDriver) {
        Object fvFieldObj = dbObject.get(RedactHelper.FORMAL_VISIBILITY_FIELD);
        if (fvFieldObj == null) {
            return;
        }
        // We have already set the _ezFV field in the double-array [[ ]] format.
        //   Iterate through the inner list elements and check whether the token's
        //   auths contain any one of them in full.
        boolean canInsertBooleanExpression = false;
        List outerList = (List) fvFieldObj;
        Set<String> tokenAuths = security.getAuthorizations().getFormalAuthorizations();
        for (Object innerListObj : outerList) {
            // check whether the token auths contain every element of this inner list
            List innerList = (List) innerListObj;
            Set<String> innerSet = new HashSet<String>(innerList);
            if (tokenAuths.containsAll(innerSet)) {
                canInsertBooleanExpression = true;
                break;
            }
        }
        if (!canInsertBooleanExpression) {
            final String message = "User does not have all the required Formal Visibility auths to insert: "
                    + outerList + ", auths List: " + tokenAuths;
            appLog.error(message);
            throw new EzMongoBaseException(message);
        }

    } else {
        // check if the user can insert the Formal Visibility (Boolean expression).
        String classification = vis.getFormalVisibility();
        if (!StringUtils.isEmpty(classification)) {
            appLog.info("checking if the user has the required classification to insert: {}", classification);

            final boolean canInsertBooleanExpression = ClassificationUtils
                    .confirmAuthsForAccumuloClassification(security, classification,
                            ClassificationUtils.USER_FORMAL_AUTHS);

            if (!canInsertBooleanExpression) {
                final String message = "User does not have all the required classifications to insert: "
                        + classification;
                appLog.error(message);
                throw new EzMongoBaseException(message);
            }
        }
    }
}

From source file:ezbake.data.mongo.EzMongoHandler.java

private void validateExternalCommunityVisibilityForInsert(EzSecurityToken security, Visibility vis,
        DBObject dbObject, boolean fromDriver) throws EzMongoBaseException, VisibilityParseException {
    if (fromDriver) {
        Object extCVFieldObj = dbObject.get(RedactHelper.EXTERNAL_COMMUNITY_VISIBILITY_FIELD);
        if (extCVFieldObj == null) {
            return;
        }
        // We have already set the _ezExtV field in the double-array [[ ]] format.
        //   Iterate through the inner list elements and check whether the token's
        //   auths contain any one of them in full.
        boolean canInsertExtViz = false;
        List outerList = (List) extCVFieldObj;
        Set<String> tokenAuths = security.getAuthorizations().getExternalCommunityAuthorizations();
        for (Object innerListObj : outerList) {
            // check whether the token auths contain every element of this inner list
            List innerList = (List) innerListObj;
            Set<String> innerSet = new HashSet<String>(innerList);
            if (tokenAuths.containsAll(innerSet)) {
                canInsertExtViz = true;
                break;
            }
        }
        if (!canInsertExtViz) {
            final String message = "User does not have all the required External Community auths to insert: "
                    + outerList + ", auths List: " + tokenAuths;
            appLog.error(message);
            throw new EzMongoBaseException(message);
        }
    } else {
        if (!vis.isSetAdvancedMarkings()) {
            appLog.info("validateExternalCommunityVisibilityForInsert - AdvancedMarkings is not set.");
            return;
        }
        // check if the user can insert the External Community Visibility (boolean expression).
        String externalCommunityBooleanExpression = vis.getAdvancedMarkings().getExternalCommunityVisibility();
        if (!StringUtils.isEmpty(externalCommunityBooleanExpression)) {
            appLog.info("checking if the user has the required classification to insert: {}",
                    externalCommunityBooleanExpression);

            final boolean canInsertExternalCommunityViz = ClassificationUtils
                    .confirmAuthsForAccumuloClassification(security, externalCommunityBooleanExpression,
                            ClassificationUtils.USER_EXTERNAL_COMMUNITY_AUTHS);

            if (!canInsertExternalCommunityViz) {
                final String message = "User does not have all the required External Community auths to insert: "
                        + externalCommunityBooleanExpression;
                appLog.error(message);
                throw new EzMongoBaseException(message);
            }
        }
    }
}

From source file:org.opencb.opencga.storage.mongodb.variant.MongoDBVariantStoragePipeline.java

private StudyConfiguration preMerge(List<Integer> fileIds) throws StorageEngineException {
    int studyId = getStudyId();
    Set<Integer> fileIdsSet = new HashSet<>(fileIds);
    return dbAdaptor.getStudyConfigurationManager().lockAndUpdate(studyId, studyConfiguration -> {
        for (Integer fileId : fileIds) {
            if (studyConfiguration.getIndexedFiles().contains(fileId)) {
                throw StorageEngineException.alreadyLoaded(fileId, studyConfiguration);
            }
        }

        boolean loadMergeResume = isResumeMerge(options);

        List<BatchFileOperation> batches = studyConfiguration.getBatches();
        BatchFileOperation operation = null;
        for (int i = batches.size() - 1; i >= 0; i--) {
            BatchFileOperation op = batches.get(i);
            if (op.getOperationName().equals(MERGE.key()) && fileIds.size() == op.getFileIds().size()
                    && fileIdsSet.containsAll(op.getFileIds())) {
                switch (op.currentStatus()) {
                case READY:// Already indexed!
                    // TODO: Believe this ready? What if deleted?
                    // It was not "indexed" so suppose "deleted" ?
                    break;
                case DONE:
                    // Already merged but still needs some work.
                    logger.info("Files " + fileIds
                            + " where already merged, but where not marked as indexed files.");
                    options.put(MERGE_SKIP.key(), true);
                case RUNNING:
                    if (!loadMergeResume) {
                        throw MongoVariantStorageEngineException.filesBeingMergedException(fileIds);
                    }
                    break;
                case ERROR:
                    // Resume merge
                    loadMergeResume = true;
                    options.put(MERGE_RESUME.key(), loadMergeResume);
                    break;
                default:
                    throw new IllegalStateException("Unknown status: " + op.currentStatus());
                }
                operation = op;
                break;
            } else {
                // Can not merge any file if there is an ongoing MERGE or STAGE operation
                if (op.getOperationName().equals(MERGE.key()) || op.getOperationName().equals(STAGE.key())) {
                    if (!op.currentStatus().equals(BatchFileOperation.Status.READY)) {
                        throw MongoVariantStorageEngineException.operationInProgressException(op);
                    }
                }
            }
        }

        if (operation == null) {
            operation = new BatchFileOperation(MERGE.key(), fileIds, System.currentTimeMillis(),
                    BatchFileOperation.Type.LOAD);
            studyConfiguration.getBatches().add(operation);
            operation.addStatus(Calendar.getInstance().getTime(), BatchFileOperation.Status.RUNNING);
        } else if (operation.currentStatus() == BatchFileOperation.Status.ERROR) {
            // Only set to RUNNING if it was on ERROR
            operation.addStatus(Calendar.getInstance().getTime(), BatchFileOperation.Status.RUNNING);
        }
        return studyConfiguration;
    });
}

From source file:net.sourceforge.fenixedu.presentationTier.Action.teacher.onlineTests.TestsManagementAction.java

public ActionForward chooseAddShifts(ActionMapping mapping, ActionForm form, HttpServletRequest request,
        HttpServletResponse response) throws FenixActionException {
    final String distributedTestCode = getStringFromRequest(request, "distributedTestCode");

    final DistributedTest distributedTest = FenixFramework.getDomainObject(distributedTestCode);
    final ExecutionCourse executionCourse = getExecutionCourse(request);
    if (executionCourse == null) {
        throw new FenixActionException();
    }
    final Set<Registration> students = distributedTest.findStudents();
    final Set<Shift> associatedShifts = executionCourse.getAssociatedShifts();
    List<Shift> shiftList = new ArrayList<Shift>();
    for (Shift shift : associatedShifts) {
        Collection<Registration> shiftStudents = shift.getStudentsSet();
        if (!students.containsAll(shiftStudents)) {
            shiftList.add(shift);
        }
    }
    // Collections.sort(shiftList, new InfoShiftComparatorByLessonType());
    request.setAttribute("shiftList", shiftList);
    request.setAttribute("distributedTestCode", distributedTestCode);
    return doForward(request, "addShiftsToDistributedTest");
}

From source file:org.openconcerto.sql.model.SQLDataSource.java

synchronized void setTables(Set<SQLTable> tables) {
    // don't change the cache if we're only adding tables
    final boolean update = this.cache == null || !tables.containsAll(this.tables);
    this.tables = Collections.unmodifiableSet(new HashSet<SQLTable>(tables));
    if (update)
        updateCache();
}
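
The example above uses containsAll as a superset test: if the new table set contains every previously known table, only additions occurred and the existing cache can be kept. A minimal sketch of that idiom, with hypothetical names, might look like this:

import java.util.Set;

final class SetChangeUtil {
    // Hypothetical helper: returns true when 'updated' only adds elements relative to
    // 'previous', i.e. nothing that was in 'previous' has been removed.
    static <T> boolean isPureAddition(Set<T> previous, Set<T> updated) {
        return updated.containsAll(previous);
    }
}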

From source file:org.icgc.dcc.portal.resource.core.DownloadResource.java

@ApiOperation("Get archive based by type subject to the supplied filter condition(s)")
@GET
@Timed
@Path("/{downloadId}")
public Response getFullArchive(

        @Auth(required = false) User user,

        @PathParam("downloadId") String downloadId

) throws IOException {
    boolean isLogin = isLogin(user);
    ResponseBuilder rb = ok();
    StreamingOutput archiveStream = null;
    String filename = null;
    // dynamic download
    if (!downloader.isServiceAvailable() || downloader.isOverCapacity())
        throw new ServiceUnavailableException("Downloader is disabled");

    final Set<DataType> allowedDataTypes = isLogin ? AccessControl.FullAccessibleDataTypes
            : AccessControl.PublicAccessibleDataTypes;

    Map<String, JobStatus> jobStatus = downloader.getStatus(ImmutableSet.<String>of(downloadId));
    JobStatus status = jobStatus.get(downloadId);
    if (status == null || status.isExpired()) {

        throw new NotFoundException(downloadId, "download");
    }

    Map<DataType, JobProgress> typeProgressMap = status.getProgressMap();
    for (Entry<DataType, JobProgress> typeStatus : typeProgressMap.entrySet()) {
        if (!typeStatus.getValue().isCompleted()) {
            throw new NotFoundException(downloadId, "download");
        }
    }
    // check if types are allowed for download
    Set<DataType> availableDataTypeGroup = Sets.intersection(typeProgressMap.keySet(),
            DataTypeGroupMap.keySet());
    if (!allowedDataTypes.containsAll(availableDataTypeGroup)) {
        log.error("permission denied for download types that need access control: " + typeProgressMap.entrySet()
                + ", download id: " + downloadId);
        throw new NotFoundException(downloadId, "download");
    }

    archiveStream = archiveStream(downloadId, ImmutableList.copyOf(typeProgressMap.keySet()));
    filename = fileName(FULL_ARCHIVE_EXTENSION);

    return rb.entity(archiveStream).type(getFileMimeType(filename))
            .header(CONTENT_DISPOSITION, type("attachment").fileName(filename).creationDate(new Date()).build())
            .build();
}

From source file:org.ow2.mind.unit.Launcher.java

protected void invokeOptionHandlers(final PluginManager pluginManagerItf, final CommandLine cmdLine,
        final Map<Object, Object> context) throws InvalidCommandLineException {
    final List<CmdOption> toBeExecuted = new LinkedList<CmdOption>(cmdLine.getOptions().getOptions());
    final Set<String> executedId = new HashSet<String>(toBeExecuted.size());
    while (!toBeExecuted.isEmpty()) {
        final int toBeExecutedSize = toBeExecuted.size();
        final Iterator<CmdOption> iter = toBeExecuted.iterator();
        while (iter.hasNext()) {
            final CmdOption option = iter.next();
            final List<String> precedenceIds = CommandLineOptionExtensionHelper.getPrecedenceIds(option,
                    pluginManagerItf);
            if (executedId.containsAll(precedenceIds)) {
                // task ready to be executed
                for (final CommandOptionHandler handler : CommandLineOptionExtensionHelper.getHandler(option,
                        pluginManagerItf)) {
                    handler.processCommandOption(option, cmdLine, context);
                }
                executedId.add(option.getId());
                iter.remove();
            }
        }
        if (toBeExecutedSize == toBeExecuted.size()) {
            // nothing has been executed. there is a circular dependency
            throw new CompilerError(GenericErrors.GENERIC_ERROR,
                    "Circular dependency in command line option handlers: " + toBeExecuted);
        }
    }
}

From source file:edu.unc.lib.dl.services.DigitalObjectManagerMoveTest.java

@Test
public void oneSourceTest() throws Exception {

    makeMatcherPair("/fedora/containerRELSEXT1.xml", source1PID);

    List<PID> moving = Arrays.asList(new PID("uuid:child1"), new PID("uuid:child5"));

    when(tripleStoreQueryService.fetchContainer(any(PID.class))).thenReturn(source1PID);

    digitalMan.move(moving, destPID, "user", "");

    verify(accessClient, times(2)).getDatastreamDissemination(eq(source1PID), eq(RELS_EXT.getName()),
            anyString());

    ArgumentCaptor<Document> sourceRelsExtUpdateCaptor = ArgumentCaptor.forClass(Document.class);

    verify(managementClient, times(2)).modifyDatastream(eq(source1PID), eq(RELS_EXT.getName()), anyString(),
            anyString(), sourceRelsExtUpdateCaptor.capture());

    List<Document> sourceRelsAnswers = sourceRelsExtUpdateCaptor.getAllValues();
    // Check the state of the source after removal but before cleanup
    Document sourceRelsExt = sourceRelsAnswers.get(0);
    Set<PID> children = JDOMQueryUtil.getRelationSet(sourceRelsExt.getRootElement(), contains);
    assertEquals("Incorrect number of children in source container after move", 10, children.size());

    Set<PID> removed = JDOMQueryUtil.getRelationSet(sourceRelsExt.getRootElement(), removedChild);
    assertEquals("Moved child gravestones not correctly set in source container", 2, removed.size());

    // Check that tombstones were cleaned up by the end of the operation
    Document cleanRelsExt = sourceRelsAnswers.get(1);
    children = JDOMQueryUtil.getRelationSet(cleanRelsExt.getRootElement(), contains);
    assertEquals("Incorrect number of children in source container after cleanup", 10, children.size());

    removed = JDOMQueryUtil.getRelationSet(cleanRelsExt.getRootElement(), removedChild);
    assertEquals("Child tombstones not cleaned up", 0, removed.size());

    // Verify that the destination had the moved children added to it
    verify(accessClient).getDatastreamDissemination(eq(destPID), eq(RELS_EXT.getName()), anyString());
    ArgumentCaptor<Document> destRelsExtUpdateCaptor = ArgumentCaptor.forClass(Document.class);
    verify(managementClient).modifyDatastream(eq(destPID), eq(RELS_EXT.getName()), anyString(), anyString(),
            destRelsExtUpdateCaptor.capture());
    assertFalse("Moved children were still present in source", children.containsAll(moving));

    Document destRelsExt = destRelsExtUpdateCaptor.getValue();
    children = JDOMQueryUtil.getRelationSet(destRelsExt.getRootElement(), contains);
    assertEquals("Incorrect number of children in destination container after moved", 9, children.size());
    assertTrue("Moved children were not present in destination", children.containsAll(moving));
}

From source file:com.vmware.vhadoop.vhm.hadoop.HadoopAdaptor.java

@Override
/* Returns the set of active dnsNames based on input Set */
public Set<String> checkTargetTTsSuccess(String opType, Set<String> ttDnsNames, int totalTargetEnabled,
        HadoopClusterInfo cluster) {
    String scriptRemoteFilePath = JOB_TRACKER_DEFAULT_SCRIPT_DEST_PATH + JOB_TRACKER_CHECK_SCRIPT_FILE_NAME;
    String listRemoteFilePath = null;
    String opDesc = "checkTargetTTsSuccess";

    if (ttDnsNames == null) {
        _log.warning("No valid TT names provided");
        return null;
    }

    /* We don't expect null or empty values, but weed out anyway */
    ttDnsNames.remove(null);
    ttDnsNames.remove("");
    if (ttDnsNames.size() == 0) {
        _log.warning("No valid TT names provided");
        return null;
    }

    _log.log(Level.INFO, "Affected TTs: " + ttDnsNames);

    setErrorParamsForCommand(cluster, opDesc, scriptRemoteFilePath, listRemoteFilePath);

    int iterations = 0;
    CompoundStatus getActiveStatus = null;
    int rc = UNKNOWN_ERROR;
    Set<String> allActiveTTs = null;
    long lastCheckAttemptTime = Long.MAX_VALUE;
    do {
        if (iterations > 0) {
            /* 1141429: Ensure that if the script fails, there is a minimum wait before the next retry attempt */
            long millisSinceLastCheck = (System.currentTimeMillis() - lastCheckAttemptTime);
            long underWaitMillis = JOB_TRACKER_CHECK_SCRIPT_MIN_RETRY_MILLIS - millisSinceLastCheck;
            if (underWaitMillis > 0) {
                try {
                    _log.fine("Sleeping for underWaitMillis = " + underWaitMillis);
                    Thread.sleep(underWaitMillis);
                } catch (InterruptedException e) {
                }
            }
            _log.log(Level.INFO, "Target TTs not yet achieved...checking again - " + iterations);
            _log.log(Level.INFO, "Affected TTs: " + ttDnsNames);
        }

        getActiveStatus = new CompoundStatus(ACTIVE_TTS_STATUS_KEY);

        lastCheckAttemptTime = System.currentTimeMillis();
        allActiveTTs = getActiveTTs(cluster, totalTargetEnabled, getActiveStatus);

        //Declare success as long as we manage to de/recommission only the TTs we set out to handle (rather than checking correctness for all TTs)
        if ((allActiveTTs != null) && ((opType.equals("Recommission") && allActiveTTs.containsAll(ttDnsNames))
                || (opType.equals("Decommission") && ttDnsNames.retainAll(allActiveTTs)
                        && ttDnsNames.isEmpty()))) {
            _log.log(Level.INFO, "All selected TTs correctly %sed", opType.toLowerCase());
            rc = SUCCESS;
            break;
        }

        /* If there was an error reported by getActiveTTs... */
        TaskStatus taskStatus = getActiveStatus.getFirstFailure(STATUS_INTERPRET_ERROR_CODE);
        if (taskStatus != null) {
            rc = taskStatus.getErrorCode();
        } else {
            /*
             * JG: Sometimes we don't know the hostnames (e.g., localhost); in these cases as long as the check script returns success based
             * on target #TTs we are good.
             * TODO: Change check script to return success if #newly added + #current_enabled is met rather than target #TTs is met. This is
             * to address scenarios where there is a mismatch (#Active TTs != #poweredOn VMs) to begin with...
             * CHANGED: We have changed the time at which this function is invoked -- it gets invoked only when dns/hostnames are available.
             * So we no longer have this issue of not knowing hostnames and still meeting target #TTs. Our only successful exit is when the
             * TTs that have been explicitly asked to be checked, have been correctly de/recommissioned.
             *
             * rc = SUCCESS; //Note: removing this
             *
             * We also notice that in this case, where #Active TTs matches target, but all the requested TTs haven't been de/recommissioned yet,
             * the check script returns immediately (because it only looks for a match of these values, which is true here). So we recompute
             * target TTs based on latest information to essentially put back the delay...
             */

            Set<String> deltaTTs = new HashSet<String>(ttDnsNames);
            if (opType.equals("Recommission")) {
                deltaTTs.removeAll(allActiveTTs); //get TTs that haven't been recommissioned yet...
                totalTargetEnabled = allActiveTTs.size() + deltaTTs.size();
            } else { //optype = Decommission
                deltaTTs.retainAll(allActiveTTs); //get TTs that haven't been decommissioned yet...
                totalTargetEnabled = allActiveTTs.size() - deltaTTs.size();
            }

            _log.log(Level.INFO,
                    "Even though #ActiveTTs = #TargetTTs, not all requested TTs have been "
                            + opType.toLowerCase() + "ed yet - Trying again with updated target: "
                            + totalTargetEnabled);
        }

        /* Break out if there is an error other than the ones we expect to be resolved in a subsequent invocation of the check script */
        if (rc != ERROR_FEWER_TTS && rc != ERROR_EXCESS_TTS && rc != UNKNOWN_ERROR) {
            break;
        }
    } while (iterations++ < ACTIVE_TASK_TRACKERS_CHECK_RETRY_ITERATIONS);

    getCompoundStatus().addStatus(_errorCodes.interpretErrorCode(_log, rc, getErrorParamValues(cluster)));
    if (rc != SUCCESS) {
        getActiveStatus.registerTaskFailed(false, "Check Test Failed");
        getCompoundStatus().addStatus(getActiveStatus);
    }

    return allActiveTTs;
}