Example usage for java.util LinkedList addAll

Introduction

This page lists example usages of java.util.LinkedList.addAll, drawn from open-source projects.

Prototype

public boolean addAll(Collection<? extends E> c) 

Document

Appends all of the elements in the specified collection to the end of this list, in the order that they are returned by the specified collection's iterator.
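
Before the real-world examples below, here is a minimal, self-contained sketch of the method in isolation (the class and variable names are ours, purely for illustration):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class LinkedListAddAllDemo {
    public static void main(String[] args) {
        // Start with a list that already holds two elements.
        LinkedList<String> names = new LinkedList<>(Arrays.asList("alice", "bob"));

        // addAll appends the collection's elements at the end, in the
        // order that the collection's iterator returns them.
        List<String> more = Arrays.asList("carol", "dave");
        boolean changed = names.addAll(more);

        System.out.println(changed); // true: the list was modified
        System.out.println(names);   // [alice, bob, carol, dave]
    }
}

The boolean return value reports whether the list changed as a result of the call.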

Usage

From source file:uk.ac.cam.cl.dtg.util.locations.PostCodeIOLocationResolver.java

@Override
public List<Long> filterPostcodesWithinProximityOfPostcode(final Map<String, List<Long>> postCodeIDMap,
        final String targetPostCode, final PostCodeRadius postCodeRadius)
        throws LocationServerException, SegueDatabaseException {

    if (null == postCodeIDMap) {
        throw new LocationServerException("Map of postcodes cannot be null");
    }

    final Map<String, List<Long>> cleanPostCodeIDMap = Maps.newHashMap();
    for (String key : postCodeIDMap.keySet()) {
        List<Long> val = postCodeIDMap.get(key);
        if (key != null) {
            cleanPostCodeIDMap.put(key.replace(" ", ""), val);
        }
    }

    LinkedList<Long> resultingUserIds = new LinkedList<Long>();

    // first do a database lookup, then fallback on the service
    List<PostCode> knownPostCodes = Lists.newArrayList();
    List<String> unknownPostCodes = Lists.newArrayList();
    for (String postCode : cleanPostCodeIDMap.keySet()) {
        PostCode result = this.locationHistory.getPostCode(postCode);
        if (null == result) {
            unknownPostCodes.add(postCode);
        } else {
            knownPostCodes.add(result);
        }
    }

    // add the target postcode, so we can do it in one request
    PostCode targetPostCodeObject = this.locationHistory.getPostCode(targetPostCode);

    if (null == targetPostCodeObject) {
        List<String> targetPostCodeList = Lists.newArrayList();
        targetPostCodeList.add(targetPostCode);
        List<PostCode> results = submitPostCodeRequest(targetPostCodeList);
        if (results != null && results.size() == 1) {
            targetPostCodeObject = results.get(0);
        } else {
            throw new LocationServerException(
                    "Location service failed to return valid lat/lon for target postcode");
        }
    }

    List<PostCode> foundPostCodes = carryOutExternalPostCodeServiceRequest(unknownPostCodes);

    // Store new postcodes back to the database
    this.locationHistory.storePostCodes(foundPostCodes);

    knownPostCodes.addAll(foundPostCodes);

    for (PostCode postCode : knownPostCodes) {

        if (null == postCode.getLat() || null == postCode.getLon()) {
            continue;
        }

        double distInMiles = getLatLonDistanceInMiles(targetPostCodeObject.getLat(),
                targetPostCodeObject.getLon(), postCode.getLat(), postCode.getLon());

        if (distInMiles <= postCodeRadius.getDistance()
                && cleanPostCodeIDMap.containsKey(postCode.getPostCode())) {
            // Add this to a list, with user ids
            resultingUserIds.addAll(cleanPostCodeIDMap.get(postCode.getPostCode()));
        }

    }

    return resultingUserIds;
}

From source file:org.artifactory.repo.service.RepositoryServiceImpl.java

private ItemInfo collectLastModified(RepoPath pathToSearch) {
    TreeBrowsingCriteria criteria = new TreeBrowsingCriteriaBuilder().applySecurity().build();
    ItemTree itemTree = new ItemTree(pathToSearch, criteria);
    LinkedList<ItemNode> fringe = Lists.newLinkedList();
    fringe.add(itemTree.getRootNode());
    ItemInfo lastModified = null;
    while (!fringe.isEmpty()) {
        ItemNode last = fringe.removeLast();
        if (last.hasChildren()) {
            fringe.addAll(last.getChildren());
        }
        if (!last.isFolder()) {
            if (lastModified == null || last.getItemInfo().getLastModified() > lastModified.getLastModified()) {
                lastModified = last.getItemInfo();
            }
        }
    }
    return lastModified;
}
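
The method above uses the LinkedList as a LIFO work list: removeLast pops the most recently added node, and addAll pushes all of a node's children in a single call. Below is a stripped-down sketch of the same idiom, with a hypothetical Node type standing in for Artifactory's ItemNode:

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

public class FringeDemo {
    // Hypothetical tree node, standing in for ItemNode.
    static class Node {
        final String name;
        final List<Node> children;
        Node(String name, List<Node> children) {
            this.name = name;
            this.children = children;
        }
    }

    public static void main(String[] args) {
        Node leaf = new Node("leaf", Collections.<Node>emptyList());
        Node root = new Node("root", Collections.singletonList(leaf));

        LinkedList<Node> fringe = new LinkedList<>();
        fringe.add(root);
        while (!fringe.isEmpty()) {
            Node last = fringe.removeLast(); // LIFO pop: depth-first order
            fringe.addAll(last.children);    // push every child in one call
            System.out.println(last.name);   // prints root, then leaf
        }
    }
}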

From source file:org.bimserver.charting.SupportFunctions.java

public static ArrayList<LinkedHashMap<String, Object>> getDataWithTreeStructure(String structureKeyword,
        IfcModelInterface model, Chart chart) {
    ArrayList<LinkedHashMap<String, Object>> rawData = new ArrayList<>();
    // Get units.
    String units = "units";
    SIPrefix prefix = SupportFunctions.getLengthUnitPrefix(model);
    if (prefix != null)
        units = prefix.getLiteral();
    // Prepare for static iteration.
    int maxDepth = 0;
    LinkedList<IfcObjectWithTrace> leaves = new LinkedList<>();
    LinkedList<IfcObjectWithTrace> parts = new LinkedList<>();
    // Iterate, but start with projects.
    for (IfcProject ifcProject : model.getAll(IfcProject.class))
        parts.add(new IfcObjectWithTrace(ifcProject));
    // Iterate the IFC going 1 level at a time (ex: Projects -> Sites, then Sites -> Buildings, then Buildings -> IfcProducts, then IfcProducts -> IfcProducts).
    while (parts.size() > 0) {
        IfcObjectWithTrace entry = parts.pop();
        StackTrace traceAtThisPoint = entry.Key;
        IfcObject parentObject = entry.Value;
        // Get name to be added to stack.
        int parentId = parentObject.getExpressId();
        String ifcParentName = (parentId >= 0)
                ? String.format("%s (%d)", parentObject.getName(), parentObject.getExpressId())
                : parentObject.getName();
        // Make the stack trace.
        StackTrace traceAtChildren = new StackTrace(traceAtThisPoint);
        traceAtChildren.add(ifcParentName);
        // Track the children that are getting put into the raw data at this point.
        LinkedList<IfcObjectWithTrace> childrenInThisPass = new LinkedList<>();
        // Walk the relationship from the parent to its child objects.
        for (IfcRelDecomposes ifcRelDecomposes : parentObject.getIsDecomposedBy()) {
            // Iterate what the object decomposes into.
            for (IfcObjectDefinition definition : ifcRelDecomposes.getRelatedObjects())
                childrenInThisPass.add(new IfcObjectWithTrace(traceAtChildren, (IfcObject) definition));
        }
        // If IfcObject happens to be something like an IfcBuildingStorey, go looking through its structure.
        if (parentObject instanceof IfcSpatialStructureElement) {
            IfcSpatialStructureElement ifcSpatialStructureElement = (IfcSpatialStructureElement) parentObject;
            for (IfcRelContainedInSpatialStructure ifcRelContainedInSpatialStructure : ifcSpatialStructureElement
                    .getContainsElements())
                for (IfcProduct ifcProduct : ifcRelContainedInSpatialStructure.getRelatedElements()) {
                    Double area = getRoughAreaEstimateFromIfcProduct(ifcProduct);
                    childrenInThisPass.add(new IfcObjectWithTrace(traceAtChildren, ifcProduct, area));
                }
        }
        // Test if this node is a leaf. If it is, keep it.
        if (childrenInThisPass.size() == 0) {
            leaves.add(entry);
            // Update depth.
            int depthAtThisPoint = traceAtThisPoint.size() + 1;
            if (depthAtThisPoint > maxDepth)
                maxDepth = depthAtThisPoint;
        } else
            parts.addAll(childrenInThisPass);
    }
    // Derive the column names.
    ArrayList<String> hierarchyColumnNames = new ArrayList<>();
    for (int i = 0; i < maxDepth; i++)
        hierarchyColumnNames.add(String.format("%s%d", structureKeyword, i + 1));
    // Update the chart configuration.
    chart.setDimensionLookupKeys(structureKeyword, hierarchyColumnNames);
    chart.setDimensionLookupKey("size", "size");
    chart.setDimensionLookupKey("label", "label");
    chart.setDimensionLookupKey("color", hierarchyColumnNames.get(Math.max(0, maxDepth - 2)));
    // Iterate the leaf nodes.
    for (IfcObjectWithTrace leaf : leaves) {
        StackTrace traceAtThisPoint = leaf.Key;
        IfcObject leafObject = leaf.Value;
        // Prepare to store this raw data entry.
        LinkedHashMap<String, Object> leafDataEntry = new LinkedHashMap<>();
        // Prepare to iterate backwards along column names (ex. hierarchy10, ..., hierarchy1).
        int leafDepthIndex = maxDepth - 1;
        int sizeOfStack = traceAtThisPoint.size();
        int stackUpperBound = leafDepthIndex - 1;
        int stackLowerRange = stackUpperBound - sizeOfStack;
        // Iterate backwards along column names.
        for (int i = leafDepthIndex; i >= 0; i--) {
            String column = hierarchyColumnNames.get(i);
            String value;
            if (i == leafDepthIndex) {
                value = String.format("%s (%d)", leafObject.getName(), leafObject.getOid());
                if (units != null && leaf.Size != null) {
                    if (leaf.Size > 0)
                        value += String.format(" ~%s %s\u00B2", leaf.Size.intValue(), units);
                    else
                        value += String.format(" %s %s\u00B2", leaf.Size, units);
                }
                leafDataEntry.put("label", leafObject.getName());
                leafDataEntry.put("size", leaf.Size);
            } else if (stackLowerRange < i && i <= stackUpperBound) {
                int index = sizeOfStack - (stackUpperBound - i) - 1;
                value = traceAtThisPoint.get(index);
            } else
                value = null;
            // Add column.
            leafDataEntry.put(column, value);
        }
        // Add the data.
        rawData.add(leafDataEntry);
    }
    // Send it all back.
    return rawData;
}

From source file:org.opencb.opencga.storage.mongodb.variant.VariantMongoDBAdaptor.java

private Document parseQuery(Query query, Document mongoQuery) {
    QueryBuilder builder = new QueryBuilder();
    if (query != null) {
        /** VARIANT PARAMS **/
        if (query.get(VariantQueryParams.CHROMOSOME.key()) != null
                && !query.getString(VariantQueryParams.CHROMOSOME.key()).isEmpty()) {
            List<String> chromosomes = query.getAsStringList(VariantQueryParams.CHROMOSOME.key());
            LinkedList<String> regions = new LinkedList<>(
                    query.getAsStringList(VariantQueryParams.REGION.key()));
            regions.addAll(chromosomes);
            query.put(VariantQueryParams.REGION.key(), regions);
        }

        if (query.get(VariantQueryParams.REGION.key()) != null
                && !query.getString(VariantQueryParams.REGION.key()).isEmpty()) {
            List<String> stringList = query.getAsStringList(VariantQueryParams.REGION.key());
            List<Region> regions = new ArrayList<>(stringList.size());
            for (String reg : stringList) {
                Region region = Region.parseRegion(reg);
                regions.add(region);
            }
            getRegionFilter(regions, builder);
        }

        if (query.get(VariantQueryParams.ID.key()) != null
                && !query.getString(VariantQueryParams.ID.key()).isEmpty()) {
            List<String> idsList = query.getAsStringList(VariantQueryParams.ID.key());
            for (String id : idsList) {
                if (id.contains(":")) {
                    try {
                        Variant variant = new Variant(id);
                        String mongoId = MongoDBVariantStageLoader.STRING_ID_CONVERTER.buildId(variant);
                        addQueryStringFilter("_id", mongoId, builder, QueryOperation.OR);
                    } catch (IllegalArgumentException ignore) {
                        logger.info("Wrong variant " + id);
                    }
                }
            }
            String ids = query.getString(VariantQueryParams.ID.key());
            addQueryStringFilter(
                    DocumentToVariantConverter.ANNOTATION_FIELD + "."
                            + DocumentToVariantAnnotationConverter.XREFS_FIELD + "."
                            + DocumentToVariantAnnotationConverter.XREF_ID_FIELD,
                    ids, builder, QueryOperation.OR);
            addQueryStringFilter(DocumentToVariantConverter.IDS_FIELD, ids, builder, QueryOperation.OR);
        }

        if (query.containsKey(VariantQueryParams.GENE.key())) {
            String xrefs = query.getString(VariantQueryParams.GENE.key());
            addQueryStringFilter(
                    DocumentToVariantConverter.ANNOTATION_FIELD + "."
                            + DocumentToVariantAnnotationConverter.XREFS_FIELD + "."
                            + DocumentToVariantAnnotationConverter.XREF_ID_FIELD,
                    xrefs, builder, QueryOperation.OR);
        }

        if (query.containsKey(VariantQueryParams.REFERENCE.key())
                && query.getString(VariantQueryParams.REFERENCE.key()) != null) {
            addQueryStringFilter(DocumentToVariantConverter.REFERENCE_FIELD,
                    query.getString(VariantQueryParams.REFERENCE.key()), builder, QueryOperation.AND);
        }

        if (query.containsKey(VariantQueryParams.ALTERNATE.key())
                && query.getString(VariantQueryParams.ALTERNATE.key()) != null) {
            addQueryStringFilter(DocumentToVariantConverter.ALTERNATE_FIELD,
                    query.getString(VariantQueryParams.ALTERNATE.key()), builder, QueryOperation.AND);
        }

        if (query.containsKey(VariantQueryParams.TYPE.key())
                && !query.getString(VariantQueryParams.TYPE.key()).isEmpty()) {
            addQueryFilter(DocumentToVariantConverter.TYPE_FIELD,
                    query.getString(VariantQueryParams.TYPE.key()), builder, QueryOperation.AND, s -> {
                        Set<VariantType> subTypes = Variant.subTypes(VariantType.valueOf(s));
                        List<String> types = new ArrayList<>(subTypes.size() + 1);
                        types.add(s);
                        subTypes.forEach(subType -> types.add(subType.toString()));
                        return types;
                    }); //addQueryStringFilter(DBObjectToVariantConverter.TYPE_FIELD,
            //                query.getString(VariantQueryParams.TYPE.key()), builder, QueryOperation.AND);
        }

        /** ANNOTATION PARAMS **/
        parseAnnotationQueryParams(query, builder);

        /** STUDIES **/
        final StudyConfiguration defaultStudyConfiguration = parseStudyQueryParams(query, builder);

        /** STATS PARAMS **/
        parseStatsQueryParams(query, builder, defaultStudyConfiguration);
    }
    logger.debug("Find = " + builder.get());
    mongoQuery.putAll(builder.get().toMap());
    return mongoQuery;
}

From source file:com.redsqirl.workflow.server.Workflow.java

/**
 * Get a list of DataFlowElements from two lists and remove duplicates.
 *
 * @param l1
 * @param l2
 * @return List of DataFlowElements without duplicates
 */
protected LinkedList<DataFlowElement> getAllWithoutDuplicate(List<DataFlowElement> l1,
        List<DataFlowElement> l2) {
    LinkedList<DataFlowElement> ans = new LinkedList<DataFlowElement>();
    ans.addAll(l1);
    Iterator<DataFlowElement> itCur = l2.iterator();
    while (itCur.hasNext()) {
        DataFlowElement cans = itCur.next();
        if (!ans.contains(cans)) {
            ans.add(cans);
        }
    }
    return ans;
}
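
A note on this pattern: ans.contains(cans) is a linear scan, so the merge above is quadratic in the worst case. If the element type has consistent equals/hashCode, a LinkedHashSet achieves the same order-preserving de-duplication in roughly linear time. A sketch of that alternative (ours, not taken from the Workflow source):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;

public class MergeWithoutDuplicates {
    // Merge two lists, keeping first-seen order and dropping duplicates.
    static <T> LinkedList<T> merge(List<T> l1, List<T> l2) {
        LinkedHashSet<T> seen = new LinkedHashSet<>(l1); // preserves insertion order
        seen.addAll(l2);                                 // duplicates are ignored
        return new LinkedList<>(seen);
    }

    public static void main(String[] args) {
        System.out.println(merge(Arrays.asList("a", "b"), Arrays.asList("b", "c"))); // [a, b, c]
    }
}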

From source file:com.oltpbenchmark.benchmarks.seats.SEATSWorker.java

/**
 * Execute the FindOpenSeat procedure
 * @throws SQLException
 */
private boolean executeFindOpenSeats(FindOpenSeats proc) throws SQLException {
    final FlightId search_flight = this.profile.getRandomFlightId();
    assert (search_flight != null);
    Long airport_depart_id = search_flight.getDepartAirportId();

    if (LOG.isTraceEnabled())
        LOG.trace("Calling " + proc);
    Object[][] results = proc.run(conn, search_flight.encode());
    conn.commit();

    int rowCount = results.length;
    assert (rowCount <= SEATSConstants.FLIGHTS_NUM_SEATS) : String
            .format("Unexpected %d open seats returned for %s", rowCount, search_flight);

    // there is some tiny probability of an empty flight .. maybe 1/(20**150)
    // if you hit this assert (with valid code), play the lottery!
    if (rowCount == 0)
        return (true);

    LinkedList<Reservation> cache = CACHE_RESERVATIONS.get(CacheType.PENDING_INSERTS);
    assert (cache != null) : "Unexpected " + CacheType.PENDING_INSERTS;

    // Store pending reservations in our queue for a later transaction            
    BitSet seats = getSeatsBitSet(search_flight);
    tmp_reservations.clear();

    for (Object row[] : results) {
        if (row == null)
            continue; //  || rng.nextInt(100) < 75) continue; // HACK
        Integer seatnum = (Integer) row[1];

        // We first try to get a CustomerId based at this departure airport
        if (LOG.isTraceEnabled())
            LOG.trace("Looking for a random customer to fly on " + search_flight);
        CustomerId customer_id = profile.getRandomCustomerId(airport_depart_id);

        // We will go for a random one if:
        //  (1) The Customer is already booked on this Flight
        //  (2) We already made a new Reservation just now for this Customer
        int tries = SEATSConstants.FLIGHTS_NUM_SEATS;
        while (tries-- > 0 && (customer_id == null)) { //  || isCustomerBookedOnFlight(customer_id, flight_id))) {
            customer_id = profile.getRandomCustomerId();
            if (LOG.isTraceEnabled())
                LOG.trace("RANDOM CUSTOMER: " + customer_id);
        } // WHILE
        assert (customer_id != null) : String.format(
                "Failed to find a unique Customer to reserve for seat #%d on %s", seatnum, search_flight);

        Reservation r = new Reservation(profile.getNextReservationId(getId()), search_flight, customer_id,
                seatnum.intValue());
        seats.set(seatnum);
        tmp_reservations.add(r);
        if (LOG.isTraceEnabled())
            LOG.trace(
                    "QUEUED INSERT: " + search_flight + " / " + search_flight.encode() + " -> " + customer_id);
    } // WHILE

    if (tmp_reservations.isEmpty() == false) {
        Collections.shuffle(tmp_reservations);
        cache.addAll(tmp_reservations);
        while (cache.size() > SEATSConstants.CACHE_LIMIT_PENDING_INSERTS) {
            cache.remove();
        } // WHILE
        if (LOG.isDebugEnabled())
            LOG.debug(String.format("Stored %d pending inserts for %s [totalPendingInserts=%d]",
                    tmp_reservations.size(), search_flight, cache.size()));
    }
    return (true);
}
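
The cache handling at the end illustrates a bounded-queue idiom: addAll appends the whole batch first, then the while loop evicts head (oldest) entries until the size cap holds again. A minimal sketch (the limit and element type are ours, standing in for CACHE_LIMIT_PENDING_INSERTS and Reservation):

import java.util.Arrays;
import java.util.LinkedList;

public class BoundedQueueDemo {
    static final int CACHE_LIMIT = 3; // hypothetical cap

    public static void main(String[] args) {
        LinkedList<Integer> cache = new LinkedList<>(Arrays.asList(1, 2));
        cache.addAll(Arrays.asList(3, 4, 5)); // append the whole batch first...
        while (cache.size() > CACHE_LIMIT) {
            cache.remove();                   // ...then evict from the head (oldest)
        }
        System.out.println(cache); // [3, 4, 5]
    }
}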

From source file:eu.stratosphere.nephele.instance.ec2.EC2CloudManager.java

/**
 * {@inheritDoc}
 */
@Override
public void requestInstance(final JobID jobID, Configuration conf, final InstanceRequestMap instanceRequestMap,
        final List<String> splitAffinityList) throws InstanceException {

    if (conf == null) {
        throw new IllegalArgumentException("No job configuration provided, unable to acquire credentials");
    }

    // First check if all required configuration entries are available

    final String awsAccessId = conf.getString(AWS_ACCESS_ID_KEY, null);
    if (awsAccessId == null) {
        throw new InstanceException("Unable to allocate cloud instance: Cannot find AWS access ID");
    }

    final String awsSecretKey = conf.getString(AWS_SECRET_KEY_KEY, null);
    if (awsSecretKey == null) {
        throw new InstanceException("Unable to allocate cloud instance: Cannot find AWS secret key");
    }

    if (conf.getString(AWS_AMI_KEY, null) == null) {
        throw new InstanceException("Unable to allocate cloud instance: Cannot find AMI image ID");
    }

    // First we check if there are any orphaned instances that are accessible with the provided configuration
    checkAndConvertOrphanedInstances(conf);

    // Check if there already exists a mapping for this job
    JobToInstancesMapping jobToInstanceMapping = null;
    synchronized (this.jobToInstancesAssignmentMap) {
        jobToInstanceMapping = this.jobToInstancesAssignmentMap.get(jobID);

        // Create new mapping if it does not yet exist
        if (jobToInstanceMapping == null) {
            LOG.debug("Creating new mapping for job " + jobID);
            jobToInstanceMapping = new JobToInstancesMapping(awsAccessId, awsSecretKey);
            this.jobToInstancesAssignmentMap.put(jobID, jobToInstanceMapping);
        }
    }

    // Check if there already exists a network topology for this job
    NetworkTopology networkTopology = null;
    synchronized (this.networkTopologies) {
        networkTopology = this.networkTopologies.get(jobID);
        if (networkTopology == null) {
            networkTopology = NetworkTopology.createEmptyTopology();
            this.networkTopologies.put(jobID, networkTopology);
        }
    }

    // Our bill with all instances that we will provide...
    final LinkedList<FloatingInstance> floatingInstances = new LinkedList<FloatingInstance>();
    final LinkedList<String> requestedInstances = new LinkedList<String>();

    // We iterate over the maximum instance counts requested per type...
    final Iterator<Map.Entry<InstanceType, Integer>> it = instanceRequestMap.getMaximumIterator();

    while (it.hasNext()) {

        final Map.Entry<InstanceType, Integer> e = it.next();

        // This is our actual type...
        final InstanceType actualtype = e.getKey();
        final int maxcount = e.getValue();
        final int mincount = maxcount;
        LOG.info("Requesting " + maxcount + " instances of type " + actualtype + " for job " + jobID);

        // And this is the list of instances we will have...
        LinkedList<FloatingInstance> actualFloatingInstances = null;
        LinkedList<String> actualRequestedInstances = null;

        // Check if floating instances available...
        actualFloatingInstances = anyFloatingInstancesAvailable(awsAccessId, awsSecretKey, actualtype,
                maxcount);

        // Do we need more instances?
        if (actualFloatingInstances.size() < maxcount) {
            int minimumrequestcount = Math.max(mincount - actualFloatingInstances.size(), 1);
            int maximumrequestcount = maxcount - actualFloatingInstances.size();

            actualRequestedInstances = allocateCloudInstance(conf, actualtype, minimumrequestcount,
                    maximumrequestcount);
        } else {
            actualRequestedInstances = new LinkedList<String>();
        }

        // Add provided Instances to overall bill...
        floatingInstances.addAll(actualFloatingInstances);
        requestedInstances.addAll(actualRequestedInstances);

        // Did we end up below the requested minimum?
        if (actualRequestedInstances.size() + actualFloatingInstances.size() < mincount) {
            LOG.error("Requested: " + mincount + " to " + maxcount + " instances of type "
                    + actualtype.getIdentifier() + ", but could only provide "
                    + (actualRequestedInstances.size() + actualFloatingInstances.size()) + ".");

            // something went wrong.. give the floating instances back!
            synchronized (this.floatingInstances) {
                for (FloatingInstance i : floatingInstances) {
                    this.floatingInstances.put(i.getInstanceConnectionInfo(), i);
                }
            }
            throw new InstanceException("Could not allocate enough cloud instances. See logs for details.");
        } // End outer limits

    } // End iterating over instance types..

    // Convert and allocate Floating Instances...
    final List<AllocatedResource> allocatedResources = new ArrayList<AllocatedResource>();

    for (final FloatingInstance fi : floatingInstances) {
        final EC2CloudInstance ci = fi.asCloudInstance(networkTopology.getRootNode());
        jobToInstanceMapping.assignInstanceToJob(ci);
        allocatedResources.add(ci.asAllocatedResource());
    }

    // Finally, inform the scheduler about the instances which have been floating before
    if (!allocatedResources.isEmpty()) {
        final EC2CloudInstanceNotifier notifier = new EC2CloudInstanceNotifier(this.instanceListener, jobID,
                allocatedResources);
        notifier.start();
    }

    // Add reserved Instances to Job Mapping...
    for (final String i : requestedInstances) {
        this.reservedInstancesToJobMapping.put(i, jobID);
    }
}

From source file:org.alfresco.repo.workflow.activiti.ActivitiWorkflowEngine.java

@SuppressWarnings("unchecked")
private List<WorkflowInstance> getWorkflowsInternal(WorkflowInstanceQuery workflowInstanceQuery,
        boolean isActive, int maxItems, int skipCount) {
    // MNT-9074 My Tasks fails to render if tasks quantity is excessive
    HistoricProcessInstanceQuery query = createQuery(workflowInstanceQuery, isActive);

    LinkedList<WorkflowInstance> results = new LinkedList<WorkflowInstance>();

    List<HistoricProcessInstance> completedInstances;
    if (maxItems > 0) {
        completedInstances = query.orderByProcessInstanceDuration().desc().listPage(skipCount, maxItems);
    } else {
        completedInstances = query.list();
    }

    List<WorkflowInstance> completedResults = typeConverter.doSpecialTenantFilterAndSafeConvert(
            completedInstances, new Function<HistoricProcessInstance, String>() {
                public String apply(HistoricProcessInstance historicProcessInstance) {
                    ProcessDefinition procDef = activitiUtil
                            .getProcessDefinition(historicProcessInstance.getProcessDefinitionId());
                    return procDef.getKey();
                }
            });

    results.addAll(completedResults);
    return results;
}

From source file:org.ow2.proactive_grid_cloud_portal.cli.CommandFactory.java

/**
 * Returns an ordered {@link Command} list for specified user arguments.
 *
 * @param cli the command-line arguments
 * @return an ordered {@link Command} list.
 */
protected List<Command> getCommandList(CommandLine cli, Map<String, Command> map,
        ApplicationContext currentContext) {
    LinkedList<Command> list = new LinkedList<>();

    if (map.containsKey(opt(COMMON_HELP))) {
        list.add(map.remove(opt(COMMON_HELP)));
        return list;
    }

    if (map.containsKey(opt(RM_HELP))) {
        list.add(map.remove(opt(RM_HELP)));
        return list;
    }

    if (map.containsKey(opt(SCHEDULER_HELP))) {
        list.add(map.remove(opt(SCHEDULER_HELP)));
        return list;
    }

    if (map.containsKey(opt(SILENT))) {
        list.add(map.remove(opt(SILENT)));
    }

    if (map.containsKey(opt(DEBUG))) {
        list.add(map.remove(opt(DEBUG)));
    }

    if (map.containsKey(opt(URL))) {
        list.addFirst(map.remove(opt(URL)));
    }

    if (map.containsKey(opt(INSECURE))) {
        list.add(map.remove(opt(INSECURE)));

    } else if (map.containsKey(opt(CA_CERTS))) {
        list.add(map.remove(opt(CA_CERTS)));

        if (map.containsKey(opt(CA_CERTS_PASSWORD))) {
            list.add(map.remove(opt(CA_CERTS_PASSWORD)));
        }
    }

    if (map.containsKey(opt(SESSION_ID))) {
        list.add(map.remove(opt(SESSION_ID)));

    } else if (map.containsKey(opt(SESSION_ID_FILE))) {
        list.add(map.remove(opt(SESSION_ID_FILE)));
    }

    if (map.containsKey(opt(PASSWORD))) {
        list.add(map.remove(opt(PASSWORD)));
    }

    if (map.containsKey(opt(LOGIN))) {
        list.add(map.remove(opt(LOGIN)));

    } else if (map.containsKey(opt(CREDENTIALS))) {
        list.add(map.remove(opt(CREDENTIALS)));

    } else {
        // auto login
        String resourceType = currentContext.getResourceType();
        String filename = resourceType + ".cc";
        File credFile = new File(DFLT_SESSION_DIR, filename);
        if (credFile.exists()) {
            list.add(new LoginWithCredentialsCommand(credFile.getAbsolutePath(), true));
        } else {
            String schedulerHome = ClasspathUtils.findSchedulerHome();
            File defaultCredentials = new File(schedulerHome, DEFAULT_CREDENTIALS_PATH);
            if (defaultCredentials.exists()) {
                list.add(new LoginWithCredentialsCommand(defaultCredentials.getAbsolutePath(), true));
            }
        }
    }

    if (map.containsKey(opt(INFRASTRUCTURE))) {
        list.add(map.remove(opt(INFRASTRUCTURE)));
    }

    if (map.containsKey(opt(POLICY))) {
        list.add(map.remove(opt(POLICY)));
    }

    if (map.isEmpty()) {
        list.add(new ImodeCommand());
    } else {
        Command output = map.remove(opt(OUTPUT));
        list.addAll(map.values());
        if (output != null) {
            list.add(output);
        }
    }

    return list;
}

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public Collection<BasicMessageBean> validateModule(IEnrichmentModuleContext context, DataBucketBean bucket,
        EnrichmentControlMetadataBean control) {

    final LinkedList<BasicMessageBean> mutable_errs = new LinkedList<>();

    // Validation

    // 1) Check that has doc schema enabled unless override set 

    final DedupConfigBean dedup_config = BeanTemplateUtils
            .from(Optional.ofNullable(control.config()).orElse(Collections.emptyMap()), DedupConfigBean.class)
            .get();

    final DocumentSchemaBean doc_schema = Optional.ofNullable(dedup_config.doc_schema_override())
            .orElse(bucket.data_schema().document_schema()); //(exists by construction)
    if (null == doc_schema) { // Has to either have a doc schema or an override 
        mutable_errs.add(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(), "validateModule",
                ErrorUtils.get(ErrorUtils.MISSING_DOCUMENT_SERVICE)));
        return mutable_errs; //(no point going any further here)
    }

    if (!Optional.ofNullable(doc_schema.lookup_service_override()).filter(s -> !s.isEmpty()).isPresent()) {
        final boolean doc_schema_enabled = Optionals.of(() -> bucket.data_schema().document_schema())
                .map(ds -> Optional.ofNullable(ds.enabled()).orElse(true)).orElse(false);
        if (!doc_schema_enabled) {
            mutable_errs.add(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(), "validateModule",
                    ErrorUtils.get(ErrorUtils.MISSING_DOCUMENT_SERVICE)));
        }
    }
    //(else up to the user to ensure that the required service is included)

    // 1.5) Validate that the service override is valid

    final Validation<String, Tuple2<Optional<Class<? extends IUnderlyingService>>, Optional<String>>> service_to_use = getDataService(
            doc_schema);

    if (service_to_use.isFail()) {
        mutable_errs.add(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(), "validateModule",
                service_to_use.fail()));
    }

    // 2) Validate any child modules

    Optional<EnrichmentControlMetadataBean> custom_config = Optionals
            .ofNullable(doc_schema.custom_deduplication_configs()).stream()
            .filter(cfg -> Optional.ofNullable(cfg.enabled()).orElse(true)).findFirst();

    custom_config.ifPresent(cfg -> {
        mutable_errs.addAll(getEnrichmentModules(context, cfg).stream()
                .flatMap(module -> module.validateModule(context, bucket, cfg).stream())
                .collect(Collectors.toList()));
    });
    return mutable_errs;
}