Example usage for java.util HashSet isEmpty

Introduction

This page lists usage examples of java.util.HashSet.isEmpty() collected from open-source projects.

Prototype

public boolean isEmpty() 

Document

Returns true if this set contains no elements.
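
Before the real-world examples below, here is a minimal self-contained sketch of that behavior (the class and variable names are illustrative only):

import java.util.HashSet;

public class IsEmptyDemo {
    public static void main(String[] args) {
        HashSet<String> names = new HashSet<>();
        System.out.println(names.isEmpty()); // true: no elements yet

        names.add("alice");
        System.out.println(names.isEmpty()); // false: the set now contains one element

        names.clear();
        System.out.println(names.isEmpty()); // true again after clear()
    }
}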

Usage

From source file: com.android.mail.ui.AnimatedAdapter.java

private void delete(Collection<Conversation> conversations, ListItemsRemovedListener listener,
        HashSet<Long> list) {
    // Clear out any remaining items and add the new ones
    mLastDeletingItems.clear();
    // Since we are deleting new items, clear any remaining undo items
    mUndoingItems.clear();

    final int startPosition = mListView.getFirstVisiblePosition();
    final int endPosition = mListView.getLastVisiblePosition();

    // Only animate visible items
    for (Conversation c : conversations) {
        if (c.position >= startPosition && c.position <= endPosition) {
            mLastDeletingItems.add(c.id);
            list.add(c.id);
        }
    }

    if (list.isEmpty()) {
        // If we have no deleted items on screen, skip the animation
        listener.onListItemsRemoved();
        // If we have an action queued up, perform it
        performAndSetNextAction(null);
    } else {
        performAndSetNextAction(listener);
    }
    notifyDataSetChanged();
}

From source file: org.rhq.enterprise.server.sync.SynchronizationManagerBean.java

private <E, X> Set<ConsistencyValidatorFailureReport> validateEntities(XMLStreamReader rdr, Subject subject,
        Set<ConsistencyValidator> consistencyValidators, Map<String, Configuration> importConfigurations)
        throws Exception {
    String synchronizerClass = rdr.getAttributeValue(null, SynchronizationConstants.ID_ATTRIBUTE);
    HashSet<ConsistencyValidatorFailureReport> ret = new HashSet<ConsistencyValidatorFailureReport>();

    @SuppressWarnings("unchecked")
    Synchronizer<E, X> synchronizer = instantiate(synchronizerClass, Synchronizer.class,
            "The id attribute of entities doesn't correspond to a class implementing the Synchronizer interface.");

    synchronizer.initialize(subject, entityManager);

    Importer<E, X> importer = synchronizer.getImporter();

    Set<ConsistencyValidator> requiredConsistencyValidators = synchronizer.getRequiredValidators();

    //check that all the required consistency validators were run
    for (ConsistencyValidator v : requiredConsistencyValidators) {
        if (!consistencyValidators.contains(v)) {
            ret.add(new ConsistencyValidatorFailureReport(v.getClass().getName(),
                    "The validator '" + v.getClass().getName() + "' is required by the synchronizer '"
                            + synchronizerClass + "' but was not found in the export file."));
        }
    }

    //don't bother checking if there are inconsistencies in the export file
    if (!ret.isEmpty()) {
        return ret;
    }

    boolean configured = false;
    Configuration importConfiguration = importConfigurations.get(synchronizerClass);

    Set<EntityValidator<X>> validators = null;

    //the passed in configuration has precedence over the default one inlined in 
    //the config file.
    if (importConfiguration != null) {
        importer.configure(importConfiguration);
        validators = importer.getEntityValidators();
        for (EntityValidator<X> v : validators) {
            v.initialize(subject, entityManager);
        }
        configured = true;
    }

    while (rdr.hasNext()) {
        boolean bailout = false;
        switch (rdr.next()) {
        case XMLStreamConstants.START_ELEMENT:
            if (SynchronizationConstants.DEFAULT_CONFIGURATION_ELEMENT.equals(rdr.getName().getLocalPart())) {
                if (!configured) {
                    importConfiguration = getDefaultConfiguration(rdr);
                }
            } else if (SynchronizationConstants.DATA_ELEMENT.equals(rdr.getName().getLocalPart())) {

                //first check if the configure method has been called
                if (!configured) {
                    importer.configure(importConfiguration);
                    validators = importer.getEntityValidators();
                    for (EntityValidator<X> v : validators) {
                        v.initialize(subject, entityManager);
                    }
                    configured = true;
                }

                //now do the validation

                rdr.nextTag();
                X exportedEntity = importer.unmarshallExportedEntity(new ExportReader(rdr));

                for (EntityValidator<X> validator : validators) {
                    try {
                        validator.validateExportedEntity(exportedEntity);
                    } catch (Exception e) {
                        ValidationException v = new ValidationException(
                                "Failed to validate entity [" + exportedEntity + "]", e);
                        ret.add(new ConsistencyValidatorFailureReport(validator.getClass().getName(),
                                printExceptionToString(v)));
                    }
                }
            }
            break;
        case XMLStreamConstants.END_ELEMENT:
            if (SynchronizationConstants.ENTITIES_EXPORT_ELEMENT.equals(rdr.getName().getLocalPart())) {
                bailout = true;
            }
        }

        if (bailout) {
            break;
        }
    }

    return ret;
}

From source file: org.apereo.portal.portlets.marketplace.PortletMarketplaceController.java

private Set<PortletCategory> getPermittedCategories(PortletRequest req) {

    Set<PortletCategory> rslt = Collections.emptySet(); // default
    final PortletPreferences prefs = req.getPreferences();
    final String[] permittedCategories = prefs.getValues(PERMITTED_CATEGORIES_PREFERENCE, new String[0]);

    if (permittedCategories.length != 0) {
        // Expensive to create, use cache for this collection...
        Set<String> cacheKey = new HashSet<>(Arrays.asList(permittedCategories));
        net.sf.ehcache.Element cacheElement = marketplaceCategoryCache.get(cacheKey);

        if (cacheElement == null) {
            // Nothing in cache currently;  need to populate cache
            HashSet<PortletCategory> portletCategories = new HashSet<>();
            for (final String categoryName : permittedCategories) {
                EntityIdentifier[] cats = GroupService.searchForGroups(categoryName, IGroupConstants.IS,
                        IPortletDefinition.class);
                if (cats != null && cats.length > 0) {
                    PortletCategory pc = portletCategoryRegistry.getPortletCategory(cats[0].getKey());
                    if (pc != null) {
                        portletCategories.add(pc);
                    } else {
                        logger.warn("No PortletCategory found in portletCategoryRegistry for id '{}'",
                                cats[0].getKey());
                    }
                } else {
                    logger.warn("No category found in GroupService for name '{}'", categoryName);
                }
            }
            /*
             * Sanity Check:  Since at least 1 category name was specified, we
             * need to make certain there's at least 1 PortletCategory in the
             * set;  otherwise, a restricted Marketplace portlet would become
             * an unrestricted one.
             */
            if (portletCategories.isEmpty()) {
                throw new IllegalStateException(
                        "None of the specified category " + "names could be resolved to a PortletCategory:  "
                                + Arrays.asList(permittedCategories));
            }
            cacheElement = new net.sf.ehcache.Element(cacheKey, portletCategories);
            this.marketplaceCategoryCache.put(cacheElement);
        }
        rslt = (Set<PortletCategory>) cacheElement.getObjectValue();
    }
    return rslt;
}

From source file: nl.umcg.westrah.binarymetaanalyzer.BinaryMetaAnalysis.java

private void createSNPIndex(String outdir) throws IOException {

    HashSet<String> confineToTheseSNPs = null;

    HashSet<String> snpPreSelection = null;
    if (settings.getSNPProbeSelection() != null) {
        System.out.println("Getting SNPs from SNP/Probe selection file: " + settings.getSNPProbeSelection());
        snpPreSelection = new HashSet<String>();
        TextFile tf = new TextFile(settings.getSNPProbeSelection(), TextFile.R);
        String[] elems = tf.readLineElems(TextFile.tab);
        while (elems != null) {
            String snp = elems[0];
            snpPreSelection.add(snp);
            elems = tf.readLineElems(TextFile.tab);
        }
        tf.close();
        System.out.println("Found " + snpPreSelection.size() + " unique snps in SNP/Probe selection file.");
        if (snpPreSelection.isEmpty()) {
            System.err.println("Error: SNP/Probe selection file defined, but no SNPs found.");
            System.exit(-1);
        }
    }

    if (settings.getSNPSelection() != null) {
        System.out.println("Selecting SNPs from file: " + settings.getSNPSelection());
        confineToTheseSNPs = new HashSet<String>();
        TextFile tf = new TextFile(settings.getSNPSelection(), TextFile.R);
        ArrayList<String> snps = tf.readAsArrayList();
        tf.close();
        if (snpPreSelection == null) {
            confineToTheseSNPs.addAll(snps);
        } else {
            System.out.println("Intersecting with SNP/Probe selection.");
            for (String snp : snps) {
                if (snpPreSelection.contains(snp)) {
                    confineToTheseSNPs.add(snp);
                }
            }
        }
        System.out.println(confineToTheseSNPs.size() + " SNPs loaded.");
    } else if (snpPreSelection != null) {
        confineToTheseSNPs = snpPreSelection;
    }

    // create a list of all available SNPs
    HashSet<String> allSNPs = new HashSet<String>();
    for (BinaryMetaAnalysisDataset dataset : datasets) {
        String[] snps = dataset.getSNPs();
        for (String snp : snps) {
            if (confineToTheseSNPs == null || confineToTheseSNPs.contains(snp)) {
                allSNPs.add(snp);
            }
        }
        System.out.println(snps.length + " in dataset " + dataset.getName() + "\t" + allSNPs.size()
                + " unique SNPs found");
    }

    if (allSNPs.isEmpty()) {
        System.err.println("Error: no SNPs found that match your request");
        System.exit(-1);
    }

    // create a temporary map that maps each SNP to a meta-analysis position
    int ctr = 0;
    TObjectIntHashMap<String> snpMap = new TObjectIntHashMap<String>(allSNPs.size(), 0.85f, -9);
    snpList = new String[allSNPs.size()];
    for (String s : allSNPs) {
        snpMap.put(s, ctr);
        snpList[ctr] = s;
        ctr++;
    }

    // TODO: for faster disk access, we would need to sort the SNPs by dataset ID...

    // fill index
    snpIndex = new int[allSNPs.size()][datasets.length];
    for (int d = 0; d < datasets.length; d++) {
        for (int s = 0; s < allSNPs.size(); s++) {
            snpIndex[s][d] = -9;
        }
    }

    for (int d = 0; d < datasets.length; d++) {
        String[] snps = datasets[d].getSNPs();
        for (int s = 0; s < snps.length; s++) {
            String snp = snps[s];
            int id = snpMap.get(snp);
            if (id != -9) {
                snpIndex[id][d] = s;
            }
        }
    }

    TextFile tf = new TextFile(outdir + "snpindex.txt", TextFile.W);
    String header = "metaID";
    for (int d = 0; d < datasets.length; d++) {
        header += "\t" + datasets[d].getName() + "-sid";
    }
    tf.writeln(header);
    for (int s = 0; s < snpList.length; s++) {
        String ln = snpList[s];
        for (int d = 0; d < datasets.length; d++) {
            ln += "\t" + snpIndex[s][d];
        }
        tf.writeln(ln);
    }
    tf.close();
}

From source file: com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

private void createConfigXML(Writer out, String title, String input, String fields, boolean isCustomTable,
        String outputDatabase, String output, String tempOutputCollection, String mapper, String reducer,
        String combiner, String query, List<ObjectId> communityIds, String outputKey, String outputValue,
        String arguments) throws IOException {
    String dbserver = prop_general.getDatabaseServer();
    output = outputDatabase + "." + tempOutputCollection;

    int nSplits = 8;
    int nDocsPerSplit = 12500;

    //add communities to query if this is not a custom table
    if (!isCustomTable) {
        // Start with the old query:
        BasicDBObject oldQueryObj = null;
        if (query.startsWith("{")) {
            oldQueryObj = (BasicDBObject) com.mongodb.util.JSON.parse(query);
        } else {
            oldQueryObj = new BasicDBObject();
        }

        // Community Ids aren't indexed in the metadata collection, but source keys are, so we need to transform to that
        BasicDBObject keyQuery = new BasicDBObject(SourcePojo.communityIds_,
                new BasicDBObject(DbManager.in_, communityIds));
        boolean bAdminOverride = false;
        if (oldQueryObj.containsField("admin")) { // For testing only...
            if (1 == communityIds.size()) {
                ObjectId communityId = communityIds.get(0);
                if (RESTTools.adminLookup(communityId.toString())) {
                    bAdminOverride = true;
                    if (oldQueryObj.containsField("max.splits")) {
                        nSplits = oldQueryObj.getInt("max.splits");
                    }
                    if (oldQueryObj.containsField("max.docs.per.split")) {
                        nDocsPerSplit = oldQueryObj.getInt("max.docs.per.split");
                    }
                }
            }
        } //(end diagnostic/benchmarking/test code for admins only part 1)
        if (bAdminOverride) {
            oldQueryObj = (BasicDBObject) oldQueryObj.get("admin");
            //(end diagnostic/benchmarking/test code for admins only part 2)
        } else if (oldQueryObj.containsField(DocumentPojo.sourceKey_) || input.startsWith("feature.")) {
            // Source Key specified by user, stick communityIds check in for security
            oldQueryObj.put(DocumentPojo.communityId_, new BasicDBObject(DbManager.in_, communityIds));
        } else { // Source key not specified by user, transform communities->sourcekeys
            BasicDBObject keyFields = new BasicDBObject(SourcePojo.key_, 1);
            DBCursor dbc = MongoDbManager.getIngest().getSource().find(keyQuery, keyFields);
            if (dbc.count() > 500) {
                // (too many source keys; let's keep the query size sensible...)
                oldQueryObj.put(DocumentPojo.communityId_, new BasicDBObject(DbManager.in_, communityIds));
            } else {
                HashSet<String> sourceKeys = new HashSet<String>();
                while (dbc.hasNext()) {
                    DBObject dbo = dbc.next();
                    String sourceKey = (String) dbo.get(SourcePojo.key_);
                    if (null != sourceKey) {
                        sourceKeys.add(sourceKey);
                    }
                }
                if (sourceKeys.isEmpty()) { // query returns empty
                    throw new RuntimeException("Communities contain no sources");
                }
                BasicDBObject newQueryClauseObj = new BasicDBObject(DbManager.in_, sourceKeys);
                // Now combine the queries...
                oldQueryObj.put(DocumentPojo.sourceKey_, newQueryClauseObj);

            } // (end if too many source keys across the communities)
        } //(end if need to break source keys down into communities)
        query = oldQueryObj.toString();
    } else {
        //get the custom table (and database)
        input = getCustomDbAndCollection(input);
    }
    if (arguments == null)
        arguments = "";

    // Generic configuration
    out.write("<?xml version=\"1.0\"?>\n<configuration>");

    // Mongo specific configuration

    out.write("\n\t<property><!-- name of job shown in jobtracker --><name>mongo.job.name</name><value>" + title
            + "</value></property>"
            + "\n\t<property><!-- run the job verbosely ? --><name>mongo.job.verbose</name><value>true</value></property>"
            + "\n\t<property><!-- Run the job in the foreground and wait for response, or background it? --><name>mongo.job.background</name><value>false</value></property>"
            + "\n\t<property><!-- If you are reading from mongo, the URI --><name>mongo.input.uri</name><value>mongodb://"
            + dbserver + "/" + input + "</value></property>"
            + "\n\t<property><!-- If you are writing to mongo, the URI --><name>mongo.output.uri</name><value>mongodb://"
            + dbserver + "/" + output + "</value>  </property>"
            + "\n\t<property><!-- The query, in JSON, to execute [OPTIONAL] --><name>mongo.input.query</name><value>"
            + query + "</value></property>"
            + "\n\t<property><!-- The fields, in JSON, to read [OPTIONAL] --><name>mongo.input.fields</name><value>"
            + ((fields == null) ? ("") : fields) + "</value></property>"
            + "\n\t<property><!-- A JSON sort specification for read [OPTIONAL] --><name>mongo.input.sort</name><value></value></property>"
            + "\n\t<property><!-- The number of documents to limit to for read [OPTIONAL] --><name>mongo.input.limit</name><value>0</value><!-- 0 == no limit --></property>"
            + "\n\t<property><!-- The number of documents to skip in read [OPTIONAL] --><!-- TODO - Are we running limit() or skip() first? --><name>mongo.input.skip</name><value>0</value> <!-- 0 == no skip --></property>"
            + "\n\t<property><!-- Class for the mapper --><name>mongo.job.mapper</name><value>" + mapper
            + "</value></property>"
            + "\n\t<property><!-- Reducer class --><name>mongo.job.reducer</name><value>" + reducer
            + "</value></property>"
            + "\n\t<property><!-- InputFormat Class --><name>mongo.job.input.format</name><value>com.ikanow.infinit.e.data_model.custom.InfiniteMongoInputFormat</value></property>"
            + "\n\t<property><!-- OutputFormat Class --><name>mongo.job.output.format</name><value>com.mongodb.hadoop.MongoOutputFormat</value></property>"
            + "\n\t<property><!-- Output key class for the output format --><name>mongo.job.output.key</name><value>"
            + outputKey + "</value></property>"
            + "\n\t<property><!-- Output value class for the output format --><name>mongo.job.output.value</name><value>"
            + outputValue + "</value></property>"
            + "\n\t<property><!-- Output key class for the mapper [optional] --><name>mongo.job.mapper.output.key</name><value></value></property>"
            + "\n\t<property><!-- Output value class for the mapper [optional] --><name>mongo.job.mapper.output.value</name><value></value></property>"
            + "\n\t<property><!-- Class for the combiner [optional] --><name>mongo.job.combiner</name><value>"
            + combiner + "</value></property>"
            + "\n\t<property><!-- Partitioner class [optional] --><name>mongo.job.partitioner</name><value></value></property>"
            + "\n\t<property><!-- Sort Comparator class [optional] --><name>mongo.job.sort_comparator</name><value></value></property>"
            + "\n\t<property><!-- Split Size [optional] --><name>mongo.input.split_size</name><value>32</value></property>");

    // Infinit.e specific configuration

    out.write("\n\t<property><!-- User Arguments [optional] --><name>arguments</name><value>"
            + StringEscapeUtils.escapeXml(arguments) + "</value></property>"
            + "\n\t<property><!-- Maximum number of splits [optional] --><name>max.splits</name><value>"
            + nSplits + "</value></property>"
            + "\n\t<property><!-- Maximum number of docs per split [optional] --><name>max.docs.per.split</name><value>"
            + nDocsPerSplit + "</value></property>");

    // Closing thoughts:
    out.write("\n</configuration>");

    out.flush();
    out.close();
}

From source file: org.apache.hadoop.hive.ql.QTestUtil.java

public static Set<String> getSrcTables() {
    HashSet<String> srcTables = new HashSet<String>();
    // FIXME: moved the default value here... for now
    // I think this feature is never really used from the command line
    String defaultTestSrcTables = "src,src1,srcbucket,srcbucket2,src_json,src_thrift,src_sequencefile,srcpart,alltypesorc,src_hbase,cbo_t1,cbo_t2,cbo_t3,src_cbo,part,lineitem";
    for (String srcTable : System.getProperty("test.src.tables", defaultTestSrcTables).trim().split(",")) {
        srcTable = srcTable.trim();
        if (!srcTable.isEmpty()) {
            srcTables.add(srcTable);
        }
    }
    if (srcTables.isEmpty()) {
        throw new RuntimeException("Source tables cannot be empty");
    }
    return srcTables;
}

From source file: gda.data.scan.datawriter.NexusDataWriter.java

/**
 * This is run when processing the first ScanDataPoint; the file is in the root node.
 * We add all of the one-off metadata here.
 */
protected Collection<Scannable> makeConfiguredScannablesAndMonitors(
        Collection<Scannable> scannablesAndMonitors) {
    Set<String> metadatascannablestowrite = new HashSet<String>(metadatascannables);

    for (Detector det : thisPoint.getDetectors()) {
        logger.info("found detector named: " + det.getName());
        String detname = det.getName();
        if (metadataScannablesPerDetector.containsKey(detname)) {
            HashSet<String> metasPerDet = metadataScannablesPerDetector.get(detname);
            if (metasPerDet != null && !metasPerDet.isEmpty()) {
                metadatascannablestowrite.addAll(metasPerDet);
            }
        }
    }

    try {
        file.opengroup(this.entryName, "NXentry");

        Set<Scannable> wehavewritten = new HashSet<Scannable>();
        for (Iterator<Scannable> iterator = scannablesAndMonitors.iterator(); iterator.hasNext();) {
            Scannable scannable = iterator.next();
            String scannableName = scannable.getName();
            if (weKnowTheLocationFor(scannableName)) {
                wehavewritten.add(scannable);
                Collection<String> prerequisites = locationmap.get(scannableName)
                        .getPrerequisiteScannableNames();
                if (prerequisites != null)
                    metadatascannablestowrite.addAll(prerequisites);
                scannableID.addAll(locationmap.get(scannableName).makeScannable(file, scannable,
                        getSDPositionFor(scannableName), generateDataDim(false, scanDimensions, null)));
            }
        }

        int oldsize;
        do { // add dependencies of metadata scannables
            oldsize = metadatascannablestowrite.size();
            Set<String> aux = new HashSet<String>();
            for (String s : metadatascannablestowrite) {
                if (weKnowTheLocationFor(s)) {
                    Collection<String> prerequisites = locationmap.get(s).getPrerequisiteScannableNames();
                    if (prerequisites != null)
                        aux.addAll(prerequisites);
                }
            }
            metadatascannablestowrite.addAll(aux);
        } while (metadatascannablestowrite.size() > oldsize);

        // remove the ones in the scan, as they are not metadata
        for (Scannable scannable : scannablesAndMonitors) {
            metadatascannablestowrite.remove(scannable.getName());
        }
        // only use default writing for the ones we haven't written yet 
        scannablesAndMonitors.removeAll(wehavewritten);

        makeMetadataScannables(metadatascannablestowrite);

        // Close NXentry
        file.closegroup();
    } catch (NexusException e) {
        // FIXME NexusDataWriter should allow exceptions to be thrown
        logger.error("TODO put description of error here", e);
    }
    return scannablesAndMonitors;
}

From source file: org.eclipse.wb.tests.gef.GraphicalRobot.java

/**
 * Asserts that the feedback layer contains exactly the same {@link Figure}s as described.
 */
public void assertFeedbackFigures(FigureDescription... descriptions) {
    HashSet<Figure> feedbackFigures = new HashSet<Figure>(getFeedbackFigures());
    //
    for (int i = 0; i < descriptions.length; i++) {
        FigureDescription description = descriptions[i];
        // try to find figure for current description
        boolean figureFound = false;
        for (Iterator<Figure> I = feedbackFigures.iterator(); I.hasNext();) {
            Figure figure = I.next();
            if (description.match(figure)) {
                I.remove();
                figureFound = true;
                break;
            }
        }
        // figure should be found
        assertThat(figureFound).describedAs("No figure found for " + description).isTrue();
    }
    // all figures should be matched
    if (!feedbackFigures.isEmpty()) {
        String message = "Following figures are not matched:";
        for (Figure figure : feedbackFigures) {
            message += "\n\t" + figure.getClass().getName() + " " + figure.getBounds() + " "
                    + figure.toString();
        }
        assertThat(true).describedAs(message).isFalse();
    }
}

From source file: org.nuxeo.ecm.core.storage.sql.SessionImpl.java

/**
 * Sends a core event about the merged invalidations (NXP-5808).
 *
 * @param pair the pair of cache and event invalidations
 */
protected void sendInvalidationEvent(InvalidationsPair pair) {
    if (!repository.repositoryDescriptor.sendInvalidationEvents) {
        return;
    }
    // compute modified doc ids and parent ids (as strings)
    HashSet<String> modifiedDocIds = new HashSet<String>();
    HashSet<String> modifiedParentIds = new HashSet<String>();

    // merge cache and events because of clustering (NXP-5808)
    collectModified(pair.cacheInvalidations, modifiedDocIds, modifiedParentIds);
    collectModified(pair.eventInvalidations, modifiedDocIds, modifiedParentIds);

    // TODO check what we can do about invalidations.deleted

    if (modifiedDocIds.isEmpty() && modifiedParentIds.isEmpty()) {
        return;
    }

    EventContext ctx = new EventContextImpl(null, null);
    ctx.setRepositoryName(repository.getName());
    ctx.setProperty(EventConstants.INVAL_MODIFIED_DOC_IDS, modifiedDocIds);
    ctx.setProperty(EventConstants.INVAL_MODIFIED_PARENT_IDS, modifiedParentIds);
    Event event = new EventImpl(EventConstants.EVENT_VCS_INVALIDATIONS, ctx);
    try {
        repository.eventService.fireEvent(event);
    } catch (ClientException e) {
        log.error("Failed to send invalidation event: " + e, e);
    }
}

From source file: org.mskcc.cbio.portal.servlet.QueryBuilder.java

/**
 * Processes a good request.
 */
private void processData(String cancerStudyStableId, String geneList, HashSet<String> geneticProfileIdSet,
        ArrayList<GeneticProfile> profileList, String sampleSetId, String sampleIds,
        ArrayList<SampleList> sampleSetList, String patientCaseSelect, ServletContext servletContext,
        HttpServletRequest request, HttpServletResponse response, XDebug xdebug)
        throws IOException, ServletException, DaoException {

    request.setAttribute(PATIENT_CASE_SELECT, patientCaseSelect);

    HashSet<String> setOfSampleIds = null;

    String sampleIdsKey = null;

    // user-specified patients, but patient_ids parameter is missing,
    // so try to retrieve sample_ids by using sample_ids_key parameter.
    // this is required for survival plot requests  
    if (sampleSetId.equals("-1") && sampleIds == null) {
        sampleIdsKey = request.getParameter(CASE_IDS_KEY);

        if (sampleIdsKey != null) {
            sampleIds = SampleSetUtil.getSampleIds(sampleIdsKey);
        }
    }

    if (!sampleSetId.equals("-1")) {
        for (SampleList sampleSet : sampleSetList) {
            if (sampleSet.getStableId().equals(sampleSetId)) {
                sampleIds = sampleSet.getSampleListAsString();
                setOfSampleIds = new HashSet<String>(sampleSet.getSampleList());
                break;
            }
        }
    }
    //if user specifies patients, add these to hashset, and send to GetMutationData
    else if (sampleIds != null) {
        String[] sampleIdSplit = sampleIds.split("\\s+");
        setOfSampleIds = new HashSet<String>();

        for (String sampleID : sampleIdSplit) {
            if (null != sampleID) {
                setOfSampleIds.add(sampleID);
            }
        }

        sampleIds = sampleIds.replaceAll("\\s+", " ");
    }

    if (setOfSampleIds == null || setOfSampleIds.isEmpty()) {
        redirectStudyUnavailable(request, response);
    }

    request.setAttribute(SET_OF_CASE_IDS, sampleIds);

    // Map user selected samples Ids to patient Ids
    HashMap<String, String> patientSampleIdMap = new HashMap<String, String>();
    CancerStudy selectedCancerStudy = DaoCancerStudy.getCancerStudyByStableId(cancerStudyStableId);
    int cancerStudyInternalId = selectedCancerStudy.getInternalId();
    Iterator<String> itr = setOfSampleIds.iterator();
    while (itr.hasNext()) {
        String sampleId = itr.next();
        ArrayList<String> sampleIdList = new ArrayList<String>();
        sampleIdList.add(sampleId);

        Sample sample = DaoSample.getSampleByCancerStudyAndSampleId(cancerStudyInternalId, sampleId);
        Patient patient = DaoPatient.getPatientById(sample.getInternalPatientId());
        patientSampleIdMap.put(sampleId, patient.getStableId());
    }
    request.setAttribute(SELECTED_PATIENT_SAMPLE_ID_MAP, patientSampleIdMap);

    if (sampleIdsKey == null) {
        sampleIdsKey = SampleSetUtil.shortenSampleIds(sampleIds);
    }

    // retrieve information about the cancer types
    Map<String, List<String>> cancerTypeInfo = DaoClinicalData.getCancerTypeInfo(cancerStudyInternalId);
    request.setAttribute(CANCER_TYPES_MAP, cancerTypeInfo);

    // this will create a key even if the patient set is a predefined set,
    // because it is required to build a patient id string in any case
    request.setAttribute(CASE_IDS_KEY, sampleIdsKey);

    Iterator<String> profileIterator = geneticProfileIdSet.iterator();
    ArrayList<DownloadLink> downloadLinkSet = new ArrayList<>();
    while (profileIterator.hasNext()) {
        String profileId = profileIterator.next();
        GeneticProfile profile = GeneticProfileUtil.getProfile(profileId, profileList);
        if (null == profile) {
            continue;
        }
        GetProfileData remoteCall = new GetProfileData(profile,
                new ArrayList<>(Arrays.asList(geneList.split(" "))), StringUtils.join(setOfSampleIds, " "));
        DownloadLink downloadLink = new DownloadLink(profile,
                new ArrayList<>(Arrays.asList(geneList.split(" "))), sampleIds, remoteCall.getRawContent());
        downloadLinkSet.add(downloadLink);
    }

    request.getSession().setAttribute(DOWNLOAD_LINKS, downloadLinkSet);
    String tabIndex = request.getParameter(QueryBuilder.TAB_INDEX);
    if (tabIndex != null && tabIndex.equals(QueryBuilder.TAB_VISUALIZE)) {
        double zScoreThreshold = ZScoreUtil.getZScore(geneticProfileIdSet, profileList, request);
        double rppaScoreThreshold = ZScoreUtil.getRPPAScore(request);
        request.setAttribute(Z_SCORE_THRESHOLD, zScoreThreshold);
        request.setAttribute(RPPA_SCORE_THRESHOLD, rppaScoreThreshold);

        // Store download links in session (for possible future retrieval).
        RequestDispatcher dispatcher = getServletContext().getRequestDispatcher("/WEB-INF/jsp/visualize.jsp");
        dispatcher.forward(request, response);
    } else if (tabIndex != null && tabIndex.equals(QueryBuilder.TAB_DOWNLOAD)) {
        ShowData.showDataAtSpecifiedIndex(servletContext, request, response, 0, xdebug);
    }
}