Usage examples for java.util.LinkedHashSet.isEmpty()
boolean isEmpty();
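isEmpty() returns true when the set contains no elements, which is equivalent to size() == 0. A minimal standalone sketch of that contract (the class name and values here are illustrative, not taken from any of the source files below):

import java.util.LinkedHashSet;

public class IsEmptyContract {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        System.out.println(set.isEmpty());                       // true: nothing added yet
        set.add("a");
        System.out.println(set.isEmpty());                       // false: one element present
        System.out.println(set.isEmpty() == (set.size() == 0));  // true: same contract
        set.remove("a");
        System.out.println(set.isEmpty());                       // true again after removal
    }
}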
From source file:Main.java
import java.util.LinkedHashSet;

public class Main {
    public static void main(String[] args) {
        LinkedHashSet<Integer> lhashSet = new LinkedHashSet<>();
        lhashSet.add(1); // autoboxing replaces the deprecated new Integer(String) constructor
        lhashSet.add(2);
        lhashSet.add(3);
        System.out.println(lhashSet);           // [1, 2, 3]
        lhashSet.clear();
        System.out.println(lhashSet);           // []
        System.out.println(lhashSet.isEmpty()); // true
    }
}
From source file:nl.systemsgenetics.genenetworkbackend.hpo.TestDiseaseGenePerformance.java
/**
 * @param args the command line arguments
 * @throws java.lang.Exception
 */
public static void main(String[] args) throws Exception {
    final File diseaseGeneHpoFile = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\HPO\\135\\ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt");
    final File ncbiToEnsgMapFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\ensgNcbiId.txt");
    final File hgncToEnsgMapFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\ensgHgnc.txt");
    final File ensgSymbolMappingFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\ensgHgnc.txt");
    final File predictionMatrixFile = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_zscores.txt.gz");
    final File predictionMatrixCorrelationFile = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_pathwayCorrelation.txt");
    final File significantTermsFile = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_bonSigTerms.txt");
    final double correctedPCutoff = 0.05;
    final File hpoOboFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\HPO\\135\\hp.obo");
    final File hpoPredictionInfoFile = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_auc_bonferroni.txt");
    final File hposToExcludeFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\hpoToExclude.txt");
    final File skewnessFile = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\skewnessSummary.txt");
    final boolean randomize = true;
    final File annotationMatrixFile = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\PathwayMatrix\\ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.txt_matrix.txt.gz");
    final File backgroundForRandomize = new File(
            "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\PathwayMatrix\\Ensembl2Reactome_All_Levels.txt_genesInPathways.txt");
    //final File backgroundForRandomize = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\expressedReactomeGenes.txt");
    final boolean randomizeCustomBackground = true;

    Map<String, String> ensgSymbolMapping = loadEnsgToHgnc(ensgSymbolMappingFile);

    final File outputFile;
    final ArrayList<String> backgroundGenes;
    if (randomize) {
        if (randomizeCustomBackground) {
            System.err.println("First need to fix so ranking list contains all genes in background list");
            return;
            // backgroundGenes = loadBackgroundGenes(backgroundForRandomize);
            // outputFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\hpoDiseaseBenchmarkRandomizedCustomBackground.txt");
        } else {
            backgroundGenes = null;
            outputFile = new File(
                    "C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\hpoDiseaseBenchmarkRandomizedExtraNorm.txt");
        }
    } else {
        backgroundGenes = null;
        outputFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\hpoDiseaseBenchmarkExtraNorm.txt");
    }

    final HashMap<String, ArrayList<String>> ncbiToEnsgMap = loadNcbiToEnsgMap(ncbiToEnsgMapFile);
    final HashMap<String, ArrayList<String>> hgncToEnsgMap = loadHgncToEnsgMap(hgncToEnsgMapFile);
    final HashSet<String> exludedHpo = loadHpoExclude(hposToExcludeFile);
    final SkewnessInfo skewnessInfo = new SkewnessInfo(skewnessFile);

    LinkedHashSet<String> significantTerms = loadSignificantTerms(significantTermsFile);

    DoubleMatrixDataset<String, String> predictionMatrix = DoubleMatrixDataset
            .loadDoubleData(predictionMatrixFile.getAbsolutePath());
    DoubleMatrixDataset<String, String> predictionMatrixSignificant = predictionMatrix
            .viewColSelection(significantTerms);
    DoubleMatrixDataset<String, String> predictionMatrixSignificantCorrelationMatrix = DoubleMatrixDataset
            .loadDoubleData(predictionMatrixCorrelationFile.getAbsolutePath());

    DiseaseGeneHpoData diseaseGeneHpoData = new DiseaseGeneHpoData(diseaseGeneHpoFile, ncbiToEnsgMap,
            hgncToEnsgMap, exludedHpo, new HashSet(predictionMatrix.getHashRows().keySet()), "OMIM");

    // NOTE: if one would use a different background this needs to be updated
    HashSet<String> diseaseGenes = new HashSet<>(diseaseGeneHpoData.getDiseaseGenes());

    if (randomize) {
        diseaseGeneHpoData = diseaseGeneHpoData.getPermutation(1, backgroundGenes);
    }

    for (String gene : diseaseGenes) {
        if (!predictionMatrixSignificant.containsRow(gene)) {
            throw new Exception("Error: " + gene);
        }
    }

    int[] mapGeneIndexToDiseaseGeneIndex = new int[predictionMatrix.rows()];
    ArrayList<String> predictedGenes = predictionMatrix.getRowObjects();

    int g2 = 0;
    for (int g = 0; g < predictedGenes.size(); ++g) {
        mapGeneIndexToDiseaseGeneIndex[g] = diseaseGenes.contains(predictedGenes.get(g)) ? g2++ : -1;
    }

    DoubleMatrixDataset<String, String> annotationnMatrix = DoubleMatrixDataset
            .loadDoubleData(annotationMatrixFile.getAbsolutePath());
    DoubleMatrixDataset<String, String> annotationMatrixSignificant = annotationnMatrix
            .viewColSelection(significantTerms);

    HashMap<String, MeanSd> hpoMeanSds = calculatePathayMeansOfAnnotatedGenes(predictionMatrixSignificant,
            annotationMatrixSignificant);

    Map<String, PredictionInfo> predictionInfo = HpoFinder.loadPredictionInfo(hpoPredictionInfoFile);

    Ontology hpoOntology = HpoFinder.loadHpoOntology(hpoOboFile);
    HpoFinder hpoFinder = new HpoFinder(hpoOntology, predictionInfo);

    final int totalGenes = predictionMatrixSignificant.rows();
    final int totalDiseaseGenes = diseaseGenes.size();
    final double[] geneScores = new double[totalGenes];
    final double[] geneScoresDiseaseGenes = new double[totalDiseaseGenes];
    final NaturalRanking naturalRanking = new NaturalRanking(NaNStrategy.FAILED, TiesStrategy.MAXIMUM);

    CSVWriter writer = new CSVWriter(new FileWriter(outputFile), '\t', '\0', '\0', "\n");

    String[] outputLine = new String[16];
    int c = 0;
    outputLine[c++] = "Disease";
    outputLine[c++] = "Gene";
    outputLine[c++] = "Hgnc";
    outputLine[c++] = "Rank";
    outputLine[c++] = "RankAmongDiseaseGenes";
    outputLine[c++] = "Z-score";
    outputLine[c++] = "HPO_skewness";
    outputLine[c++] = "Other_mean_skewness";
    outputLine[c++] = "Other_max_skewness";
    outputLine[c++] = "HPO_phenotypic_match_score";
    outputLine[c++] = "HPO_count";
    outputLine[c++] = "HPO_sum_auc";
    outputLine[c++] = "HPO_mean_auc";
    outputLine[c++] = "HPO_median_auc";
    outputLine[c++] = "HPO_terms";
    outputLine[c++] = "HPO_terms_match_score";
    writer.writeNext(outputLine);

    Random random = new Random(1);

    Mean meanCalculator = new Mean();
    Median medianCalculator = new Median();

    for (DiseaseGeneHpoData.DiseaseGene diseaseGene : diseaseGeneHpoData.getDiseaseGeneHpos()) {
        String gene = diseaseGene.getGene();
        String disease = diseaseGene.getDisease();

        if (!predictionMatrixSignificant.containsRow(gene)) {
            continue;
        }

        Set<String> geneHpos = diseaseGeneHpoData.getDiseaseEnsgHpos(diseaseGene);

        LinkedHashSet<String> geneHposPredictable = new LinkedHashSet<>();
        for (String hpo : geneHpos) {
            geneHposPredictable
                    .addAll(hpoFinder.getTermsToNames(hpoFinder.getPredictableTerms(hpo, correctedPCutoff)));
        }

        if (geneHposPredictable.isEmpty()) {
            continue;
        }

        // if (geneHposPredictable.size() > 1) {
        //     String hpoSelected = geneHposPredictable.toArray(new String[geneHposPredictable.size()])[random.nextInt(geneHposPredictable.size())];
        //     geneHposPredictable = new LinkedHashSet<>(1);
        //     geneHposPredictable.add(hpoSelected);
        // }

        DoubleMatrixDataset<String, String> predictionCaseTerms = predictionMatrixSignificant
                .viewColSelection(geneHposPredictable);
        DoubleMatrix2D predictionCaseTermsMatrix = predictionCaseTerms.getMatrix();

        double denominator = Math.sqrt(geneHposPredictable.size());

        for (int g = 0; g < totalGenes; ++g) {
            geneScores[g] = predictionCaseTermsMatrix.viewRow(g).zSum() / denominator;
            if (Double.isNaN(geneScores[g])) {
                geneScores[g] = 0;
            }
            g2 = mapGeneIndexToDiseaseGeneIndex[g];
            if (g2 >= 0) {
                geneScoresDiseaseGenes[g2] = geneScores[g];
            }
        }

        double[] geneRanks = naturalRanking.rank(geneScores);
        int diseaseGeneIndex = predictionMatrixSignificant.getRowIndex(gene);

        double[] geneRanksDiseaseGenes = naturalRanking.rank(geneScoresDiseaseGenes);
        int diseaseGeneIndexInDiseaseGenesOnly = mapGeneIndexToDiseaseGeneIndex[diseaseGeneIndex];

        double zscore = geneScores[diseaseGeneIndex];
        double rank = (totalGenes - geneRanks[diseaseGeneIndex]) + 1;
        double rankAmongDiseaseGenes = (totalDiseaseGenes
                - geneRanksDiseaseGenes[diseaseGeneIndexInDiseaseGenesOnly]) + 1;

        double hpoPhenotypicMatchScore = 0;
        StringBuilder individualMatchScore = new StringBuilder();
        boolean notFirst = false;
        int usedHpos = 0;

        double[] aucs = new double[geneHposPredictable.size()];
        double sumAucs = 0;

        int i = 0;
        for (String hpo : geneHposPredictable) {
            usedHpos++;
            MeanSd hpoMeanSd = hpoMeanSds.get(hpo);
            double hpoPredictionZ = predictionMatrixSignificant.getElement(gene, hpo);
            double hpoPredictionOutlierScore = ((hpoPredictionZ - hpoMeanSd.getMean()) / hpoMeanSd.getSd());
            if (notFirst) {
                individualMatchScore.append(';');
            }
            notFirst = true;
            individualMatchScore.append(hpoPredictionOutlierScore);
            hpoPhenotypicMatchScore += hpoPredictionOutlierScore;
            aucs[i++] = predictionInfo.get(hpo).getAuc();
            sumAucs += predictionInfo.get(hpo).getAuc();
        }

        double meanAuc = meanCalculator.evaluate(aucs);
        double medianAuc = medianCalculator.evaluate(aucs);

        if (usedHpos == 0) {
            hpoPhenotypicMatchScore = Double.NaN;
        } else {
            hpoPhenotypicMatchScore = hpoPhenotypicMatchScore / usedHpos;
        }

        String symbol = ensgSymbolMapping.get(gene);
        if (symbol == null) {
            symbol = "";
        }

        c = 0;
        outputLine[c++] = disease;
        outputLine[c++] = gene;
        outputLine[c++] = symbol;
        outputLine[c++] = String.valueOf(rank);
        outputLine[c++] = String.valueOf(rankAmongDiseaseGenes);
        outputLine[c++] = String.valueOf(zscore);
        outputLine[c++] = String.valueOf(skewnessInfo.getHpoSkewness(gene));
        outputLine[c++] = String.valueOf(skewnessInfo.getMeanSkewnessExHpo(gene));
        outputLine[c++] = String.valueOf(skewnessInfo.getMaxSkewnessExHpo(gene));
        outputLine[c++] = String.valueOf(hpoPhenotypicMatchScore);
        outputLine[c++] = String.valueOf(geneHposPredictable.size());
        outputLine[c++] = String.valueOf(sumAucs);
        outputLine[c++] = String.valueOf(meanAuc);
        outputLine[c++] = String.valueOf(medianAuc);
        outputLine[c++] = String.join(";", geneHposPredictable);
        outputLine[c++] = individualMatchScore.toString();

        writer.writeNext(outputLine);
    }

    writer.close();
}
From source file:com.google.gwt.emultest.java.util.LinkedHashSetTest.java
/**
 * Check the state of a newly constructed, empty LinkedHashSet.
 *
 * @param hashSet
 */
private static void checkEmptyLinkedHashSetAssumptions(LinkedHashSet<?> hashSet) {
    assertNotNull(hashSet);
    assertTrue(hashSet.isEmpty());
}
From source file:org.opencb.opencga.storage.core.metadata.StudyConfiguration.java
public static LinkedHashMap<String, Integer> getReturnedSamplesPosition(StudyConfiguration studyConfiguration,
        LinkedHashSet<String> returnedSamples,
        Function<StudyConfiguration, BiMap<String, Integer>> getIndexedSamplesPosition) {
    LinkedHashMap<String, Integer> samplesPosition;
    if (returnedSamples == null || returnedSamples.isEmpty()) {
        BiMap<Integer, String> unorderedSamplesPosition = getIndexedSamplesPosition(studyConfiguration)
                .inverse();
        samplesPosition = new LinkedHashMap<>(unorderedSamplesPosition.size());
        for (int i = 0; i < unorderedSamplesPosition.size(); i++) {
            samplesPosition.put(unorderedSamplesPosition.get(i), i);
        }
    } else {
        samplesPosition = new LinkedHashMap<>(returnedSamples.size());
        int index = 0;
        BiMap<String, Integer> indexedSamplesId = getIndexedSamplesPosition.apply(studyConfiguration);
        for (String returnedSample : returnedSamples) {
            if (!returnedSample.isEmpty() && StringUtils.isNumeric(returnedSample)) {
                returnedSample = studyConfiguration.getSampleIds().inverse()
                        .get(Integer.parseInt(returnedSample));
            }
            if (!samplesPosition.containsKey(returnedSample)) {
                if (indexedSamplesId.containsKey(returnedSample)) {
                    samplesPosition.put(returnedSample, index++);
                }
            }
        }
        // for (String sample : indexedSamplesId.keySet()) {
        //     samplesPosition.put(sample, index++);
        // }
    }
    return samplesPosition;
}
From source file:org.opencb.opencga.storage.mongodb.variant.MongoDBVariantStoragePipeline.java
/**
 * Check if the samples from the selected file can be loaded.
 * <p>
 * The MongoDB storage plugin is not able to load batches of samples in an unordered way.
 * A batch of samples is a group of samples of any size. It may be composed of one or several VCF files,
 * depending on whether it is split by region (horizontally) or not.
 * All the files from the same batch must be loaded before loading the next batch. If a new batch of
 * samples begins to be loaded, it won't be possible to load other files from previous batches.
 * <p>
 * The StudyConfiguration must be complete, with all the indexed files and the samples in each file.
 * The provided StudyConfiguration won't be modified.
 * Requirements:
 * - The samples in the file must be either all loaded or all not loaded.
 * - If all samples are loaded, they must match (same samples, same order) the last loaded file.
 *
 * @param studyConfiguration StudyConfiguration from the selected study
 * @param fileId             File to load
 * @return Whether this file represents a new batch of samples
 * @throws StorageEngineException If any requirement is not met
 */
public static boolean checkCanLoadSampleBatch(final StudyConfiguration studyConfiguration, int fileId)
        throws StorageEngineException {
    LinkedHashSet<Integer> sampleIds = studyConfiguration.getSamplesInFiles().get(fileId);
    if (!sampleIds.isEmpty()) {
        boolean allSamplesRepeated = true;
        boolean someSamplesRepeated = false;

        BiMap<String, Integer> indexedSamples = StudyConfiguration.getIndexedSamples(studyConfiguration);
        for (Integer sampleId : sampleIds) {
            if (!indexedSamples.containsValue(sampleId)) {
                allSamplesRepeated = false;
            } else {
                someSamplesRepeated = true;
            }
        }

        if (allSamplesRepeated) {
            ArrayList<Integer> indexedFiles = new ArrayList<>(studyConfiguration.getIndexedFiles());
            if (!indexedFiles.isEmpty()) {
                int lastIndexedFile = indexedFiles.get(indexedFiles.size() - 1);
                // Check that the samples are the same, in the same order
                if (!new ArrayList<>(studyConfiguration.getSamplesInFiles().get(lastIndexedFile))
                        .equals(new ArrayList<>(sampleIds))) {
                    if (studyConfiguration.getSamplesInFiles().get(lastIndexedFile).containsAll(sampleIds)) {
                        throw new StorageEngineException("Unable to load this batch. Wrong samples order"); //TODO: Should it care?
                    } else {
                        throw new StorageEngineException(
                                "Unable to load this batch. Another sample batch has been loaded already.");
                    }
                }
                // Ok, the batch of samples matches the last loaded batch of samples.
                return false; // This is NOT a new batch of samples
            }
        } else if (someSamplesRepeated) {
            throw new StorageEngineException("There were some already indexed samples, but not all of them. "
                    + "Unable to load in Storage-MongoDB");
        }
    }
    return true; // This is a new batch of samples
}
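The core of the check above is classifying a file's samples as all indexed, partially indexed, or not indexed at all; isEmpty() short-circuits the whole check when the file carries no samples. A minimal standalone sketch of that classification (the class, enum, and method names are illustrative, not from the OpenCGA codebase):

import java.util.LinkedHashSet;
import java.util.Set;

public class BatchCheckSketch {
    enum Overlap { ALL, SOME, NONE }

    // Classify how the file's samples overlap with the already-indexed samples.
    static Overlap classify(LinkedHashSet<Integer> fileSamples, Set<Integer> indexedSamples) {
        if (fileSamples.isEmpty()) {
            return Overlap.NONE; // nothing to check; treated as a new batch
        }
        boolean all = true;
        boolean some = false;
        for (Integer sampleId : fileSamples) {
            if (indexedSamples.contains(sampleId)) {
                some = true;  // at least one sample already indexed
            } else {
                all = false;  // at least one sample not yet indexed
            }
        }
        return all ? Overlap.ALL : (some ? Overlap.SOME : Overlap.NONE);
    }

    public static void main(String[] args) {
        LinkedHashSet<Integer> file = new LinkedHashSet<>(java.util.List.of(1, 2, 3));
        System.out.println(classify(file, Set.of(1, 2, 3))); // ALL  -> must match last batch
        System.out.println(classify(file, Set.of(1)));       // SOME -> error in the method above
        System.out.println(classify(file, Set.of(7, 8)));    // NONE -> new batch
    }
}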
From source file:org.mskcc.cbio.oncokb.util.AlterationUtils.java
private static List<Alteration> getAlterations(Gene gene, String alteration, AlterationType alterationType,
        String consequence, Integer proteinStart, Integer proteinEnd, Set<Alteration> fullAlterations) {
    List<Alteration> alterations = new ArrayList<>();
    VariantConsequence variantConsequence = null;

    if (gene != null && alteration != null) {
        if (consequence != null) {
            Alteration alt = new Alteration();
            alt.setAlteration(alteration);
            variantConsequence = VariantConsequenceUtils.findVariantConsequenceByTerm(consequence);
            if (variantConsequence == null) {
                variantConsequence = new VariantConsequence(consequence, null, false);
            }
            alt.setConsequence(variantConsequence);
            alt.setAlterationType(alterationType == null ? AlterationType.MUTATION : alterationType);
            alt.setGene(gene);
            alt.setProteinStart(proteinStart);
            alt.setProteinEnd(proteinEnd);

            AlterationUtils.annotateAlteration(alt, alt.getAlteration());

            LinkedHashSet<Alteration> alts = alterationBo.findRelevantAlterations(alt, fullAlterations, true);
            if (!alts.isEmpty()) {
                alterations.addAll(alts);
            }
        } else {
            Alteration alt = new Alteration();
            alt.setAlteration(alteration);
            alt.setAlterationType(alterationType == null ? AlterationType.MUTATION : alterationType);
            alt.setGene(gene);
            alt.setProteinStart(proteinStart);
            alt.setProteinEnd(proteinEnd);

            AlterationUtils.annotateAlteration(alt, alt.getAlteration());

            LinkedHashSet<Alteration> alts = alterationBo.findRelevantAlterations(alt, fullAlterations, true);
            if (!alts.isEmpty()) {
                alterations.addAll(alts);
            }
        }
    }

    if (isFusion(alteration)) {
        Alteration alt = new Alteration();
        alt.setAlteration(alteration);
        alt.setAlterationType(alterationType == null ? AlterationType.MUTATION : alterationType);
        alt.setGene(gene);

        AlterationUtils.annotateAlteration(alt, alt.getAlteration());

        Alteration revertFusion = getRevertFusions(alt);
        if (revertFusion != null) {
            LinkedHashSet<Alteration> alts = alterationBo.findRelevantAlterations(revertFusion,
                    fullAlterations, true);
            if (alts != null) {
                alterations.addAll(alts);
            }
        }
    }

    return alterations;
}
From source file:com.spotify.hamcrest.jackson.IsJsonObject.java
private void describeMismatches(final ObjectNode node, final Description mismatchDescription,
        final LinkedHashSet<String> mismatchedKeys) {
    checkArgument(!mismatchedKeys.isEmpty(), "mismatchKeys must not be empty");

    String previousMismatchKey = null;
    String previousKey = null;
    mismatchDescription.appendText("{\n");
    for (String key : entryMatchers.keySet()) {
        if (mismatchedKeys.contains(key)) {
            // If this is not the first key and the previous key was not a mismatch then add ellipsis
            if (previousKey != null && !Objects.equals(previousMismatchKey, previousKey)) {
                mismatchDescription.appendText("  ...\n");
            }

            final Matcher<?> valueMatcher = entryMatchers.get(key);
            final JsonNode value = node.path(key);

            describeKey(key, mismatchDescription, d -> valueMatcher.describeMismatch(value, d));

            previousMismatchKey = key;
        }
        previousKey = key;
    }

    // If the last element was not a mismatch then add ellipsis
    if (!Objects.equals(previousMismatchKey, previousKey)) {
        mismatchDescription.appendText("  ...\n");
    }
    mismatchDescription.appendText("}");
}
From source file:org.nuxeo.ecm.webengine.loader.store.ResourceStoreClassLoader.java
protected ResourceStoreClassLoader(final ClassLoader pParent, LinkedHashSet<ResourceStore> cp) {
    super(pParent);
    this.cp = cp;
    if (!cp.isEmpty()) {
        stores = cp.toArray(new ResourceStore[cp.size()]);
    }
}
From source file:net.sf.taverna.t2.activities.apiconsumer.ApiConsumerActivityHealthChecker.java
public VisitReport visit(ApiConsumerActivity subject, List<Object> ancestors) {
    // Check if we can find the jar containing the apiconsumer's class
    Processor p = (Processor) VisitReport.findAncestor(ancestors, Processor.class);
    if (p == null) {
        return null;
    }

    List<VisitReport> reports = new ArrayList<VisitReport>();
    JsonNode configuration = subject.getConfiguration();

    /*
    String className = configuration.getClassName();
    try {
        // Try to load the API consumer's class
        ClassLoader classLoader = subject.getClassLoader();
        classLoader.loadClass(className);
        reports.add(new VisitReport(HealthCheck.getInstance(), p, "Class found", HealthCheck.NO_PROBLEM,
                Status.OK)); // All is fine
    } catch (ClassNotFoundException e) {
        VisitReport vr = new VisitReport(HealthCheck.getInstance(), p, "Class missing",
                HealthCheck.MISSING_CLASS, Status.SEVERE);
        vr.setProperty("className", className);
        reports.add(vr);
    }
    */

    // Check if we can find all the API consumer's dependencies
    LinkedHashSet<String> localDependencies = new LinkedHashSet<>();
    for (JsonNode localDependency : configuration.get("localDependency")) {
        localDependencies.add(localDependency.textValue());
    }
    if (!localDependencies.isEmpty()) {
        String[] jarArray = subject.libDir.list(new FileExtFilter(".jar"));
        if (jarArray != null) {
            // Names of all jars found in the lib directory
            List<String> jarFiles = Arrays.asList(jarArray);
            // Remove every dependency found in the lib directory. (removeAll replaces the
            // original remove-while-iterating loop, which would have thrown a
            // ConcurrentModificationException.)
            localDependencies.removeAll(jarFiles);
            if (localDependencies.isEmpty()) {
                // All dependencies found
                reports.add(new VisitReport(HealthCheck.getInstance(), p, "Dependencies found",
                        HealthCheck.NO_PROBLEM, Status.OK));
            } else {
                VisitReport vr = new VisitReport(HealthCheck.getInstance(), p, "Dependencies missing",
                        HealthCheck.MISSING_DEPENDENCY, Status.SEVERE);
                vr.setProperty("dependencies", localDependencies);
                vr.setProperty("directory", subject.libDir);
                reports.add(vr);
            }
        }
    }
    Status status = VisitReport.getWorstStatus(reports);
    VisitReport report = new VisitReport(HealthCheck.getInstance(), p, "API Consumer report",
            HealthCheck.NO_PROBLEM, status, reports);
    return report;
}
From source file:net.sf.taverna.t2.security.credentialmanager.impl.PossibleURILookupsTest.java
@Test
public void possibleLookupsNoRecursion() throws Exception {
    URI uri = URI.create(NASTY_URI);
    LinkedHashSet<URI> lookups = credentialManager.getPossibleServiceURIsToLookup(uri, false);
    assertTrue("Did not contain " + uri, lookups.remove(uri));
    assertTrue("Unexpected lookups:" + lookups, lookups.isEmpty());
}