List of usage examples for java.util.HashSet.size()
public int size()
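Before the real-world examples below, here is a minimal, self-contained sketch of what size() reports: the number of elements currently in the set, with duplicates counted only once (class name and values are illustrative).

import java.util.HashSet;

public class HashSetSizeDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        System.out.println(set.size()); // 0: a new set is empty

        set.add("a");
        set.add("b");
        set.add("a"); // duplicate, ignored by the set
        System.out.println(set.size()); // 2: duplicates count once

        set.remove("b");
        System.out.println(set.size()); // 1
    }
}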
From source file:web.diva.server.model.pca.PCAImageGenerator.java
@SuppressWarnings("CallToPrintStackTrace") public int[] getPCASelection(int startX, int startY, int endX, int endY) { double[] selectRect = null; if (zoom) {/*from w ww . ja v a 2 s . c om*/ try { selectRect = getZoomedSelectionRecatangle(startX, startY, endX, endY); } catch (Exception exp) { exp.printStackTrace(); } } else { selectRect = this.getSelectionRecatangle(startX, startY, endX, endY); } if (selectRect == null) return new int[] {}; HashSet<Integer> selectedPoints = new HashSet<Integer>(); for (int x = 0; x < points[0].length; x++) { double pointX = points[0][x]; double pointY = points[1][x]; if (pointX >= selectRect[0] && pointX <= selectRect[1] && pointY >= selectRect[2] && pointY <= selectRect[3]) { selectedPoints.add(x); } } if (selectedPoints.size() > 0) { Integer[] selectedIndexes = new Integer[selectedPoints.size()]; System.arraycopy(selectedPoints.toArray(), 0, selectedIndexes, 0, selectedIndexes.length); int[] arr = new int[selectedIndexes.length]; arr = org.apache.commons.lang3.ArrayUtils.toPrimitive(selectedIndexes, selectedIndexes.length); return arr; } return new int[] {}; }
From source file:main.java.repartition.SimpleTr.java
void populateMigrationList(Cluster cluster, WorkloadBatch wb) {
    this.migrationPlanList = new ArrayList<MigrationPlan>();

    // Based on: https://code.google.com/p/combinatoricslib/
    // Create the initial vector
    ICombinatoricsVector<Integer> initialVector = Factory.createVector(this.serverDataSet.keySet());

    // Create a simple permutation generator to generate N-permutations of the initial vector
    Generator<Integer> permutationGen = Factory.createPermutationWithRepetitionGenerator(initialVector,
            this.serverDataSet.size());

    HashMap<HashSet<Integer>, Integer> uniqueFromSet = new HashMap<HashSet<Integer>, Integer>();

    // Get all possible N-permutations
    HashMap<Integer, HashSet<Integer>> dataMap;
    idtGainRank = new TreeSet<Double>();
    lbGainRank = new TreeSet<Double>();

    for (ICombinatoricsVector<Integer> permutations : permutationGen) {
        HashSet<Integer> fromSet = new HashSet<Integer>();
        for (int i = 0; i < permutations.getSize() - 1; i++)
            fromSet.add(permutations.getValue(i));

        int to = permutations.getValue(permutations.getSize() - 1);

        if (!fromSet.contains(to)) {
            if (!uniqueFromSet.containsKey(fromSet)
                    || (uniqueFromSet.containsKey(fromSet) && !uniqueFromSet.get(fromSet).equals(to))) {

                if (fromSet.size() <= (this.serverDataSet.size() - 1)) {
                    dataMap = new HashMap<Integer, HashSet<Integer>>();
                    int req_data_mgr = 0;

                    for (int from : fromSet) {
                        req_data_mgr += this.serverDataSet.get(from).size();
                        dataMap.put(from, this.serverDataSet.get(from));
                    }

                    MigrationPlan m = new MigrationPlan(fromSet, to, dataMap, req_data_mgr); // from source server
                    this.migrationPlanList.add(m);

                    m.delta_idt = getDeltaIdt(wb, this, m);
                    m.delta_lb = getDeltaLb(cluster, this, m);

                    idtGainRank.add(m.delta_idt);
                    lbGainRank.add(m.delta_lb);

                    if (fromSet.size() > 1)
                        uniqueFromSet.put(fromSet, to);
                }
            } // end-if()
        } // end-if()
    } // end-for()

    // Get the maximum Idt and Lb gains for this transaction, for normalization purposes
    max_delta_idt = idtGainRank.last();
    max_delta_lb = lbGainRank.last();

    // Sort the migration list
    sortMigrationPlanList();

    this.max_idt_gain = this.migrationPlanList.get(0).delta_idt;
    this.max_lb_gain = this.migrationPlanList.get(0).delta_lb;
    this.min_data_mgr = this.migrationPlanList.get(0).req_data_mgr;
    this.max_combined_weight = this.migrationPlanList.get(0).combined_weight;

    // Testing
    /*
    System.out.println("Sorting based on combined ranking ...");
    System.out.println("--> " + this.toString());
    for (MigrationPlan m : this.migrationPlanList)
        System.out.println("\t" + m.toString());
    */
}
From source file:org.ramadda.geodata.cdmdata.GridPointOutputHandler.java
/**
 * Get the grid dates.
 *
 * @param dataset the dataset
 *
 * @return the dates (never null; empty if the dataset is null)
 */
public static List<CalendarDate> getGridDates(GridDataset dataset) {
    List<CalendarDate> gridDates = new ArrayList<CalendarDate>();
    if (dataset == null) {
        return gridDates;
    }
    List<GridDatatype> grids = dataset.getGrids();
    HashSet<CalendarDate> dateHash = new HashSet<CalendarDate>();
    List<CoordinateAxis1DTime> timeAxes = new ArrayList<CoordinateAxis1DTime>();

    for (GridDatatype grid : grids) {
        GridCoordSystem gcs = grid.getCoordinateSystem();
        CoordinateAxis1DTime timeAxis = gcs.getTimeAxis1D();
        if ((timeAxis != null) && !timeAxes.contains(timeAxis)) {
            timeAxes.add(timeAxis);
            List<CalendarDate> timeDates = timeAxis.getCalendarDates();
            for (CalendarDate timeDate : timeDates) {
                dateHash.add(timeDate);
            }
        }
    }
    if (!dateHash.isEmpty()) {
        // the HashSet has removed duplicate dates; size() pre-sizes the array
        gridDates = Arrays.asList(dateHash.toArray(new CalendarDate[dateHash.size()]));
        Collections.sort(gridDates);
    }
    return gridDates;
}
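getGridDates uses a HashSet purely for de-duplication and sorts afterwards; when the element type is Comparable, a TreeSet does both in one pass. A minimal standalone sketch of the same dedupe-and-sort step (types and values are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

class DedupeAndSort {
    public static void main(String[] args) {
        // TreeSet rejects duplicates and keeps elements sorted as they arrive
        TreeSet<String> dates = new TreeSet<>();
        dates.add("2020-01-02");
        dates.add("2020-01-01");
        dates.add("2020-01-02"); // duplicate, ignored
        List<String> sorted = new ArrayList<>(dates);
        System.out.println(sorted); // [2020-01-01, 2020-01-02]
    }
}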
From source file:org.compass.core.lucene.engine.store.AbstractLuceneSearchEngineStore.java
public String[] calcSubIndexes(String[] subIndexes, String[] aliases) {
    if (aliases == null) {
        if (subIndexes == null) {
            return getSubIndexes();
        }
        return subIndexes;
    }
    HashSet<String> ret = new HashSet<String>();
    for (String alias : aliases) {
        List<String> subIndexesList = subIndexesByAlias.get(alias);
        if (subIndexesList == null) {
            throw new IllegalArgumentException("No sub-index is mapped to alias [" + alias + "]");
        }
        for (String subIndex : subIndexesList) {
            ret.add(subIndex);
        }
    }
    if (subIndexes != null) {
        ret.addAll(Arrays.asList(subIndexes));
    }
    return ret.toArray(new String[ret.size()]);
}
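The return statement above is the classic pre-sizing idiom: size() lets toArray allocate a destination array that exactly fits the set. A standalone sketch (names and values are illustrative); note that on modern JVMs toArray(new String[0]) is an equally correct and often faster alternative.

import java.util.HashSet;

class PreSizedToArray {
    public static void main(String[] args) {
        HashSet<String> ret = new HashSet<>();
        ret.add("index_a");
        ret.add("index_b");
        // size() pre-sizes the target array, so toArray fills it in place
        String[] asArray = ret.toArray(new String[ret.size()]);
        System.out.println(asArray.length); // 2
    }
}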
From source file:ispd.gui.JResultados.java
private void setResultadosUsuario(MetricasUsuarios metricasUsuarios, Metricas metricas) {
    if (metricasUsuarios != null && metricasUsuarios.getUsuarios().size() > 1) {
        String texto = "";
        for (int i = 0; i < metricasUsuarios.getUsuarios().size(); i++) {
            String userName = metricasUsuarios.getUsuarios().get(i);
            texto += "\n\n\t\tUser " + userName + "\n";
            HashSet<Tarefa> set = metricasUsuarios.getTarefasConcluidas(userName);
            texto += "\nNumber of tasks: " + set.size() + "\n";

            double tempoMedioFilaComunicacao = 0;
            double tempoMedioComunicacao = 0;
            double tempoMedioSistemaComunicacao;
            double tempoMedioFilaProcessamento = 0;
            double tempoMedioProcessamento = 0;
            double tempoMedioSistemaProcessamento;
            int numTarefas = 0;

            for (Tarefa no : set) {
                // Accumulate per-task times; averages are taken below.
                tempoMedioFilaComunicacao += no.getMetricas().getTempoEsperaComu();
                tempoMedioComunicacao += no.getMetricas().getTempoComunicacao();
                tempoMedioFilaProcessamento += no.getMetricas().getTempoEsperaProc();
                tempoMedioProcessamento += no.getMetricas().getTempoProcessamento();
                numTarefas++;
            }
            tempoMedioFilaComunicacao = tempoMedioFilaComunicacao / numTarefas;
            tempoMedioComunicacao = tempoMedioComunicacao / numTarefas;
            tempoMedioFilaProcessamento = tempoMedioFilaProcessamento / numTarefas;
            tempoMedioProcessamento = tempoMedioProcessamento / numTarefas;
            tempoMedioSistemaComunicacao = tempoMedioFilaComunicacao + tempoMedioComunicacao;
            tempoMedioSistemaProcessamento = tempoMedioFilaProcessamento + tempoMedioProcessamento;

            texto += "\n Communication \n";
            texto += String.format("    Queue average time: %g seconds.\n", tempoMedioFilaComunicacao);
            texto += String.format("    Communication average time: %g seconds.\n", tempoMedioComunicacao);
            texto += String.format("    System average time: %g seconds.\n", tempoMedioSistemaComunicacao);
            texto += "\n Processing \n";
            texto += String.format("    Queue average time: %g seconds.\n", tempoMedioFilaProcessamento);
            texto += String.format("    Processing average time: %g seconds.\n", tempoMedioProcessamento);
            texto += String.format("    System average time: %g seconds.\n", tempoMedioSistemaProcessamento);
        }

        // "Satisfação dos usuários em porcentagem" = user satisfaction, in percent
        texto += "\nSatisfação dos usuários em porcentagem\n";
        for (Map.Entry<String, Double> entry : metricas.getMetricasSatisfacao().entrySet()) {
            String user = entry.getKey();
            Double satisfacao = entry.getValue();
            texto += user + " : " + satisfacao + " %\n";
        }
        jTextAreaUsuario.setText(texto);
    } else {
        jTabbedPanelGraficosIndividuais.remove(jScrollPaneUsuario);
    }
}
From source file:com.silentcircle.contacts.interactions.ContactDeletionInteraction.java
@Override
public void onLoadFinished(Loader<Cursor> loader, Cursor cursor) {
    if (mDialog != null) {
        mDialog.dismiss();
        mDialog = null;
    }
    if (!mActive) {
        return;
    }
    long contactId = 0;

    // This cursor may contain duplicate raw contacts, so we need to de-dupe them first
    HashSet<Long> readOnlyRawContacts = Sets.newHashSet();
    HashSet<Long> writableRawContacts = Sets.newHashSet();

    AccountTypeManager accountTypes = AccountTypeManager.getInstance(getActivity());
    cursor.moveToPosition(-1);
    while (cursor.moveToNext()) {
        contactId = cursor.getLong(COLUMN_INDEX_RAW_CONTACT_ID);
        AccountType type = accountTypes.getAccountType();
        boolean writable = type == null || type.areContactsWritable();
        if (writable) {
            writableRawContacts.add(contactId);
        } else {
            readOnlyRawContacts.add(contactId);
        }
    }

    int readOnlyCount = readOnlyRawContacts.size();
    int writableCount = writableRawContacts.size();
    if (readOnlyCount > 0 && writableCount > 0) {
        mMessageId = R.string.readOnlyContactDeleteConfirmation;
    } else if (readOnlyCount > 0 && writableCount == 0) {
        mMessageId = R.string.readOnlyContactWarning;
    } else if (readOnlyCount == 0 && writableCount > 1) {
        mMessageId = R.string.multipleContactDeleteConfirmation;
    } else {
        mMessageId = R.string.deleteConfirmation;
    }

    final Uri contactUri = RawContacts.getLookupUri(contactId);
    showDialog(mMessageId, contactUri);

    // We don't want onLoadFinished() calls any more, which may come when the database is updating.
    getLoaderManager().destroyLoader(R.id.dialog_delete_contact_loader_id);
}
From source file:net.semanticmetadata.lire.imageanalysis.bovw.LocalFeatureHistogramBuilderFromCodeBook.java
private HashSet<Integer> selectVocabularyDocs() throws IOException {
    // need to make sure that this is not running forever ...
    int loopCount = 0;
    float maxDocs = reader.maxDoc();
    int capacity = (int) Math.min(numDocsForVocabulary, maxDocs);
    if (capacity < 0)
        capacity = (int) (maxDocs / 2);
    HashSet<Integer> result = new HashSet<Integer>(capacity);
    int tmpDocNumber, tmpIndex;
    LinkedList<Integer> docCandidates = new LinkedList<Integer>();

    // Three cases:
    // (1) numDocsForVocabulary covers all documents:
    if (numDocsForVocabulary >= maxDocs) {
        for (int i = 0; i < maxDocs; i++) {
            result.add(i);
        }
        return result;
    } else if (numDocsForVocabulary >= maxDocs - 100) {
        // (2) it is only slightly less: take all documents, then evict random ones
        for (int i = 0; i < maxDocs; i++) {
            result.add(i);
        }
        while (result.size() > numDocsForVocabulary) {
            // remove is by value (auto-boxed), not by index; misses simply retry
            result.remove((int) Math.floor(Math.random() * result.size()));
        }
        return result;
    } else {
        // (3) it is much less: draw random candidates without replacement
        for (int i = 0; i < maxDocs; i++) {
            docCandidates.add(i);
        }
        for (int r = 0; r < capacity; r++) {
            boolean worksFine = false;
            do {
                tmpIndex = (int) Math.floor(Math.random() * (double) docCandidates.size());
                tmpDocNumber = docCandidates.get(tmpIndex);
                docCandidates.remove(tmpIndex);
                // check if the selected doc number is valid: not null and not already chosen
                worksFine = (reader.document(tmpDocNumber) != null) && !result.contains(tmpDocNumber);
                // need to make sure that this is not running forever ...
                if (loopCount++ > capacity * 100)
                    throw new UnsupportedOperationException(
                            "Could not get the documents, maybe there are not enough documents in the index?");
            } while (!worksFine);
            result.add(tmpDocNumber);
        }
        return result;
    }
}
From source file:hms.hwestra.interactionrebuttal2.InteractionRebuttal2.java
private void mergeMetaFiles(String[] files, String[] fileNames, String out, String annot, double threshold)
        throws IOException {
    TextFile tf1 = new TextFile(annot, TextFile.R);
    Map<String, String> ilmnToArr = tf1.readAsHashMap(0, 1);
    tf1.close();

    ArrayList<HashMap<String, Double>> eqtls = new ArrayList<HashMap<String, Double>>();
    HashSet<String> uniqueEQTLs = new HashSet<String>();
    for (String file : files) {
        HashMap<String, Double> eqtl = loadInteractionMetaTableZScoreBlaat(file);
        Set<String> keyset = eqtl.keySet();
        for (String key : keyset) {
            uniqueEQTLs.add(key);
        }
        System.out.println(eqtl.size() + " eQTLs loaded from " + file);
        eqtls.add(eqtl);
    }
    System.out.println(uniqueEQTLs.size() + " unique eQTLs");

    TextFile outfile = new TextFile(out, TextFile.W);
    String header = "SNP\tProbe\tGene";
    for (int i = 0; i < files.length; i++) {
        header += "\tZ-" + fileNames[i] + "\tP-" + fileNames[i];
    }
    outfile.writeln(header);

    int[] nrEQTLsBelowThreshold = new int[files.length];
    int[] nrTotalEQTLs = new int[files.length];
    for (String eqtl : uniqueEQTLs) {
        String[] eqtlelems = eqtl.split("-");
        String outln = eqtlelems[0] + "\t" + eqtlelems[1] + "\t" + ilmnToArr.get(eqtlelems[1]);
        for (int i = 0; i < files.length; i++) {
            HashMap<String, Double> map = eqtls.get(i);
            Double z = map.get(eqtl);
            if (z != null && !Double.isNaN(z)) {
                double p = ZScores.zToP(z);
                if (p < threshold) {
                    nrEQTLsBelowThreshold[i]++;
                }
                nrTotalEQTLs[i]++;
                outln += "\t" + z + "\t" + p;
            } else {
                outln += "\tNaN\tNaN";
            }
        }
        outfile.writeln(outln);
    }
    outfile.close();

    System.out.println("p-values below threshold:");
    for (int i = 0; i < files.length; i++) {
        System.out.println(fileNames[i] + "\t" + nrEQTLsBelowThreshold[i] + "\t" + nrTotalEQTLs[i]);
    }
}
From source file:ch.unil.genescore.pathway.GeneSetLibrary.java
/** Compute enrichment for the given set using the chi2 distribution */
private void computeChi2Pvalue(GeneSet set) {
    HashSet<Gene> pathwayGenes = set.getGenes();
    if (pathwayGenes.isEmpty())
        return;

    // Sum the chi2 stats
    double q = 0;
    for (Gene g : pathwayGenes)
        q += g.getChi2Stat(); // q += g.getScore(0);

    // chi2 with one degree of freedom per pathway gene
    ChiSquaredDistribution chi2 = new ChiSquaredDistribution(pathwayGenes.size());

    // P(X > q) -- enrichment of genes with low p-values
    double pval = 1 - chi2.cumulativeProbability(q);

    // boolean depletion = false;
    // if (pval > 0.5) {
    //     pval = 1 - pval;
    //     depletion = true;
    // }

    set.setChi2Pvalue(pval);
    // set.setDepletion(depletion);
}
From source file:org.kuali.rice.kew.routeheader.service.impl.RouteHeaderServiceImpl.java
public void updateRouteHeaderSearchValues(String documentId, List<SearchableAttributeValue> searchAttributes) {
    getRouteHeaderDAO().clearRouteHeaderSearchValues(documentId);

    // "de-dupe" on the combination of attribute key and value
    HashSet<String> dupedSet = new HashSet<String>();
    for (SearchableAttributeValue searchAttribute : searchAttributes) {
        if (searchAttribute != null) {
            String fakeKey = searchAttribute.getSearchableAttributeKey() + "-"
                    + searchAttribute.getSearchableAttributeValue();
            if (!dupedSet.contains(fakeKey)) {
                getRouteHeaderDAO().save(searchAttribute);
                dupedSet.add(fakeKey);
            }
        }
    }
    LOG.warn("Deduplication reduced the incoming SearchableAttributeValue list from "
            + searchAttributes.size() + " entries to " + dupedSet.size() + " entries.");
}