List of usage examples for java.util.TreeSet.size()
public int size()
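Before the project examples below, here is a minimal, self-contained sketch of what size() reports: the number of elements currently held by the set. Duplicates are counted only once, because a TreeSet stores each distinct element a single time.

import java.util.TreeSet;

public class TreeSetSizeDemo {
    public static void main(String[] args) {
        TreeSet<String> set = new TreeSet<>();
        set.add("b");
        set.add("a");
        set.add("a"); // duplicate; not stored again
        // size() reports the number of distinct elements: 2
        System.out.println(set.size());
    }
}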
From source file:com.ichi2.anki.CardEditor.java
private void actualizeTagDialog(StyledDialog ad) {
    TreeSet<String> tags = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);
    for (String tag : mCol.getTags().all()) {
        tags.add(tag);
    }
    tags.addAll(selectedTags);
    int len = tags.size();
    allTags = new String[len];
    boolean[] checked = new boolean[len];
    int i = 0;
    for (String t : tags) {
        allTags[i++] = t;
        if (selectedTags.contains(t)) {
            checked[i - 1] = true;
        }
    }
    ad.setMultiChoiceItems(allTags, checked, new DialogInterface.OnClickListener() {
        @Override
        public void onClick(DialogInterface arg0, int which) {
            String tag = allTags[which];
            if (selectedTags.contains(tag)) {
                Log.i(AnkiDroidApp.TAG, "unchecked tag: " + tag);
                selectedTags.remove(tag);
            } else {
                Log.i(AnkiDroidApp.TAG, "checked tag: " + tag);
                selectedTags.add(tag);
            }
        }
    });
}
From source file:org.apache.hadoop.hbase.client.HBaseFsck.java
/**
 * Return the set of tables whose .META. entries have not been modified in the last
 * few milliseconds specified by timelag. If any of the REGIONINFO_QUALIFIER,
 * SERVER_QUALIFIER, STARTCODE_QUALIFIER, SPLITA_QUALIFIER or SPLITB_QUALIFIER
 * columns have not changed within that window, the table is a candidate to be returned.
 *
 * @param numSkipped - incremented once for each in-flux table that is skipped
 * @return tables that have not been modified recently
 * @throws IOException if an error is encountered
 */
HTableDescriptor[] getTables(AtomicInteger numSkipped) {
    TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
    long now = System.currentTimeMillis();

    for (HbckInfo hbi : regionInfo.values()) {
        MetaEntry info = hbi.metaEntry;

        // If the start key is zero, then we have found the first region of a table.
        // Pick only those tables that were not modified in the last few milliseconds.
        if (info != null && info.getStartKey().length == 0) {
            if (info.modTime + timelag < now) {
                uniqueTables.add(info.getTableDesc());
            } else {
                numSkipped.incrementAndGet(); // one more in-flux table
            }
        }
    }
    return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
}
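The return statement above shows a very common pairing of size() with toArray: the set's size presizes the destination array so the sorted contents can be copied out in one call. A minimal sketch of that idiom (the class and variable names here are illustrative, not from HBase):

import java.util.TreeSet;

public class ToArrayDemo {
    public static void main(String[] args) {
        TreeSet<String> names = new TreeSet<>();
        names.add("beta");
        names.add("alpha");
        // Presize the array with size() so toArray can fill it in place
        String[] sorted = names.toArray(new String[names.size()]);
        System.out.println(sorted.length); // 2
    }
}

On modern JVMs, toArray(new String[0]) performs just as well and reads more simply, but the size()-based form is the one this example uses.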
From source file:org.opencastproject.capture.impl.SchedulerImpl.java
/**
 * Sets this machine's schedule based on the iCal data passed in as a parameter. Note that this call
 * wipes all currently scheduled captures and then schedules based on the new data. Also note that any
 * files which are in the way when this call tries to save the iCal attachments are overwritten
 * without prompting.
 *
 * @param sched
 *          The scheduler to schedule the new events on
 * @param newCal
 *          The new {@code Calendar} data
 */
private synchronized void setCaptureSchedule(Scheduler sched, Calendar newCal) {
    log.debug("setCaptureSchedule(sched, newCal)");
    try {
        Map<Long, String> scheduledEventStarts = new Hashtable<Long, String>();
        Map<String, Date> scheduledEventEnds = new Hashtable<String, Date>();

        // Sort the events into chronological starting order
        TreeSet<VEvent> list = new TreeSet<VEvent>(new VEventStartTimeComparator());
        list.addAll(newCal.getComponents(Component.VEVENT));
        VEvent[] startAry = list.toArray(new VEvent[list.size()]);

        for (int i = 0; i < startAry.length; i++) {
            Event event = new Event(startAry[i], captureAgent, this);
            if (!event.isValidEvent()) {
                continue;
            }

            boolean skipOnError = Boolean
                    .valueOf(configService.getItem(CaptureParameters.CAPTURE_SCHEDULE_DROP_EVENT_IF_CONFLICT));
            int bufferMinutes = 1;
            if (configService.getItem(CaptureParameters.CAPTURE_SCHEDULE_INTEREVENT_BUFFERTIME) != null) {
                try {
                    bufferMinutes = Integer.valueOf(
                            configService.getItem(CaptureParameters.CAPTURE_SCHEDULE_INTEREVENT_BUFFERTIME));
                } catch (NumberFormatException e) {
                    log.info("Unable to parse value for {}, defaulting to 1 minute",
                            CaptureParameters.CAPTURE_SCHEDULE_INTEREVENT_BUFFERTIME);
                }
            }
            long bufferTime = bufferMinutes * CaptureParameters.MINUTES * CaptureParameters.MILLISECONDS;

            // If there could be an event scheduled before this one
            if (i > 0 && startAry[i - 1] != null && scheduledEventEnds.size() > 0) {
                int j = i - 1;
                String otherUID = null;
                // Search through the list of captures which could possibly have been scheduled,
                // checking to see which one is closest to us
                while (j > 0) {
                    String testUID = startAry[j].getUid().getValue();
                    if (scheduledEventEnds.containsKey(testUID)) {
                        otherUID = testUID;
                        break;
                    }
                    j--;
                }

                // If we found something
                if (otherUID != null) {
                    Date lastEndDate = scheduledEventEnds.get(otherUID);
                    if (event.getStart().before(lastEndDate)) {
                        if (skipOnError) {
                            log.warn("Start time for event {} is before end time of event {}! Skipping...",
                                    event.getUID(), otherUID);
                            continue;
                        } else {
                            log.warn("Start time for event {} is before end time of event {}! Shortening to fit...",
                                    event.getUID(), otherUID);
                            event.setStart(new Date(lastEndDate.getTime() + bufferTime));
                        }
                    } else if (ONE_MINUTE_DURATION.compareTo(new Dur(lastEndDate, event.getStart())) >= 0) {
                        if (skipOnError) {
                            log.warn("Start time for event {} is within one minute of event {}! Skipping...",
                                    event.getUID(), otherUID);
                            continue;
                        } else {
                            log.warn("Start time for event {} is within one minute of event {}! Shortening to fit...",
                                    event.getUID(), otherUID);
                            event.setStart(new Date(lastEndDate.getTime() + bufferTime));
                        }
                    }
                }
            }

            if (!event.isValidEvent()) {
                continue;
            }

            // Get the cron expression and make sure it doesn't conflict with any existing captures.
            // Note that this means the order in which the scheduled events appear in the source iCal
            // makes a functional difference!
            String conflict = scheduledEventStarts.get(event.getStart().getTime());
            if (conflict != null) {
                // This case should have disappeared with MH-1253, but I'm leaving it here anyway just in case
                log.warn("Unable to schedule event {} because its starting time coincides with event {}!",
                        event.getUID(), conflict);
                continue;
            }

            PropertyList attachments = event.getProperties(Property.ATTACH);
            scheduleEvent(sched, event, attachments);
            scheduledEventStarts.put(event.getStart().getTime(), event.getUID());
            scheduledEventEnds.put(event.getUID(), event.getEnd());
        }
    } catch (NullPointerException e) {
        log.error("Invalid calendar data, one of the start or end times is incorrect: {}.", e);
    } catch (ParseException e) {
        log.error("Parsing error: {}.", e);
    } catch (org.opencastproject.util.ConfigurationException e) {
        log.error("Configuration exception: {}.", e);
    } catch (MediaPackageException e) {
        log.error("MediaPackageException: {}.", e);
    } catch (MalformedURLException e) {
        log.error("MalformedURLException: {}.", e);
    }
}
From source file:de.tudarmstadt.tk.statistics.report.ReportGenerator.java
/**
 * Get a String representation of the models' order, inferred from their pairwise p-values.
 *
 * @param ordering
 *            a HashMap mapping levels of topological order to sets of models
 * @return A string representing the models' order or, alternatively, a message that no
 *         order could be determined.
 */
private String getModelOrderingRepresentation(HashMap<Integer, TreeSet<Integer>> ordering) {
    if (ordering != null && ordering.size() > 1) {
        StringBuilder orderSequence = new StringBuilder();
        for (int level = 0; level < ordering.keySet().size(); level++) {
            TreeSet<Integer> s = ordering.get(level);
            if (s.size() == 0) {
                return "These results do not allow for a strict ordering of all models.";
            }
            int n = s.first();
            s.remove(n);
            orderSequence.append(String.format("(M%d", n));
            for (Integer node : ordering.get(level)) {
                orderSequence.append(String.format(",M%d", node));
            }
            orderSequence.append(")");
            if (level < ordering.keySet().size() - 1) {
                orderSequence.append("<");
            }
        }
        return String.format("These results allow for the following ordering of model performances: %s. ",
                orderSequence.toString());
    } else {
        return "These results do not allow for a strict ordering of all models. ";
    }
}
From source file:gdsc.smlm.ij.plugins.TraceMolecules.java
private int[] convert(double[] intervals) {
    TreeSet<Integer> set = new TreeSet<Integer>();
    for (double d : intervals) {
        set.add((int) Math.round(d));
    }
    set.remove(0); // Do not allow zero
    int[] values = new int[set.size()];
    int i = 0;
    for (Integer ii : set) {
        values[i++] = ii;
    }
    Arrays.sort(values);
    return values;
}
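The convert method above leans on two TreeSet properties at once: duplicates collapse when rounding maps different doubles to the same int, and size() then gives the exact length needed for the result array. A stripped-down sketch of the same pattern (the input values here are made up for illustration):

import java.util.TreeSet;

public class DedupeDemo {
    public static void main(String[] args) {
        double[] intervals = {1.2, 2.7, 1.4, 0.3};
        TreeSet<Integer> set = new TreeSet<>();
        for (double d : intervals) {
            set.add((int) Math.round(d)); // 1.2 and 1.4 both round to 1
        }
        set.remove(0); // 0.3 rounds to 0, which is disallowed
        int[] values = new int[set.size()]; // exact size after deduplication
        int i = 0;
        for (int v : set) {
            values[i++] = v; // TreeSet iterates in ascending order
        }
        // values == {1, 3}
    }
}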
From source file:ca.sqlpower.architect.diff.CompareSQL.java
/**
 * Creates a List of DiffChunks that describe the differences between the
 * columns of the given tables.
 *
 * @param sourceTable The "left side" for the comparison. If null, then all columns
 *                    in the target table will be considered obsolete.
 * @param targetTable The "right side" for the comparison. If null, then all columns
 *                    in the source table will be considered new.
 * @throws SQLObjectException If the getColumns() methods of the source or target
 *                    tables run into trouble.
 */
private List<DiffChunk<SQLObject>> generateColumnDiffs(SQLTable sourceTable, SQLTable targetTable)
        throws SQLObjectException {
    TreeSet<SQLColumn> sourceColumnList;
    TreeSet<SQLColumn> targetColumnList;
    Iterator<SQLColumn> sourceColIter;
    Iterator<SQLColumn> targetColIter;
    SQLColumn sourceColumn;
    SQLColumn targetColumn;
    boolean sourceColContinue;
    boolean targetColContinue;
    boolean keyChangeFlag = false;

    sourceColumnList = new TreeSet<SQLColumn>(getObjectComparator());
    targetColumnList = new TreeSet<SQLColumn>(getObjectComparator());
    sourceColContinue = false;
    targetColContinue = false;
    sourceColIter = null;
    targetColIter = null;
    sourceColumn = null;
    targetColumn = null;

    // We store the diffs in here, then return this list
    List<DiffChunk<SQLObject>> diffs = new ArrayList<DiffChunk<SQLObject>>();

    if (sourceTable != null) {
        sourceColumnList.addAll(sourceTable.getColumns());
    }
    if (targetTable != null) {
        targetColumnList.addAll(targetTable.getColumns());
    }
    if (sourceColumnList.size() == 0) {
        sourceColumnList = null;
        sourceColContinue = false;
    } else {
        sourceColIter = sourceColumnList.iterator();
        sourceColumn = sourceColIter.next();
        sourceColContinue = true;
    }
    if (targetColumnList.size() == 0) {
        targetColumnList = null;
        targetColContinue = false;
    } else {
        targetColIter = targetColumnList.iterator();
        targetColumn = targetColIter.next();
        targetColContinue = true;
    }

    while (sourceColContinue && targetColContinue) {
        int compareResult = getObjectComparator().compare(sourceColumn, targetColumn);
        // Comparing Columns
        if (compareResult < 0) {
            diffs.add(new DiffChunk<SQLObject>(sourceColumn, DiffType.LEFTONLY));
            logger.debug("The source column is " + sourceColumn);
            if (sourceColumn.isPrimaryKey()) {
                keyChangeFlag = true;
            }
            if (sourceColIter.hasNext()) {
                sourceColumn = sourceColIter.next();
            } else {
                sourceColContinue = false;
            }
        }
        // Comparing Columns
        if (compareResult > 0) {
            diffs.add(new DiffChunk<SQLObject>(targetColumn, DiffType.RIGHTONLY));
            logger.debug("The target column is " + targetColumn);
            if (targetColumn.isPrimaryKey()) {
                keyChangeFlag = true;
            }
            if (targetColIter.hasNext()) {
                targetColumn = targetColIter.next();
            } else {
                targetColContinue = false;
            }
        }
        // Comparing Columns
        if (compareResult == 0) {
            if (targetColumn.isPrimaryKey() != sourceColumn.isPrimaryKey()) {
                keyChangeFlag = true;
                //diffs.add(new DiffChunk<SQLObject>(targetColumn, DiffType.KEY_CHANGED));
            }
            List<PropertyChange> changes = generatePropertyChanges(sourceColumn, targetColumn);
            if (changes.size() > 0) {
                DiffChunk<SQLObject> chunk = null;
                if (nameComparator.compare(sourceColumn, targetColumn) != 0) {
                    chunk = new DiffChunk<SQLObject>(targetColumn, DiffType.NAME_CHANGED);
                    chunk.setOriginalData(sourceColumn);
                } else if (ArchitectUtils.columnsDiffer(targetColumn, sourceColumn)) {
                    // Make sure the changes are worthy of a SQL script generation:
                    if (logger.isDebugEnabled()) {
                        logger.debug("Column " + sourceColumn.getName() + " differs!");
                        logger.debug(String.format("  Type:      %10d %10d", targetColumn.getType(),
                                sourceColumn.getType()));
                        logger.debug(String.format("  Precision: %10d %10d", targetColumn.getPrecision(),
                                sourceColumn.getPrecision()));
                        logger.debug(String.format("  Scale:     %10d %10d", targetColumn.getScale(),
                                sourceColumn.getScale()));
                        logger.debug(String.format("  Nullable:  %10d %10d", targetColumn.getNullable(),
                                sourceColumn.getNullable()));
                    }
                    chunk = new DiffChunk<SQLObject>(targetColumn, DiffType.SQL_MODIFIED);
                } else {
                    chunk = new DiffChunk<SQLObject>(targetColumn, DiffType.MODIFIED);
                }
                for (PropertyChange change : changes) {
                    chunk.addPropertyChange(change);
                }
                diffs.add(chunk);
            } else {
                if (!suppressSimilarities) {
                    diffs.add(new DiffChunk<SQLObject>(sourceColumn, DiffType.SAME));
                }
            }
            if (targetColIter.hasNext()) {
                targetColumn = targetColIter.next();
            } else {
                targetColContinue = false;
            }
            if (sourceColIter.hasNext()) {
                sourceColumn = sourceColIter.next();
            } else {
                sourceColContinue = false;
            }
        }
    }

    while (sourceColContinue) {
        diffs.add(new DiffChunk<SQLObject>(sourceColumn, DiffType.LEFTONLY));
        if (sourceColIter.hasNext()) {
            sourceColumn = sourceColIter.next();
        } else {
            sourceColContinue = false;
        }
    }
    while (targetColContinue) {
        diffs.add(new DiffChunk<SQLObject>(targetColumn, DiffType.RIGHTONLY));
        if (targetColIter.hasNext()) {
            targetColumn = targetColIter.next();
        } else {
            targetColContinue = false;
        }
    }
    if (keyChangeFlag) {
        if (sourceTable.getPkSize() > 0) {
            diffs.add(new DiffChunk<SQLObject>(sourceTable, DiffType.DROP_KEY));
        }
        diffs.add(new DiffChunk<SQLObject>(targetTable, DiffType.KEY_CHANGED));
    }
    return diffs;
}
From source file:canreg.client.analysis.TopNChartTableBuilder.java
@Override
public LinkedList<String> buildTable(String tableHeader, String reportFileName, int startYear, int endYear,
        Object[][] incidenceData, PopulationDataset[] populations, // can be null
        PopulationDataset[] standardPopulations, LinkedList<ConfigFields> configList, String[] engineParameters,
        FileTypes fileType) throws NotCompatibleDataException {

    String footerString = java.util.ResourceBundle
            .getBundle("canreg/client/analysis/resources/AgeSpecificCasesPerHundredThousandTableBuilder")
            .getString("TABLE BUILT ")
            + new Date()
            + java.util.ResourceBundle
                    .getBundle("canreg/client/analysis/resources/AgeSpecificCasesPerHundredThousandTableBuilder")
                    .getString(" BY CANREG5.");

    LinkedList<String> generatedFiles = new LinkedList<String>();

    if (Arrays.asList(engineParameters).contains("barchart")) {
        chartType = ChartType.BAR;
    } else {
        chartType = ChartType.PIE;
        includeOther = true;
    }
    if (Arrays.asList(engineParameters).contains("legend")) {
        legendOn = true;
    }
    if (Arrays.asList(engineParameters).contains("r")) {
        useR = true;
    }
    if (Arrays.asList(engineParameters).contains("asr")) {
        countType = CountType.ASR;
    } else if (Arrays.asList(engineParameters).contains("cum64")) {
        countType = CountType.CUM64;
    } else if (Arrays.asList(engineParameters).contains("cum74")) {
        countType = CountType.CUM74;
    } else if (Arrays.asList(engineParameters).contains("per100000")) {
        countType = CountType.PER_HUNDRED_THOUSAND;
    } else {
        // default to cases
        countType = CountType.CASES;
    }

    localSettings = CanRegClientApp.getApplication().getLocalSettings();
    rpath = localSettings.getProperty(LocalSettings.R_PATH);
    // does R exist?
    if (rpath == null || rpath.isEmpty() || !new File(rpath).exists()) {
        useR = false; // force false if R is not installed
    }

    icdLabel = ConfigFieldsReader.findConfig("ICD_groups_labels", configList);
    icd10GroupDescriptions = ConfigFieldsReader.findConfig("ICD10_groups", configList);
    cancerGroupsLocal = EditorialTableTools.generateICD10Groups(icd10GroupDescriptions);

    // indexes
    keyGroupsMap = new EnumMap<KeyCancerGroupsEnum, Integer>(KeyCancerGroupsEnum.class);
    keyGroupsMap.put(KeyCancerGroupsEnum.allCancerGroupsIndex,
            EditorialTableTools.getICD10index("ALL", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.leukemiaNOSCancerGroupIndex,
            EditorialTableTools.getICD10index(950, cancerGroupsLocal));
    keyGroupsMap.put(KeyCancerGroupsEnum.skinCancerGroupIndex,
            EditorialTableTools.getICD10index("C44", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.bladderCancerGroupIndex,
            EditorialTableTools.getICD10index("C67", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.mesotheliomaCancerGroupIndex,
            EditorialTableTools.getICD10index("C45", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.kaposiSarkomaCancerGroupIndex,
            EditorialTableTools.getICD10index("C46", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.myeloproliferativeDisordersCancerGroupIndex,
            EditorialTableTools.getICD10index("MPD", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.myelodysplasticSyndromesCancerGroupIndex,
            EditorialTableTools.getICD10index("MDS", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.allCancerGroupsButSkinIndex,
            EditorialTableTools.getICD10index("ALLbC44", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.brainAndCentralNervousSystemCancerGroupIndex,
            EditorialTableTools.getICD10index("C70-72", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.ovaryCancerGroupIndex,
            EditorialTableTools.getICD10index(569, cancerGroupsLocal));
    keyGroupsMap.put(KeyCancerGroupsEnum.otherCancerGroupsIndex,
            EditorialTableTools.getICD10index("O&U", icd10GroupDescriptions));

    otherCancerGroupsIndex = keyGroupsMap.get(KeyCancerGroupsEnum.otherCancerGroupsIndex);
    skinCancerGroupIndex = keyGroupsMap.get(KeyCancerGroupsEnum.skinCancerGroupIndex);
    allCancerGroupsIndex = keyGroupsMap.get(KeyCancerGroupsEnum.allCancerGroupsIndex);
    allCancerGroupsButSkinIndex = keyGroupsMap.get(KeyCancerGroupsEnum.allCancerGroupsButSkinIndex);

    numberOfCancerGroups = cancerGroupsLocal.length;

    double[] countsRow;

    if (populations != null && populations.length > 0) {
        if (populations[0].getPopulationDatasetID() < 0) {
            countType = CountType.CASES;
        } else {
            // calculate period pop
            periodPop = new PopulationDataset();
            periodPop.setAgeGroupStructure(populations[0].getAgeGroupStructure());
            periodPop.setReferencePopulation(populations[0].getReferencePopulation());
            for (PopulationDatasetsEntry pde : populations[0].getAgeGroups()) {
                int count = 0;
                for (PopulationDataset pds : populations) {
                    count += pds.getAgeGroupCount(pde.getSex(), pde.getAgeGroup());
                }
                periodPop.addAgeGroup(new PopulationDatasetsEntry(pde.getAgeGroup(), pde.getSex(), count));
            }
        }
    }

    if (incidenceData != null) {
        String sexString, icdString, morphologyString;
        double countArray[][] = new double[numberOfCancerGroups][numberOfSexes];

        int sex, icdIndex, numberOfCases, age;
        double adjustedCases;
        List<Integer> dontCount = new LinkedList<Integer>();

        // all sites but skin?
        if (Arrays.asList(engineParameters).contains("noC44")) {
            dontCount.add(skinCancerGroupIndex);
            tableHeader += ", excluding C44";
        }

        for (Object[] dataLine : incidenceData) {
            // Set default
            adjustedCases = 0.0;

            // Extract data
            sexString = (String) dataLine[SEX_COLUMN];
            sex = Integer.parseInt(sexString.trim());
            // sex = 3 is unknown sex
            if (sex > 2) {
                sex = 3;
            }
            morphologyString = (String) dataLine[MORPHOLOGY_COLUMN];
            icdString = (String) dataLine[ICD10_COLUMN];
            icdIndex = Tools.assignICDGroupIndex(keyGroupsMap, icdString, morphologyString, cancerGroupsLocal);
            age = (Integer) dataLine[AGE_COLUMN];

            if (!dontCount.contains(icdIndex) && icdIndex != DONT_COUNT) {
                // Extract cases
                numberOfCases = (Integer) dataLine[CASES_COLUMN];
                if (countType == CountType.PER_HUNDRED_THOUSAND) {
                    adjustedCases = (100000.0 * numberOfCases)
                            / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age));
                } else if (countType == CountType.ASR) {
                    try {
                        adjustedCases = 100.0
                                * (periodPop.getReferencePopulationForAgeGroupIndex(sex,
                                        periodPop.getAgeGroupIndex(age)) * numberOfCases)
                                / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age));
                    } catch (IncompatiblePopulationDataSetException ex) {
                        Logger.getLogger(TopNChartTableBuilder.class.getName()).log(Level.SEVERE, null, ex);
                    }
                } else if (countType == CountType.CUM64) {
                    if (age < 65) {
                        adjustedCases = (100000.0 * numberOfCases)
                                / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age)) * 5.0 / 1000.0;
                    }
                } else if (countType == CountType.CUM74) {
                    if (age < 75) {
                        adjustedCases = (100000.0 * numberOfCases)
                                / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age)) * 5.0 / 1000.0;
                    }
                } else {
                    adjustedCases = numberOfCases;
                }

                if (sex <= numberOfSexes && icdIndex >= 0 && icdIndex <= cancerGroupsLocal.length) {
                    countArray[icdIndex][sex - 1] += adjustedCases;
                } else {
                    if (otherCancerGroupsIndex >= 0) {
                        countArray[otherCancerGroupsIndex][sex - 1] += adjustedCases;
                    }
                }
                if (allCancerGroupsIndex >= 0) {
                    countArray[allCancerGroupsIndex][sex - 1] += adjustedCases;
                }
                if (allCancerGroupsButSkinIndex >= 0 && skinCancerGroupIndex >= 0
                        && icdIndex != skinCancerGroupIndex) {
                    countArray[allCancerGroupsButSkinIndex][sex - 1] += adjustedCases;
                }
            }
        }

        // separate top 10 and the rest
        TreeSet<CancerCasesCount> topNMale = new TreeSet<CancerCasesCount>(new Comparator<CancerCasesCount>() {
            @Override
            public int compare(CancerCasesCount o1, CancerCasesCount o2) {
                if (o1.getCount().equals(o2.getCount())) {
                    return -o1.toString().compareTo(o2.toString());
                } else {
                    return -(o1.getCount().compareTo(o2.getCount()));
                }
            }
        });
        LinkedList<CancerCasesCount> theRestMale = new LinkedList<CancerCasesCount>();

        TreeSet<CancerCasesCount> topNFemale = new TreeSet<CancerCasesCount>(new Comparator<CancerCasesCount>() {
            @Override
            public int compare(CancerCasesCount o1, CancerCasesCount o2) {
                if (o1.getCount().equals(o2.getCount())) {
                    return -o1.toString().compareTo(o2.toString());
                } else {
                    return -(o1.getCount().compareTo(o2.getCount()));
                }
            }
        });
        LinkedList<CancerCasesCount> theRestFemale = new LinkedList<CancerCasesCount>();

        CancerCasesCount otherElement;
        CancerCasesCount thisElement;
        TreeSet<CancerCasesCount> topN;
        LinkedList<CancerCasesCount> theRest;

        for (int icdGroupNumber = 0; icdGroupNumber < countArray.length; icdGroupNumber++) {
            countsRow = countArray[icdGroupNumber];
            for (int sexNumber = 0; sexNumber < 2; sexNumber++) {
                if (sexNumber == 0) {
                    topN = topNMale;
                    theRest = theRestMale;
                } else {
                    topN = topNFemale;
                    theRest = theRestFemale;
                }
                if (countsRow[sexNumber] > 0) {
                    thisElement = new CancerCasesCount(icd10GroupDescriptions[icdGroupNumber],
                            icdLabel[icdGroupNumber].substring(3), countsRow[sexNumber], icdGroupNumber);
                    // if this is the "other" group - add it immediately to "the rest"
                    if (icdGroupNumber == otherCancerGroupsIndex) {
                        theRest.add(thisElement);
                        // if not we check if this is one of the collection groups
                    } else if (icdGroupNumber != allCancerGroupsButSkinIndex
                            && icdGroupNumber != allCancerGroupsIndex) {
                        // if it is less than N cancers in top N - add it
                        if (topN.size() < topNLimit) {
                            topN.add(thisElement);
                        } else {
                            // otherwise we need to compare it to the last element in the top 10
                            otherElement = topN.last();
                            if (thisElement.compareTo(otherElement) < 0) {
                                topN.remove(otherElement);
                                theRest.add(otherElement);
                                topN.add(thisElement);
                            } else {
                                theRest.add(thisElement);
                            }
                        }
                    }
                }
            }
        }

        for (int sexNumber : new int[] { 0, 1 }) {
            String fileName = reportFileName + "-" + sexLabel[sexNumber] + "." + fileType.toString();
            File file = new File(fileName);
            TreeSet<CancerCasesCount> casesCounts;
            Double restCount = Tools.sumUpTheRest(theRestMale, dontCount);
            if (sexNumber == 0) {
                casesCounts = topNMale;
            } else {
                casesCounts = topNFemale;
            }

            if (useR && !fileType.equals(FileTypes.jchart) && !fileType.equals(FileTypes.csv)) {
                String header = "Top 10 by " + countType + ", \n" + tableHeader + ", "
                        + TableBuilderInterface.sexLabel[sexNumber];
                generatedFiles.addAll(Tools.generateRChart(casesCounts, fileName, header, fileType, chartType,
                        includeOther, restCount, rpath, true, "Site"));
            } else {
                double allCount = countArray[allCancerGroupsIndex][sexNumber];
                Color color;
                if (sexNumber == 0) {
                    color = Color.BLUE;
                } else {
                    color = Color.RED;
                }
                String header = "Top 10 by " + countType + ", " + tableHeader + ", "
                        + TableBuilderInterface.sexLabel[sexNumber];
                charts[sexNumber] = Tools.generateJChart(casesCounts, fileName, header, fileType, chartType,
                        includeOther, legendOn, restCount, allCount, color, "Site");
                try {
                    generatedFiles.add(Tools.writeJChartToFile(charts[sexNumber], file, fileType));
                } catch (IOException ex) {
                    Logger.getLogger(TopNChartTableBuilder.class.getName()).log(Level.SEVERE, null, ex);
                } catch (DocumentException ex) {
                    Logger.getLogger(TopNChartTableBuilder.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
    }
    return generatedFiles;
}
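The core of the top-N selection above is a size()-bounded TreeSet: add candidates until the set holds topNLimit entries, then use last() to find the weakest kept entry and evict it whenever a stronger candidate arrives. A minimal sketch of that idiom with plain integers (names and values are illustrative):

import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
import java.util.TreeSet;

public class TopNDemo {
    public static void main(String[] args) {
        final int topNLimit = 3;
        // Descending order: first() is the strongest entry, last() the weakest kept
        TreeSet<Integer> topN = new TreeSet<>(Comparator.reverseOrder());
        List<Integer> theRest = new LinkedList<>();
        for (int value : new int[] {5, 1, 9, 7, 3, 8}) {
            if (topN.size() < topNLimit) {
                topN.add(value); // still room in the top N
            } else if (value > topN.last()) {
                theRest.add(topN.pollLast()); // evict the weakest entry
                topN.add(value);
            } else {
                theRest.add(value);
            }
        }
        System.out.println(topN);    // [9, 8, 7]
        System.out.println(theRest); // [1, 3, 5]
    }
}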
From source file:com.cloud.deploy.DeploymentPlanningManagerImpl.java
protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts,
        Map<Volume, List<StoragePool>> suitableVolumeStoragePools, ExcludeList avoid,
        DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, List<Volume> readyAndReusedVolumes,
        List<Long> preferredHosts) {
    s_logger.debug(
            "Trying to find a potential host and associated storage pools from the suitable host/pool lists for this VM");

    boolean hostCanAccessPool = false;
    boolean haveEnoughSpace = false;
    boolean hostAffinityCheck = false;

    if (readyAndReusedVolumes == null) {
        readyAndReusedVolumes = new ArrayList<Volume>();
    }
    Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();

    // Order volumes by descending size; the comparator never returns 0, so
    // volumes of equal size are kept as distinct entries in the TreeSet.
    TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
        @Override
        public int compare(Volume v1, Volume v2) {
            if (v1.getSize() < v2.getSize())
                return 1;
            else
                return -1;
        }
    });
    volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
    boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;

    for (Host potentialHost : suitableHosts) {
        Map<StoragePool, List<Volume>> volumeAllocationMap = new HashMap<StoragePool, List<Volume>>();
        for (Volume vol : volumesOrderBySizeDesc) {
            haveEnoughSpace = false;
            s_logger.debug("Checking if host: " + potentialHost.getId()
                    + " can access any suitable storage pool for volume: " + vol.getVolumeType());
            List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
            hostCanAccessPool = false;
            hostAffinityCheck = checkAffinity(potentialHost, preferredHosts);
            for (StoragePool potentialSPool : volumePoolList) {
                if (hostCanAccessSPool(potentialHost, potentialSPool)) {
                    hostCanAccessPool = true;
                    if (multipleVolume && !readyAndReusedVolumes.contains(vol)) {
                        List<Volume> requestVolumes = null;
                        if (volumeAllocationMap.containsKey(potentialSPool))
                            requestVolumes = volumeAllocationMap.get(potentialSPool);
                        else
                            requestVolumes = new ArrayList<Volume>();
                        requestVolumes.add(vol);
                        if (!_storageMgr.storagePoolHasEnoughIops(requestVolumes, potentialSPool)
                                || !_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool,
                                        potentialHost.getClusterId()))
                            continue;
                        volumeAllocationMap.put(potentialSPool, requestVolumes);
                    }
                    storage.put(vol, potentialSPool);
                    haveEnoughSpace = true;
                    break;
                }
            }
            if (!hostCanAccessPool) {
                break;
            }
            if (!haveEnoughSpace) {
                s_logger.warn("insufficient capacity to allocate all volumes");
                break;
            }
            if (!hostAffinityCheck) {
                s_logger.debug("Host affinity check failed");
                break;
            }
        }
        if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck
                && checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) {
            s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: "
                    + potentialHost.getName() + " and associated storage pools for this VM");
            return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
        } else {
            avoid.addHost(potentialHost.getId());
        }
    }
    s_logger.debug(
            "Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM");
    return null;
}
From source file:org.bigtextml.topics.ParallelTopicModel.java
/**
 * Return an array (one element for each topic) of arrays of words, which
 * are the most probable words for that topic in descending order. These
 * are returned as Objects, but will probably be Strings.
 *
 * @param numWords The maximum length of each topic's array of words (may be less).
 */
public Object[][] getTopWords(int numWords) {
    ArrayList<TreeSet<IDSorter>> topicSortedWords = getSortedWords();
    Object[][] result = new Object[numTopics][];

    for (int topic = 0; topic < numTopics; topic++) {
        TreeSet<IDSorter> sortedWords = topicSortedWords.get(topic);

        // How many words should we report? Some topics may have fewer than
        // the default number of words with non-zero weight.
        int limit = numWords;
        if (sortedWords.size() < numWords) {
            limit = sortedWords.size();
        }

        result[topic] = new Object[limit];
        Iterator<IDSorter> iterator = sortedWords.iterator();
        for (int i = 0; i < limit; i++) {
            IDSorter info = iterator.next();
            result[topic][i] = alphabet.lookupObject(info.getID());
        }
    }
    return result;
}
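The limit calculation above is the standard guard for iterating a bounded prefix of a sorted set: clamp the requested count with size() so the iterator is never asked for more elements than exist. A minimal sketch of that guard, with hypothetical data:

import java.util.Iterator;
import java.util.TreeSet;

public class LimitDemo {
    public static void main(String[] args) {
        TreeSet<String> sortedWords = new TreeSet<>();
        sortedWords.add("alpha");
        sortedWords.add("beta");
        int numWords = 5;
        // Never ask the iterator for more elements than the set holds
        int limit = Math.min(numWords, sortedWords.size());
        Iterator<String> it = sortedWords.iterator();
        for (int i = 0; i < limit; i++) {
            System.out.println(it.next()); // prints "alpha" then "beta"
        }
    }
}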
From source file:org.apache.fop.complexscripts.fonts.GlyphTable.java
/**
 * Assemble an ordered array of lookup table use specifications according to the specified
 * features and candidate lookups, where the order of the array is in accordance with the
 * order of the applicable lookup list.
 *
 * @param features array of feature identifiers to apply
 * @param lookups a mapping from lookup specifications to lists of lookup tables from which
 *        to select lookup tables according to the specified features
 * @return ordered array of assembled lookup table use specifications
 */
public UseSpec[] assembleLookups(String[] features, Map/*<LookupSpec,List<LookupTable>>*/ lookups) {
    TreeSet/*<UseSpec>*/ uss = new TreeSet/*<UseSpec>*/();
    for (int i = 0, n = features.length; i < n; i++) {
        String feature = features[i];
        for (Iterator it = lookups.entrySet().iterator(); it.hasNext();) {
            Map.Entry/*<LookupSpec,List<LookupTable>>*/ e = (Map.Entry/*<LookupSpec,List<LookupTable>>*/) it.next();
            LookupSpec ls = (LookupSpec) e.getKey();
            if (ls.getFeature().equals(feature)) {
                List/*<LookupTable>*/ ltl = (List/*<LookupTable>*/) e.getValue();
                if (ltl != null) {
                    for (Iterator ltit = ltl.iterator(); ltit.hasNext();) {
                        LookupTable lt = (LookupTable) ltit.next();
                        uss.add(new UseSpec(lt, feature));
                    }
                }
            }
        }
    }
    return (UseSpec[]) uss.toArray(new UseSpec[uss.size()]);
}