List of usage examples for java.util SortedSet iterator
Iterator<E> iterator();
From source file:org.sakaiproject.myshowcase.tool.MyShowcaseArtefactListController.java
/** * Implementation of AbstractController.handleRequestInternal * @param HttpServletRequest request// ww w. j a v a2 s . co m * @param HttpServletResponse response * @return ModelAndView * @throws Exception */ @Override protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response) throws Exception { readRequest(request); Owner owner = myshowcaseService.getOwnerById(new Long(ownerId)); // Read for Artefact list List<Artefact> artefacts = new ArrayList<Artefact>(); List<Artefact> artefactList = new ArrayList<Artefact>(); // false sets the order to descending Order createdDateOrder = new Order("createdDate", false); Order[] order = {}; if ((orderBy.equals("null")) || (orderBy.equals("") || (orderBy.equals("createdDate")))) { Order[] defaultOrder = { createdDateOrder }; order = defaultOrder; } else { Order order2 = new Order(orderBy); Order[] listOrder = { order2, createdDateOrder }; order = listOrder; } // if a term search if (isTermSearch(searchTerm)) { artefactList = myshowcaseService.getArtefactsBySearchTerm(owner, order, searchTerm); } // if a tag search else if (isTagSearch(tagValue)) { Tag tag = myshowcaseService.getTag(tagValue, owner); artefactList = myshowcaseService.getArtefacts(tag); if (orderBy.equals("type")) { int index = 0; Map<String, Artefact> orderedArtefacts = new HashMap<String, Artefact>(); for (Artefact artefact : artefactList) { index++; orderedArtefacts.put(artefact.getType().getName() + index, artefact); } SortedSet<String> sortedset = new TreeSet<String>(orderedArtefacts.keySet()); Iterator<String> it = sortedset.iterator(); artefactList.clear(); String keyVal = ""; while (it.hasNext()) { keyVal = it.next(); artefactList.add(orderedArtefacts.get(keyVal)); } } } else if (!(competencyId.equals("null"))) { artefactList = myshowcaseService.getEvidenceArtefacts(owner, new Long(competencyId), new Long(mappingId)); } else { artefactList = myshowcaseService.getArtefacts(owner, order, tagValue, typeValue); } 
// Process Artefact List Artefact jsonArtefact = null; ArtefactDetail jsonArtefactDetail = null; ArtefactType jsonArtefactType = null; for (Artefact artefact : artefactList) { jsonArtefactType = new ArtefactType(); jsonArtefactDetail = new ArtefactDetail(); jsonArtefact = artefact.getDeepCopy(); jsonArtefact.setArtefactId(artefact.getArtefactId()); jsonArtefact.setDescription(artefact.getDescription()); jsonArtefact.setName(artefact.getName()); jsonArtefact.setCreatedDate(artefact.getCreatedDate()); jsonArtefact.setCreatedDateTime(artefact.getCreatedDate()); jsonArtefactType.setName(artefact.getType().getName()); jsonArtefact.setType(jsonArtefactType); jsonArtefactDetail.setFileName(artefact.getArtefactDetail().getFileName()); jsonArtefactDetail.setFilePath(artefact.getArtefactDetail().getFilePath()); jsonArtefactDetail.setFileType(artefact.getArtefactDetail().getFileType()); jsonArtefactDetail.setUrl(artefact.getArtefactDetail().getUrl()); jsonArtefactDetail.setDetail(artefact.getArtefactDetail().getDetail()); jsonArtefactDetail.setFlickrUserName(artefact.getArtefactDetail().getFlickrUserName()); jsonArtefactDetail.setTwitterUserName(artefact.getArtefactDetail().getTwitterUserName()); jsonArtefact.setArtefactDetail(jsonArtefactDetail); artefacts.add(jsonArtefact); } response.setContentType("application/json"); response.setCharacterEncoding("UTF-8"); PrintWriter out = response.getWriter(); out.write(new Gson().toJson(artefacts)); out.flush(); out.close(); return null; }
From source file:gov.nih.nci.caarray.upgrade.FixIlluminaGenotypingCsvDesignProbeNamesMigrator.java
private void renameProbesUsingReparsedProbeNames(ArrayDesign originalArrayDesign, ArrayDesign reparsedArrayDesign) { final SortedSet<PhysicalProbe> originalProbes = getSortedProbeList(originalArrayDesign); final SortedSet<PhysicalProbe> reparsedProbes = getSortedProbeList(reparsedArrayDesign); if (originalProbes.size() != reparsedProbes.size()) { throw new IllegalStateException("probe set sizes differ"); }/*from ww w. j av a2 s . c o m*/ final Iterator<PhysicalProbe> reparsedProbeIterator = reparsedProbes.iterator(); for (final PhysicalProbe originalProbe : originalProbes) { final String reparsedName = reparsedProbeIterator.next().getName(); originalProbe.setName(reparsedName); } }
From source file:org.apache.hadoop.hbase.regionserver.AbstractMemStore.java
/**
 * Inserts the specified Cell into MemStore and deletes any existing
 * versions of the same row/family/qualifier as the specified Cell.
 * <p>
 * First, the specified Cell is inserted into the Memstore.
 * <p>
 * If there are any existing Cell in this MemStore with the same row,
 * family, and qualifier, they are removed.
 * <p>
 * Callers must hold the read lock.
 *
 * @param cell the cell to be updated
 * @param readpoint readpoint below which we can safely remove duplicate KVs
 * @return change in size of MemStore
 */
private long upsert(Cell cell, long readpoint) {
    // Add the Cell to the MemStore
    // Use the internalAdd method here since we (a) already have a lock
    // and (b) cannot safely use the MSLAB here without potentially
    // hitting OOME - see TestMemStore.testUpsertMSLAB for a
    // test that triggers the pathological case if we don't avoid MSLAB
    // here.
    // This cell data is backed by the same byte[] where we read request in RPC(See HBASE-15180). We
    // must do below deep copy. Or else we will keep referring to the bigger chunk of memory and
    // prevent it from getting GCed.
    cell = deepCopyIfNeeded(cell);
    long addedSize = internalAdd(cell, false);

    // Get the Cells for the row/family/qualifier regardless of timestamp.
    // For this case we want to clean up any other puts
    Cell firstCell = KeyValueUtil.createFirstOnRow(cell.getRowArray(), cell.getRowOffset(),
            cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
            cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
    SortedSet<Cell> ss = active.tailSet(firstCell);
    Iterator<Cell> it = ss.iterator();
    // versions visible to oldest scanner
    int versionsVisible = 0;
    while (it.hasNext()) {
        Cell cur = it.next();

        if (cell == cur) {
            // ignore the one just put in
            continue;
        }
        // check that this is the row and column we are interested in, otherwise bail
        if (CellUtil.matchingRow(cell, cur) && CellUtil.matchingQualifier(cell, cur)) {
            // only remove Puts that concurrent scanners cannot possibly see
            if (cur.getTypeByte() == KeyValue.Type.Put.getCode() && cur.getSequenceId() <= readpoint) {
                if (versionsVisible >= 1) {
                    // if we get here we have seen at least one version visible to the oldest scanner,
                    // which means we can prove that no scanner will see this version

                    // false means there was a change, so give us the size.
                    long delta = heapSizeChange(cur, true);
                    addedSize -= delta;
                    active.incSize(-delta);
                    it.remove();
                    setOldestEditTimeToNow();
                } else {
                    versionsVisible++;
                }
            }
        } else {
            // past the row or column, done
            break;
        }
    }
    return addedSize;
}
From source file:org.mousephenotype.dcc.crawler.Downloader.java
private ZipDownload attemptDownload(SortedSet<FileSourceHasZip> f) { ZipDownload zd = null;/*w ww.j ava 2 s . c o m*/ Iterator<FileSourceHasZip> fi = f.iterator(); // try to download from all file source servers hosting the file while (fi.hasNext()) { FileSourceHasZip fileSourceHasZip = fi.next(); if (prepareDownload(fileSourceHasZip)) { int remainingAttempts = maxRetries; while (remainingAttempts-- > 0) { zd = download(fileSourceHasZip); if (zd != null) { break; // download completed: avoid re-downloading } } if (zd != null) { break; // download completed: don't try remaining sources } } else { logger.warn("Could not prepare connection for downloading '{}' from '{}'", fileSourceHasZip.getZaId().getZipId().getFileName(), fileSourceHasZip.getFileSourceId().getHostname()); } } return zd; }
From source file:de.fischer.thotti.reportgen.diagram.ChartGenerator.java
public ChartMetaData generateSingleVariantsChart(final String testId, String jvmArgsId, String paramGrpId) { String variantId = String.format("%s-%s-%s", testId, jvmArgsId, paramGrpId); File chartFile;/*from w w w. ja va2 s . c o m*/ try { final TimeSeriesCollection collection = new TimeSeriesCollection(); String chartTitle = String.format("%s-%s-%s", testId, jvmArgsId, paramGrpId); String svgFilename = String.format("%s-%s-%s.svg", testId, jvmArgsId, paramGrpId); chartFile = new File(baseDir, svgFilename); TimeSeries series = new TimeSeries(String.format("Average of %s", variantId), Day.class); TimeSeries mediaSeries = new TimeSeries(String.format("Median of %s", variantId), Day.class); List<NDResultEntity> results = persistenceHelper.findAllResultsForVariant(testId, jvmArgsId, paramGrpId); SortedSet<NDResultEntity> sortedSet = new TreeSet<NDResultEntity>( new TestVariantModel.DateComparator()); sortedSet.addAll(results); Iterator<Measurement> itr = new AverageDayCombinerIterator(sortedSet.iterator()); while (itr.hasNext()) { Measurement singleResult = itr.next(); Date startDate = singleResult.getPointInTime(); double time = singleResult.getDuration(); double t2 = convertMilliSecsToSeconds(time); series.add(new Day(startDate), t2); } collection.addSeries(series); Iterator<DatePoint> medianItr = new MedianIterator(sortedSet.iterator()); while (medianItr.hasNext()) { DatePoint singleResult = medianItr.next(); Date startDate = singleResult.getPointInTime(); double value = convertMilliSecsToSeconds(singleResult.getValue()); mediaSeries.add(new Day(startDate), value); } collection.addSeries(mediaSeries); final JFreeChart chart = createChart(chartTitle, collection); saveChartAsSVG(chart, svgFilename); System.out.println(String.format("Written %s", chartFile.toString())); return new ChartMetaData().withFilename(chartFile.getName()).withWidth(DEFAULT_CHAR_WIDTH) .withHeight(DEFAULT_CHAR_HEIGHT).withFormat("SVG"); } catch (IOException ioe) { // @todo Throw an better 
exception! ioe.printStackTrace(); //To change body of catch statement use File | Settings | File Templates. } return null; }
From source file:com.appeligo.showfiles.FilesByTime.java
/** * @param request/* w w w .j a v a 2s .c o m*/ * @param out * @param path */ private void listFiles(HttpServletRequest request, PrintWriter out, String path, int limit) { header(out); Comparator<File> comparator = new Comparator<File>() { public int compare(File leftFile, File rightFile) { long leftMod = leftFile.lastModified(); long rightMod = rightFile.lastModified(); if (leftMod < rightMod) { return -1; } else if (leftMod > rightMod) { return 1; } else { return leftFile.getPath().compareTo(rightFile.getPath()); } } }; SortedSet<File> fileSet = new TreeSet<File>(comparator); addFile(fileSet, new File(path)); log.info("Total files in tree is " + fileSet.size()); if (limit > 0 && fileSet.size() > limit) { log.info("Trimming tree to limit " + limit); Iterator<File> iter = fileSet.iterator(); int toDrop = fileSet.size() - limit; for (int i = 0; i < toDrop; i++) { iter.next(); } File first = iter.next(); fileSet = fileSet.tailSet(first); } int suggestedLimit = 1000; if (limit == 0 && fileSet.size() > suggestedLimit) { out.println("That's a lot of files! There are " + fileSet.size() + " files to return.<br/>"); out.println("How about just the <a href=\"" + request.getRequestURI() + "?" + suggestedLimit + "\">last " + suggestedLimit + "</a>.<br/>"); out.println("If you really want them all, <a href=\"" + request.getRequestURI() + "?" 
+ (fileSet.size() + suggestedLimit) + "\">click here</a>.<br/>"); } else { DateFormat dateFormat = SimpleDateFormat.getDateInstance(); DateFormat timeFormat = SimpleDateFormat.getTimeInstance(); Calendar lastDay = Calendar.getInstance(); Calendar day = Calendar.getInstance(); boolean first = true; for (File file : fileSet) { Date fileDate = new Date(file.lastModified()); day.setTime(fileDate); if (first || lastDay.get(Calendar.DAY_OF_YEAR) != day.get(Calendar.DAY_OF_YEAR)) { out.print("<b>" + dateFormat.format(fileDate) + "</b><br/>"); } String servlet = "/ShowFile"; if (file.getPath().endsWith(".flv")) { servlet = "/ShowFlv"; } out.print(timeFormat.format(fileDate) + " <a href=\"" + request.getContextPath() + servlet + file.getPath().substring(documentRoot.length()) + "\">" + file.getPath() + "</a>"); out.println("<br/>"); lastDay.setTime(fileDate); first = false; } } footer(out); }
From source file:de.dfki.owlsmx.gui.ResultVisualization.java
private ArrayList getURIasStringFromURISet(SortedSet services) { ArrayList result = new ArrayList(); String uri;/*from ww w .j a v a 2 s .c om*/ for (Iterator iter = services.iterator(); iter.hasNext();) { uri = ((URI) iter.next()).toString(); // System.err.println("Relevant " + uri); result.add(getFileName(uri.toString())); } return result; }
From source file:org.bibsonomy.recommender.tags.meta.TagsFromFirstWeightedBySecondTagRecommender.java
protected int doThirdRound(final Collection<RecommendedTag> recommendedTags, final SortedSet<RecommendedTag> thirdRecommendedTags, final double minScore, final int ctr) { /*/*from www. j av a 2 s . c o m*/ * Third round: * If we have not enough tags, yet, add tags from third recommender until set is complete. */ int myCtr = ctr; final Iterator<RecommendedTag> iterator3 = thirdRecommendedTags.iterator(); while (recommendedTags.size() < numberOfTagsToRecommend && iterator3.hasNext()) { final RecommendedTag recommendedTag = iterator3.next(); if (!recommendedTags.contains(recommendedTag)) { /* * tag has not already been added -> set its score lower than min */ myCtr++; recommendedTag.setScore(getLowerScore(minScore, myCtr)); recommendedTags.add(recommendedTag); } } return myCtr; }
From source file:de.dfki.owlsmx.gui.ResultVisualization.java
private ArrayList getURIasStringFromServiceSet(SortedSet services) { ArrayList result = new ArrayList(); HybridServiceItem item;/* w w w . j a va 2s .c o m*/ for (Iterator iter = services.iterator(); iter.hasNext();) { item = (HybridServiceItem) iter.next(); // ErrorLog.instanceOf().report(item.getDegreeOfMatch() + " " + getFileName(item.getURI().toString())); // System.err.println("Matchmaker " + (item).getURI()); result.add(getFileName(item.getURI().toString())); } return result; }
From source file:org.bibsonomy.recommender.tags.meta.TagsFromFirstWeightedBySecondTagRecommender.java
protected int doSecondRound(final Collection<RecommendedTag> recommendedTags, final SortedSet<RecommendedTag> firstRecommendedTags, final MapBackedSet<String, RecommendedTag> secondRecommendedTags, final double minScore) { /*//from ww w . j av a2s . c o m * Second round: * add remaining tags from first recommender, scored lower than the tags before */ final Iterator<RecommendedTag> iterator2 = firstRecommendedTags.iterator(); int ctr = 0; while (recommendedTags.size() < numberOfTagsToRecommend && iterator2.hasNext()) { final RecommendedTag recommendedTag = iterator2.next(); ctr++; recommendedTag.setScore(getLowerScore(minScore, ctr)); recommendedTags.add(recommendedTag); } return ctr; }