List of usage examples for java.util.TreeSet.last()

public E last()

Returns the last (highest) element currently in this set; throws NoSuchElementException if the set is empty.
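A minimal, self-contained sketch of the call (not taken from any of the sources below): last() reads the greatest element without removing it, and fails on an empty set.

    import java.util.TreeSet;

    public class LastDemo {
        public static void main(String[] args) {
            TreeSet<Integer> set = new TreeSet<>();
            set.add(3);
            set.add(1);
            set.add(7);
            // Highest element under the set's ordering; the set is left unchanged.
            System.out.println(set.last()); // 7
            // On an empty set, last() throws NoSuchElementException,
            // so guard with isEmpty() as several of the examples below do.
        }
    }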
From source file: darks.learning.word2vec.Word2Vec.java
/**
 * Calculate the specified word's nearest or related words.
 *
 * @param word     the word to look up
 * @param topCount maximum result size
 * @return nearest or related words
 */
public Set<WordEntry> distance(String word, int topCount) {
    int resultSize = FastMath.min(topCount, wordNodes.size());
    TreeSet<WordEntry> result = new TreeSet<WordEntry>();
    WordNode node = wordNodes.get(word);
    if (node != null) {
        double minSim = Double.MIN_VALUE;
        for (WordNode target : wordNodes.values()) {
            if (target.name.equals(word)) {
                continue;
            }
            double sim = target.feature.dot(node.feature);
            if (sim > minSim) {
                result.add(new WordEntry(target.name, sim));
                if (result.size() > resultSize) {
                    // assuming WordEntry orders by descending similarity,
                    // pollLast() drops the weakest retained entry
                    result.pollLast();
                }
                // last() is now the weakest entry still in the set: the admission threshold
                minSim = result.last().similar;
            }
        }
    }
    return result;
}
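The pattern above (add, evict with pollLast(), read the admission threshold back from last()) is a common way to keep a bounded best-N set. A distilled sketch under the assumption that entries sort by descending score (Java 16+; Scored is a hypothetical stand-in for WordEntry):

    import java.util.Comparator;
    import java.util.TreeSet;

    public class TopN {
        // Hypothetical value type for illustration.
        record Scored(String name, double score) {}

        public static void main(String[] args) {
            int n = 3;
            // Highest score first; the name tie-break keeps distinct items
            // with equal scores from colliding in the set.
            TreeSet<Scored> best = new TreeSet<>(
                    Comparator.comparingDouble(Scored::score).reversed()
                              .thenComparing(Scored::name));
            for (Scored s : new Scored[] {
                    new Scored("a", 0.2), new Scored("b", 0.9), new Scored("c", 0.5),
                    new Scored("d", 0.7), new Scored("e", 0.1) }) {
                best.add(s);
                if (best.size() > n) {
                    best.pollLast(); // drop the current weakest
                }
            }
            System.out.println(best.last()); // weakest retained entry: Scored[name=c, score=0.5]
        }
    }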
From source file: bes.injector.InjectorBurnTest.java
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException, TimeoutException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);
    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    final Injector injector = new Injector("");
    for (int i = 0; i < executors.length; i++) {
        executors[i] = injector.newExecutor(threadCount, maxQueued);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }
    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<Batch>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events",
                    runs * 0.001f, events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
            }
        }

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<Result>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        // System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis",
        //         executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
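In this test the TreeSet<Batch> acts as a deadline queue: first() is the soonest batch timeout, last() the latest, and last() bounds the randomized wait. A minimal sketch of reading both ends, assuming natural Long ordering (not taken from the test itself):

    import java.util.TreeSet;

    public class Deadlines {
        public static void main(String[] args) {
            TreeSet<Long> deadlines = new TreeSet<>();
            long now = System.nanoTime();
            deadlines.add(now + 1_000_000L);
            deadlines.add(now + 5_000_000L);
            deadlines.add(now + 2_000_000L);
            long soonest = deadlines.first(); // next deadline to wait on
            long latest = deadlines.last();   // upper bound for a randomized wait
            long timeout = (long) (Math.random() * (latest - now));
            System.out.println("wait at most " + timeout + "ns; soonest in "
                    + (soonest - now) + "ns");
        }
    }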
From source file: fr.gouv.finances.dgfip.xemelios.importers.archives.ArchiveImporter.java
public Errors doImport() {
    Errors errors = new Errors();
    try {
        ZipFile zipArchive = new ZipFile(fileToImport);
        ZipEntry manifesteEntry = zipArchive.getEntry(MANIFESTE_FILE_NAME);
        archiveManifeste = getManisfesteFromArchive(zipArchive.getInputStream(manifesteEntry));
        archiveManifeste.getRootElement()
                .addAttribute(new Attribute("archive-name", getArchiveName(fileToImport)));
        zipArchive.close();
        HashMap<String, Object> importProperties = extractPropertiesFromArchiveManifeste(archiveManifeste);
        for (String docType : (String[]) importProperties.get("archiveDocumentTypes")) {
            if (!docType.equals("PJ")
                    && !DataLayerManager.getImplementation().canImportDocument(docType, getUser())) {
                errors.addError(Errors.SEVERITY_WARNING, "Impossible d'importer ce type de document ("
                        + docType + "), la base de données doit d'abord être mise à jour.");
            }
        }
        importedArchiveManifeste = DataLayerManager.getImplementation()
                .getManifesteFromArchive(importProperties.get("archiveName").toString(), getUser());
        definePropertiesFromImportedManifeste(importedArchiveManifeste, importProperties);
        Element historique = null;
        if (importedArchiveManifeste != null) {
            historique = (Element) importedArchiveManifeste
                    .query("/m:manifeste/m:evenements", getNamespaceCtx()).get(0);
            // to get an element without a parent, so it can be attached wherever we want
            historique = new Element(historique);
        } else {
            historique = new Element("evenements");
        }
        archiveManifeste.getRootElement().appendChild(historique);
        boolean sectionApplied = false;
        for (SectionModel section : rules.getSections()) {
            SectionModel theSection = section;
            if (theSection.getPredicat().matches(importProperties)) {
                logger.info("Application de la règle " + theSection.getName());
                int globalOverwriteRule = OVERWRITE_RULE_UNSET;
                if (theSection.getActions().isUsePreviousSection()) {
                    // then there is necessarily a previously imported manifest; fetch its section
                    Element sectionElement = (Element) importedArchiveManifeste
                            .query("/m:manifeste/rul:section", getNamespaceCtx()).get(0);
                    if (sectionElement == null)
                        throw new ImportException(new Errors.Error(Errors.SEVERITY_ERROR, "la section "
                                + theSection.getName()
                                + " impose l'application de la section du précédent import, mais celui-ci n'a pas été trouvé."),
                                null);
                    theSection = new SectionModel(sectionElement);
                    // and remove all of the archive's data
                    HashMap<String, DocumentModel> docsToDrop = new HashMap<String, DocumentModel>();
                    for (String docId : (String[]) importProperties.get("archiveImportedDocumentTypes")) {
                        docsToDrop.put(docId, documentsModel.getDocumentById(docId));
                    }
                    DataLayerManager.getImplementation().removeArchive(docsToDrop,
                            importProperties.get("archiveName").toString(), getUser());
                    Nodes deleteActions = importedArchiveManifeste
                            .query("/m:manifeste/m:on-delete/m:action", getNamespaceCtx());
                    for (int i = 0; i < deleteActions.size(); i++) {
                        Element action = (Element) deleteActions.get(i);
                        doApplyAction(action);
                    }
                    // at this point there is nothing to update in the manifest: all documents have been removed
                } else {
                    if (importedArchiveManifeste != null) {
                        // the history of each document must be carried over
                        Nodes importedDocuments = importedArchiveManifeste.query("//m:document",
                                getNamespaceCtx());
                        for (int i = 0; i < importedDocuments.size(); i++) {
                            Element importedDoc = (Element) importedDocuments.get(i);
                            Element thisDoc = getElement(
                                    archiveManifeste.query("/manifeste/documents/document[@path='"
                                            + importedDoc.getAttributeValue("path") + "']"));
                            if (thisDoc != null) {
                                String __imported = importedDoc.getAttributeValue("imported");
                                thisDoc.addAttribute(new Attribute("imported", __imported));
                                if ("Oui".equals(__imported)) {
                                    Element result = getElement(
                                            importedDoc.query("m:resultatimport", getNamespaceCtx()));
                                    if (result != null)
                                        thisDoc.appendChild(new Element(result));
                                }
                            }
                        }
                    }
                }
                if (theSection.getOverwriteRule() != null) {
                    if (EtatImporteur.OVERWRITE_RULE_ALWAYS.equals(theSection.getOverwriteRule()))
                        globalOverwriteRule = OVERWRITE_RULE_OVERWRITE;
                    else if (EtatImporteur.OVERWRITE_RULE_NEVER.equals(theSection.getOverwriteRule()))
                        globalOverwriteRule = OVERWRITE_RULE_SKIP;
                }
                // fetch the list of documents to import, so we can check that they are all available
                List<Element> documentsToImport = theSection.getDocumentsToImport(archiveManifeste,
                        applicationProperties);
                if (documentsToImport.size() > 0) {
                    TreeSet<String> volumesRequired = new TreeSet<String>();
                    for (Element filePlannedToImport : documentsToImport) {
                        volumesRequired.add(filePlannedToImport.getAttributeValue("volume"));
                    }
                    int maxVolumes = Integer.parseInt(volumesRequired.last());
                    File[] volumes = new File[maxVolumes + 1];
                    for (String volume : volumesRequired) {
                        String fichierVolume = archiveManifeste
                                .query("/manifeste/volumes/volume[@num=" + volume + "]/@fichier").get(0)
                                .getValue();
                        File f = new File(fileToImport.getParentFile(), fichierVolume);
                        if (!f.exists()) {
                            errors.addError(Errors.SEVERITY_ERROR, f.getAbsolutePath() + " non trouvé");
                        }
                        volumes[Integer.parseInt(volume)] = f;
                    }
                    if (!errors.containsError()) {
                        logger.info("displayProgress(" + (documentsToImport.size() + 1) + ")");
                        // hook that lets an ArchiveImporteur subclass make changes before the import
                        preImport(theSection, archiveManifeste);
                        importServiceProvider.displayProgress(documentsToImport.size() + 1);
                        boolean doImport = false;
                        boolean doDelete = false;
                        // process the actions
                        for (XomDefinitionable dd : theSection.getActions().getActions()) {
                            if (dd instanceof ImportModel) {
                                // ImportModel im = (ImportModel) dd;
                                // the list of files to import has already been determined, so import them
                                for (Element documentToImport : documentsToImport) {
                                    int vol = Integer.parseInt(documentToImport.getAttributeValue("volume"));
                                    try {
                                        FileInfo fileInfo = doImportDocument(documentToImport, volumes[vol],
                                                importProperties, globalOverwriteRule);
                                        if (fileInfo.getInProcessException() != null)
                                            errors.addError(Errors.SEVERITY_ERROR,
                                                    fileInfo.getInProcessException().getMessage());
                                        if (__fileInfo == null)
                                            __fileInfo = fileInfo;
                                        else
                                            __fileInfo.merge(fileInfo);
                                        if (fileInfo.getGlobalCount() == 0) {
                                            // nothing was imported, probably because the overwrite rule
                                            // said not to import; so nothing is modified
                                        } else {
                                            Element result = new Element("resultatimport");
                                            result.addAttribute(new Attribute("Duree", DateUtils
                                                    .durationToString(fileInfo.getDurationImport())));
                                            result.addAttribute(new Attribute("Debut", DateUtils
                                                    .formatXsDateTime(new Date(fileInfo.getDebutImport()))));
                                            result.addAttribute(new Attribute("Fin", DateUtils
                                                    .formatXsDateTime(new Date(fileInfo.getFinImport()))));
                                            // remove the previous import result, if there was one...
                                            Nodes previousResults = documentToImport.query(
                                                    "resultatimport | m:resultatimport", getNamespaceCtx());
                                            for (int i = previousResults.size() - 1; i >= 0; i--) {
                                                Element __res = (Element) previousResults.get(i);
                                                documentToImport.removeChild(__res);
                                            }
                                            documentToImport.insertChild(result, 0);
                                            documentToImport.addAttribute(new Attribute("imported", "Oui"));
                                        }
                                        // apply any actions that target this document
                                        Nodes actions = archiveManifeste
                                                .query("/manifeste/action[@depends-on='"
                                                        + documentToImport.getAttributeValue("path") + "']");
                                        for (int i = 0; i < actions.size(); i++) {
                                            Element action = (Element) actions.get(i);
                                            try {
                                                FileInfo actFileInfo = doApplyAction(action);
                                                if (__fileInfo == null)
                                                    __fileInfo = actFileInfo;
                                                else
                                                    __fileInfo.merge(actFileInfo);
                                            } catch (Exception ex) {
                                                logger.error("while applying " + action.toXML(), ex);
                                            }
                                        }
                                    } catch (Exception ex) {
                                        logger.error("while importing "
                                                + documentToImport.getAttributeValue("path"), ex);
                                        documentToImport.addAttribute(
                                                new Attribute("imported", "Erreur: " + ex.getMessage()));
                                    }
                                }
                                doImport = true;
                            } else if (dd instanceof DeleteModel) {
                                importServiceProvider.startLongWait();
                                DeleteModel dm = (DeleteModel) dd;
                                if (dm.getArchive() != null) {
                                    String archiveName = null;
                                    if ("archiveName".equals(dm.getArchive())) {
                                        // substitute the archive name
                                        archiveName = importProperties.get("archiveName").toString();
                                        // for now, deleting a different archive is not allowed
                                        HashMap<String, DocumentModel> map = new HashMap<String, DocumentModel>();
                                        for (String s : (String[]) importProperties
                                                .get("archiveDocumentTypes")) {
                                            map.put(s, documentsModel.getDocumentById(s));
                                        }
                                        DataLayerManager.getImplementation().removeArchive(map, archiveName,
                                                getUser());
                                        Nodes documents = archiveManifeste
                                                .query("/manifeste/documents/document");
                                        for (int i = 0; i < documents.size(); i++) {
                                            Element doc = (Element) documents.get(i);
                                            Element resultImport = getElement(doc.query(
                                                    "m:resultatimport | resultatimport", getNamespaceCtx()));
                                            if (resultImport != null)
                                                doc.removeChild(resultImport);
                                            doc.addAttribute(new Attribute("imported", "Non"));
                                        }
                                        // apply all the actions, since all the documents have been removed
                                        Nodes actions = archiveManifeste
                                                .query("/manifeste/on-delete/action[@depends-on]");
                                        for (int i = 0; i < actions.size(); i++) {
                                            Element action = (Element) actions.get(i);
                                            try {
                                                FileInfo fileInfo = doApplyAction(action);
                                                if (__fileInfo == null)
                                                    __fileInfo = fileInfo;
                                                else
                                                    __fileInfo.merge(fileInfo);
                                            } catch (Exception ex) {
                                                logger.error("while applying " + action.toXML(), ex);
                                            }
                                        }
                                    }
                                } else if (dm.getTypeDoc() != null) {
                                    if (dm.getCollectivite() != null) {
                                        if (dm.getBudget() != null) {
                                            if (dm.getFileName() != null) {
                                                DataLayerManager.getImplementation().removeDocument(
                                                        documentsModel.getDocumentById(dm.getTypeDoc()),
                                                        new Pair(dm.getCollectivite(), dm.getCollectivite()),
                                                        new Pair(dm.getBudget(), dm.getBudget()),
                                                        dm.getFileName(), user);
                                                Nodes documents = archiveManifeste
                                                        .query("/manifeste/documents/document[@type='"
                                                                + dm.getTypeDoc() + "' and @buIdCol='"
                                                                + dm.getCollectivite() + "' and @buCode='"
                                                                + dm.getBudget() + "' and ends-with(@path,'"
                                                                + dm.getFileName() + "')]");
                                                for (int i = 0; i < documents.size(); i++) {
                                                    Element doc = (Element) documents.get(i);
                                                    Element resultImport = getElement(
                                                            doc.query("m:resultatimport | resultatimport",
                                                                    getNamespaceCtx()));
                                                    if (resultImport != null)
                                                        doc.removeChild(resultImport);
                                                    doc.addAttribute(new Attribute("imported", "Non"));
                                                }
                                                // apply the document's actions
                                                Nodes actions = archiveManifeste.query(
                                                        "/manifeste/on-delete/action[ends-with(@depends-on,'"
                                                                + dm.getFileName() + "')]");
                                                for (int i = 0; i < actions.size(); i++) {
                                                    Element action = (Element) actions.get(i);
                                                    try {
                                                        FileInfo fileInfo = doApplyAction(action);
                                                        if (__fileInfo == null)
                                                            __fileInfo = fileInfo;
                                                        else
                                                            __fileInfo.merge(fileInfo);
                                                    } catch (Exception ex) {
                                                        logger.error("while applying " + action.toXML(), ex);
                                                    }
                                                }
                                            } else {
                                                DataLayerManager.getImplementation().removeBudget(
                                                        documentsModel.getDocumentById(dm.getTypeDoc()),
                                                        new Pair(dm.getCollectivite(), dm.getCollectivite()),
                                                        new Pair(dm.getBudget(), dm.getBudget()), user);
                                                Nodes documents = archiveManifeste
                                                        .query("/manifeste/documents/document[@type='"
                                                                + dm.getTypeDoc() + "' and @buIdCol='"
                                                                + dm.getCollectivite() + "' and @buCode='"
                                                                + dm.getBudget() + "']");
                                                for (int i = 0; i < documents.size(); i++) {
                                                    Element doc = (Element) documents.get(i);
                                                    Element resultImport = getElement(
                                                            doc.query("m:resultatimport | resultatimport",
                                                                    getNamespaceCtx()));
                                                    if (resultImport != null)
                                                        doc.removeChild(resultImport);
                                                    doc.addAttribute(new Attribute("imported", "Non"));
                                                    // apply the document's actions
                                                    Nodes actions = archiveManifeste
                                                            .query("/manifeste/on-delete/action[@depends-on='"
                                                                    + doc.getAttributeValue("path") + "']");
                                                    for (int a = 0; a < actions.size(); a++) {
                                                        Element action = (Element) actions.get(a);
                                                        try {
                                                            FileInfo fileInfo = doApplyAction(action);
                                                            if (__fileInfo == null)
                                                                __fileInfo = fileInfo;
                                                            else
                                                                __fileInfo.merge(fileInfo);
                                                        } catch (Exception ex) {
                                                            logger.error("while applying " + action.toXML(),
                                                                    ex);
                                                        }
                                                    }
                                                }
                                            }
                                        } else {
                                            DataLayerManager.getImplementation().removeCollectivite(
                                                    documentsModel.getDocumentById(dm.getTypeDoc()),
                                                    new Pair(dm.getCollectivite(), dm.getCollectivite()),
                                                    user);
                                            Nodes documents = archiveManifeste
                                                    .query("/manifeste/documents/document[@type='"
                                                            + dm.getTypeDoc() + "' and @buIdCol='"
                                                            + dm.getCollectivite() + "']");
                                            for (int i = 0; i < documents.size(); i++) {
                                                Element doc = (Element) documents.get(i);
                                                Element resultImport = getElement(
                                                        doc.query("m:resultatimport | resultatimport",
                                                                getNamespaceCtx()));
                                                if (resultImport != null)
                                                    doc.removeChild(resultImport);
                                                doc.addAttribute(new Attribute("imported", "Non"));
                                                // apply the document's actions
                                                Nodes actions = archiveManifeste
                                                        .query("/manifeste/on-delete/action[@depends-on='"
                                                                + doc.getAttributeValue("path") + "']");
                                                for (int a = 0; a < actions.size(); a++) {
                                                    Element action = (Element) actions.get(a);
                                                    try {
                                                        FileInfo fileInfo = doApplyAction(action);
                                                        if (__fileInfo == null)
                                                            __fileInfo = fileInfo;
                                                        else
                                                            __fileInfo.merge(fileInfo);
                                                    } catch (Exception ex) {
                                                        logger.error("while applying " + action.toXML(), ex);
                                                    }
                                                }
                                            }
                                        }
                                    } else {
                                        DataLayerManager.getImplementation().removeDocumentModel(
                                                documentsModel.getDocumentById(dm.getTypeDoc()), user);
                                        Nodes documents = archiveManifeste
                                                .query("/manifeste/documents/document[@type='"
                                                        + dm.getTypeDoc() + "']");
                                        for (int i = 0; i < documents.size(); i++) {
                                            Element doc = (Element) documents.get(i);
                                            Element resultImport = getElement(
                                                    doc.query("m:resultatimport | resultatimport",
                                                            getNamespaceCtx()));
                                            if (resultImport != null)
                                                doc.removeChild(resultImport);
                                            doc.addAttribute(new Attribute("imported", "Non"));
                                            // apply the document's actions
                                            Nodes actions = archiveManifeste
                                                    .query("/manifeste/on-delete/action[@depends-on='"
                                                            + doc.getAttributeValue("path") + "']");
                                            for (int a = 0; a < actions.size(); a++) {
                                                Element action = (Element) actions.get(a);
                                                try {
                                                    FileInfo fileInfo = doApplyAction(action);
                                                    if (__fileInfo == null)
                                                        __fileInfo = fileInfo;
                                                    else
                                                        __fileInfo.merge(fileInfo);
                                                } catch (Exception ex) {
                                                    logger.error("while applying " + action.toXML(), ex);
                                                }
                                            }
                                        }
                                    }
                                }
                                doDelete = true;
                            }
                            importServiceProvider.endLongWait();
                        }
                        if (doImport) {
                            // For compatibility with pre-2011 archives, process every action
                            // that is not on-delete and has no depends-on
                            Nodes actions = archiveManifeste.query("/manifeste/action[not(@depends-on)]");
                            for (int i = 0; i < actions.size(); i++) {
                                Element action = (Element) actions.get(i);
                                try {
                                    FileInfo fileInfo = doApplyAction(action);
                                    if (__fileInfo == null)
                                        __fileInfo = fileInfo;
                                    else
                                        __fileInfo.merge(fileInfo);
                                } catch (Exception ex) {
                                    logger.error("while applying " + action.toXML(), ex);
                                }
                            }
                        }
                        if (doImport) {
                            // For the edmn patches, apply the actions
                            Nodes actions = archiveManifeste.query("/manifeste/actions/action");
                            for (int i = 0; i < actions.size(); i++) {
                                Element action = (Element) actions.get(i);
                                try {
                                    FileInfo fileInfo = doApplyAction(action);
                                    if (__fileInfo == null)
                                        __fileInfo = fileInfo;
                                    else
                                        __fileInfo.merge(fileInfo);
                                } catch (Exception ex) {
                                    logger.error("while applying " + action.toXML(), ex);
                                }
                            }
                        }
                        if (doDelete) {
                            // For compatibility with pre-2011 archives, process every on-delete action
                            // that has no depends-on
                            Nodes actions = archiveManifeste
                                    .query("/manifeste/on-delete/action[not(@depends-on)]");
                            for (int i = 0; i < actions.size(); i++) {
                                Element action = (Element) actions.get(i);
                                try {
                                    FileInfo fileInfo = doApplyAction(action);
                                    if (__fileInfo == null)
                                        __fileInfo = fileInfo;
                                    else
                                        __fileInfo.merge(fileInfo);
                                } catch (Exception ex) {
                                    logger.error("while applying " + action.toXML(), ex);
                                }
                            }
                        }
                    }
                    // record here whether the archive actually contains imported data
                    if (archiveManifeste.query("/manifeste/documents/document[@imported='Oui']").size() > 0)
                        archiveManifeste.getRootElement()
                                .addAttribute(new Attribute("added:archive", Constants.ADDED_NS_URI, "Oui"));
                    else
                        archiveManifeste.getRootElement()
                                .addAttribute(new Attribute("added:archive", Constants.ADDED_NS_URI, "Non"));
                    // record the actions we performed in the manifest,
                    // so we know what to redo later...
                    archiveManifeste.getRootElement().appendChild(theSection.getXomDefinition());
                    sectionApplied = true;
                    break;
                } else {
                    // there was nothing to import, but the section still counts as applied
                    sectionApplied = true;
                    break;
                }
            }
        }
        if (sectionApplied) {
            // all sections processed; maybe nothing actually happened...
            boolean somethingHasBeenImported = false;
            Nodes nodes = archiveManifeste.query("//document[@imported]");
            somethingHasBeenImported = nodes.size() > 0;
            // find all the documents that were not processed, and mark them @imported='Non'
            nodes = archiveManifeste.query("//document[not(@imported)]");
            for (int i = 0; i < nodes.size(); i++) {
                Element el = (Element) nodes.get(i);
                el.addAttribute(new Attribute("imported", "Non"));
            }
            archiveManifeste.getRootElement()
                    .addAttribute(new Attribute("imported", Boolean.toString(somethingHasBeenImported)));
            Element result = new Element("resultatimport");
            if (result != null) {
                // you never know... in case of a deletion, for example...
                if (__fileInfo != null) {
                    result.addAttribute(new Attribute("Duree",
                            DateUtils.durationToString(__fileInfo.getDurationImport())));
                    result.addAttribute(new Attribute("Debut",
                            DateUtils.formatXsDateTime(new Date(__fileInfo.getDebutImport()))));
                    result.addAttribute(new Attribute("Fin",
                            DateUtils.formatXsDateTime(new Date(__fileInfo.getFinImport()))));
                    result.addAttribute(new Attribute("User", getUser().getId()));
                    result.addAttribute(new Attribute("LastModify",
                            DateUtils.formatXsDateTime(new Date(__fileInfo.getLastModify()))));
                    result.appendChild(__fileInfo.toXomXml(documentsModel));
                }
            }
            archiveManifeste.getRootElement().appendChild(result);
            // the import history
            Element event = new Element("evenement");
            event.addAttribute(new Attribute("date", DateUtils.formatXsDateTime(new Date())));
            event.addAttribute(new Attribute("user", getUser().getId()));
            event.addAttribute(new Attribute("section", archiveManifeste
                    .query("/manifeste/rul:section/@name", getNamespaceCtx()).get(0).getValue()));
            String version = archiveManifeste.getRootElement().getAttributeValue("version");
            if (version == null || version.length() == 0)
                version = "1";
            event.addAttribute(new Attribute("version-archive", version));
            historique.insertChild(event, 0);
            doImportManifeste(archiveManifeste, importProperties.get("archiveName").toString());
            DataLayerManager.getImplementation()
                    .declareArchiveImported(importProperties.get("archiveName").toString(), user);
            // System.out.println(archiveManifeste.toXML());
        } else {
            errors.addError(Errors.SEVERITY_WARNING,
                    "Cette archive ne peut être importée par aucune des règles de cette configuration.");
        }
    } catch (XPathExpressionException ex) {
        logger.error(ex.getMessage(), ex);
        errors.addError(Errors.SEVERITY_ERROR, ex.getMessage());
    } catch (IOException ioEx) {
        logger.error(fileToImport.getAbsolutePath() + ": " + ioEx.getMessage(), ioEx);
        errors.addError(Errors.SEVERITY_ERROR, ioEx.getMessage());
    } catch (ImportException iEx) {
        Errors.Error error = iEx.error;
        errors.addError(error);
    } catch (DataConfigurationException dcEx) {
        logger.error(dcEx.getMessage(), dcEx);
        errors.addError(Errors.SEVERITY_ERROR, dcEx.getMessage());
    } catch (DataAccessException daEx) {
        logger.error(daEx.getMessage(), daEx);
        errors.addError(Errors.SEVERITY_ERROR, daEx.getMessage());
    } catch (UnauthorizedException ex) {
        logger.error(ex.getMessage(), ex);
        errors.addError(Errors.SEVERITY_ERROR, ex.getMessage());
    } catch (Throwable t) {
        t.printStackTrace();
        errors.addError(Errors.SEVERITY_ERROR, t.getMessage());
    } finally {
        // try { zipArchive.close(); } catch (Exception ex) {}
    }
    return errors;
}
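One detail worth flagging in the volume handling above: volumesRequired is a TreeSet<String>, so last() returns the lexicographic maximum. That matches the numeric maximum only while volume numbers stay single-digit; with ten or more volumes, "9" sorts after "10", and Integer.parseInt(volumesRequired.last()) would undersize the array. A small demonstration, plus the numeric-safe variant:

    import java.util.TreeSet;

    public class VolumeMax {
        public static void main(String[] args) {
            // Lexicographic ordering: "9" > "10", so last() is "9".
            TreeSet<String> asStrings = new TreeSet<>();
            asStrings.add("9");
            asStrings.add("10");
            System.out.println(asStrings.last()); // "9", not the numeric maximum

            // Parsing up front makes last() the numeric maximum.
            TreeSet<Integer> asInts = new TreeSet<>();
            asInts.add(9);
            asInts.add(10);
            System.out.println(asInts.last()); // 10
        }
    }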
From source file: net.semanticmetadata.lire.solr.LireRequestHandler.java
/**
 * Actual search implementation based on (i) hash based retrieval and (ii) feature based re-ranking.
 *
 * @param rsp
 * @param searcher
 * @param hashFieldName the hash field name
 * @param maximumHits
 * @param terms
 * @param queryFeature
 * @throws IOException
 * @throws IllegalAccessException
 * @throws InstantiationException
 */
private void doSearch(SolrQueryRequest req, SolrQueryResponse rsp, SolrIndexSearcher searcher,
        String hashFieldName, int maximumHits, List<Term> terms, Query query, LireFeature queryFeature)
        throws IOException, IllegalAccessException, InstantiationException {
    // temp feature instance
    LireFeature tmpFeature = queryFeature.getClass().newInstance();
    // Taking the time of search for statistical purposes.
    time = System.currentTimeMillis();
    Filter filter = null;
    // if the request contains a filter:
    if (req.getParams().get("fq") != null) {
        // only filters with [<field>:<value> ]+ are supported
        StringTokenizer st = new StringTokenizer(req.getParams().get("fq"), " ");
        LinkedList<Term> filterTerms = new LinkedList<Term>();
        while (st.hasMoreElements()) {
            String[] tmpToken = st.nextToken().split(":");
            if (tmpToken.length > 1) {
                filterTerms.add(new Term(tmpToken[0], tmpToken[1]));
            }
        }
        if (filterTerms.size() > 0)
            filter = new TermsFilter(filterTerms);
    }
    TopDocs docs; // with query only.
    if (filter == null) {
        docs = searcher.search(query, numberOfCandidateResults);
    } else {
        docs = searcher.search(query, filter, numberOfCandidateResults);
    }
    // TopDocs docs = searcher.search(query, new TermsFilter(terms), numberOfCandidateResults); // with TermsFilter and boosting by simple query
    // TopDocs docs = searcher.search(new ConstantScoreQuery(new TermsFilter(terms)), numberOfCandidateResults); // just with TermsFilter
    time = System.currentTimeMillis() - time;
    rsp.add("RawDocsCount", docs.scoreDocs.length + "");
    rsp.add("RawDocsSearchTime", time + "");
    // re-rank
    time = System.currentTimeMillis();
    TreeSet<SimpleResult> resultScoreDocs = new TreeSet<SimpleResult>();
    float maxDistance = -1f;
    float tmpScore;
    String featureFieldName = FeatureRegistry.getFeatureFieldName(hashFieldName);
    // iterating and re-ranking the documents.
    BinaryDocValues binaryValues = MultiDocValues.getBinaryValues(searcher.getIndexReader(), featureFieldName); // *** #
    BytesRef bytesRef; // = new BytesRef();
    for (int i = 0; i < docs.scoreDocs.length; i++) {
        // using DocValues to retrieve the field values ...
        bytesRef = binaryValues.get(docs.scoreDocs[i].doc);
        tmpFeature.setByteArrayRepresentation(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        // Getting the document from the index.
        // This is the slow step based on the field compression of stored fields.
        // tmpFeature.setByteArrayRepresentation(d.getBinaryValue(name).bytes, d.getBinaryValue(name).offset, d.getBinaryValue(name).length);
        tmpScore = queryFeature.getDistance(tmpFeature);
        if (resultScoreDocs.size() < maximumHits) {
            // todo: There's potential here for a memory saver, think of a clever data structure
            // that can do the trick without creating a new SimpleResult for each result.
            resultScoreDocs.add(
                    new SimpleResult(tmpScore, searcher.doc(docs.scoreDocs[i].doc), docs.scoreDocs[i].doc));
            maxDistance = resultScoreDocs.last().getDistance();
        } else if (tmpScore < maxDistance) {
            // if it is nearer to the sample than at least one of the current set:
            // remove the last one ...
            resultScoreDocs.remove(resultScoreDocs.last());
            // add the new one ...
            resultScoreDocs.add(
                    new SimpleResult(tmpScore, searcher.doc(docs.scoreDocs[i].doc), docs.scoreDocs[i].doc));
            // and set our new distance border ...
            maxDistance = resultScoreDocs.last().getDistance();
        }
    }
    // System.out.println("** Creating response.");
    time = System.currentTimeMillis() - time;
    rsp.add("ReRankSearchTime", time + "");
    LinkedList list = new LinkedList();
    for (Iterator<SimpleResult> it = resultScoreDocs.iterator(); it.hasNext();) {
        SimpleResult result = it.next();
        HashMap m = new HashMap(2);
        m.put("d", result.getDistance());
        // add fields as requested:
        if (req.getParams().get("fl") == null) {
            m.put("id", result.getDocument().get("id"));
            if (result.getDocument().get("title") != null)
                m.put("title", result.getDocument().get("title"));
        } else {
            String fieldsRequested = req.getParams().get("fl");
            if (fieldsRequested.contains("score")) {
                m.put("score", result.getDistance());
            }
            if (fieldsRequested.contains("*")) {
                // all fields
                for (IndexableField field : result.getDocument().getFields()) {
                    String tmpField = field.name();
                    if (result.getDocument().getFields(tmpField).length > 1) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getValues(tmpField));
                    } else if (result.getDocument().getFields(tmpField).length > 0) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getFields(tmpField)[0].stringValue());
                    }
                }
            } else {
                StringTokenizer st;
                if (fieldsRequested.contains(","))
                    st = new StringTokenizer(fieldsRequested, ",");
                else
                    st = new StringTokenizer(fieldsRequested, " ");
                while (st.hasMoreElements()) {
                    String tmpField = st.nextToken();
                    if (result.getDocument().getFields(tmpField).length > 1) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getValues(tmpField));
                    } else if (result.getDocument().getFields(tmpField).length > 0) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getFields(tmpField)[0].stringValue());
                    }
                }
            }
        }
        // m.put(field, result.getDocument().get(field));
        // m.put(field.replace("_ha", "_hi"), result.getDocument().getBinaryValue(field));
        list.add(m);
    }
    rsp.add("docs", list);
    // rsp.add("Test-name", "Test-val");
}
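The re-rank loop keeps the maximumHits nearest candidates: with SimpleResult ordered by ascending distance, last() is the current worst match and serves as the admission border. A compact sketch of just that bookkeeping (Java 16+; Candidate is a hypothetical stand-in for SimpleResult):

    import java.util.Comparator;
    import java.util.TreeSet;

    public class KnnBorder {
        // Hypothetical candidate type; the doc-id tie-break keeps equal distances distinct.
        record Candidate(int doc, float distance) {}

        public static void main(String[] args) {
            int k = 2;
            TreeSet<Candidate> best = new TreeSet<>(
                    Comparator.comparingDouble(Candidate::distance).thenComparingInt(Candidate::doc));
            float border = Float.MAX_VALUE; // admit everything until the set is full
            float[] distances = { 0.8f, 0.3f, 0.9f, 0.1f };
            for (int doc = 0; doc < distances.length; doc++) {
                float d = distances[doc];
                if (best.size() < k) {
                    best.add(new Candidate(doc, d));
                    border = best.last().distance();
                } else if (d < border) {
                    best.remove(best.last());        // evict the farthest candidate
                    best.add(new Candidate(doc, d));
                    border = best.last().distance(); // tighten the border
                }
            }
            System.out.println(best); // docs 3 (0.1) and 1 (0.3)
        }
    }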
From source file: net.semanticmetadata.lire.solr.FastLireRequestHandler.java
/**
 * Actual search implementation based on (i) hash based retrieval and (ii) feature based re-ranking.
 *
 * @param rsp
 * @param searcher
 * @param hashFieldName the hash field name
 * @param maximumHits
 * @param terms
 * @param queryFeature
 * @throws java.io.IOException
 * @throws IllegalAccessException
 * @throws InstantiationException
 */
private void doSearch(SolrQueryRequest req, SolrQueryResponse rsp, SolrIndexSearcher searcher,
        String hashFieldName, int maximumHits, List<Term> terms, Query query, LireFeature queryFeature)
        throws IOException, IllegalAccessException, InstantiationException {
    // temp feature instance
    LireFeature tmpFeature = queryFeature.getClass().newInstance();
    // Taking the time of search for statistical purposes.
    time = System.currentTimeMillis();
    Filter filter = null;
    // if the request contains a filter:
    if (req.getParams().get("fq") != null) {
        // only filters with [<field>:<value> ]+ are supported
        StringTokenizer st = new StringTokenizer(req.getParams().get("fq"), " ");
        LinkedList<Term> filterTerms = new LinkedList<Term>();
        while (st.hasMoreElements()) {
            String[] tmpToken = st.nextToken().split(":");
            if (tmpToken.length > 1) {
                filterTerms.add(new Term(tmpToken[0], tmpToken[1]));
            }
        }
        if (filterTerms.size() > 0)
            filter = new TermsFilter(filterTerms);
    }
    TopDocs docs; // with query only.
    if (filter == null) {
        docs = searcher.search(query, numberOfCandidateResults);
    } else {
        docs = searcher.search(query, filter, numberOfCandidateResults);
    }
    // TopDocs docs = searcher.search(query, new TermsFilter(terms), numberOfCandidateResults); // with TermsFilter and boosting by simple query
    // TopDocs docs = searcher.search(new ConstantScoreQuery(new TermsFilter(terms)), numberOfCandidateResults); // just with TermsFilter
    time = System.currentTimeMillis() - time;
    rsp.add("RawDocsCount", docs.scoreDocs.length + "");
    rsp.add("RawDocsSearchTime", time + "");
    // re-rank
    time = System.currentTimeMillis();
    TreeSet<SimpleResult> resultScoreDocs = new TreeSet<SimpleResult>();
    float maxDistance = -1f;
    float tmpScore;
    String featureFieldName = FeatureRegistry.getFeatureFieldName(hashFieldName);
    // iterating and re-ranking the documents.
    BinaryDocValues binaryValues = MultiDocValues.getBinaryValues(searcher.getIndexReader(), featureFieldName); // *** #
    BytesRef bytesRef = new BytesRef();
    for (int i = 0; i < docs.scoreDocs.length; i++) {
        // using DocValues to retrieve the field values ...
        binaryValues.get(docs.scoreDocs[i].doc, bytesRef);
        tmpFeature.setByteArrayRepresentation(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        // Getting the document from the index.
        // This is the slow step based on the field compression of stored fields.
        // tmpFeature.setByteArrayRepresentation(d.getBinaryValue(name).bytes, d.getBinaryValue(name).offset, d.getBinaryValue(name).length);
        tmpScore = queryFeature.getDistance(tmpFeature);
        if (resultScoreDocs.size() < maximumHits) {
            // todo: There's potential here for a memory saver, think of a clever data structure
            // that can do the trick without creating a new SimpleResult for each result.
            resultScoreDocs.add(
                    new SimpleResult(tmpScore, searcher.doc(docs.scoreDocs[i].doc), docs.scoreDocs[i].doc));
            maxDistance = resultScoreDocs.last().getDistance();
        } else if (tmpScore < maxDistance) {
            // if it is nearer to the sample than at least one of the current set:
            // remove the last one ...
            resultScoreDocs.remove(resultScoreDocs.last());
            // add the new one ...
            resultScoreDocs.add(
                    new SimpleResult(tmpScore, searcher.doc(docs.scoreDocs[i].doc), docs.scoreDocs[i].doc));
            // and set our new distance border ...
            maxDistance = resultScoreDocs.last().getDistance();
        }
    }
    // System.out.println("** Creating response.");
    time = System.currentTimeMillis() - time;
    rsp.add("ReRankSearchTime", time + "");
    LinkedList list = new LinkedList();
    for (Iterator<SimpleResult> it = resultScoreDocs.iterator(); it.hasNext();) {
        SimpleResult result = it.next();
        HashMap m = new HashMap(2);
        m.put("d", result.getDistance());
        // add fields as requested:
        if (req.getParams().get("fl") == null) {
            m.put("id", result.getDocument().get("id"));
            if (result.getDocument().get("title") != null)
                m.put("title", result.getDocument().get("title"));
        } else {
            String fieldsRequested = req.getParams().get("fl");
            if (fieldsRequested.contains("score")) {
                m.put("score", result.getDistance());
            }
            if (fieldsRequested.contains("*")) {
                // all fields
                for (IndexableField field : result.getDocument().getFields()) {
                    String tmpField = field.name();
                    if (result.getDocument().getFields(tmpField).length > 1) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getValues(tmpField));
                    } else if (result.getDocument().getFields(tmpField).length > 0) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getFields(tmpField)[0].stringValue());
                    }
                }
            } else {
                StringTokenizer st;
                if (fieldsRequested.contains(","))
                    st = new StringTokenizer(fieldsRequested, ",");
                else
                    st = new StringTokenizer(fieldsRequested, " ");
                while (st.hasMoreElements()) {
                    String tmpField = st.nextToken();
                    if (result.getDocument().getFields(tmpField).length > 1) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getValues(tmpField));
                    } else if (result.getDocument().getFields(tmpField).length > 0) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getFields(tmpField)[0].stringValue());
                    }
                }
            }
        }
        // m.put(field, result.getDocument().get(field));
        // m.put(field.replace("_ha", "_hi"), result.getDocument().getBinaryValue(field));
        list.add(m);
    }
    rsp.add("docs", list);
    // rsp.add("Test-name", "Test-val");
}
From source file: net.sourceforge.fenixedu.domain.student.Student.java
final public Enrolment getDissertationEnrolment(DegreeCurricularPlan degreeCurricularPlan) {
    TreeSet<Enrolment> enrolments = getDissertationEnrolments(degreeCurricularPlan);
    return enrolments.isEmpty() ? null : enrolments.last();
}
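The isEmpty() guard matters here: unlike a null-returning lookup, last() throws NoSuchElementException on an empty set.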
From source file: canreg.client.analysis.TopNChartTableBuilder.java
@Override
public LinkedList<String> buildTable(String tableHeader, String reportFileName, int startYear, int endYear,
        Object[][] incidenceData, PopulationDataset[] populations, // can be null
        PopulationDataset[] standardPopulations, LinkedList<ConfigFields> configList,
        String[] engineParameters, FileTypes fileType) throws NotCompatibleDataException {
    String footerString = java.util.ResourceBundle
            .getBundle("canreg/client/analysis/resources/AgeSpecificCasesPerHundredThousandTableBuilder")
            .getString("TABLE BUILT ")
            + new Date()
            + java.util.ResourceBundle
                    .getBundle("canreg/client/analysis/resources/AgeSpecificCasesPerHundredThousandTableBuilder")
                    .getString(" BY CANREG5.");
    LinkedList<String> generatedFiles = new LinkedList<String>();
    if (Arrays.asList(engineParameters).contains("barchart")) {
        chartType = ChartType.BAR;
    } else {
        chartType = ChartType.PIE;
        includeOther = true;
    }
    if (Arrays.asList(engineParameters).contains("legend")) {
        legendOn = true;
    }
    if (Arrays.asList(engineParameters).contains("r")) {
        useR = true;
    }
    if (Arrays.asList(engineParameters).contains("asr")) {
        countType = CountType.ASR;
    } else if (Arrays.asList(engineParameters).contains("cum64")) {
        countType = CountType.CUM64;
    } else if (Arrays.asList(engineParameters).contains("cum74")) {
        countType = CountType.CUM74;
    } else if (Arrays.asList(engineParameters).contains("per100000")) {
        countType = CountType.PER_HUNDRED_THOUSAND;
    } else {
        // default to cases
        countType = CountType.CASES;
    }
    localSettings = CanRegClientApp.getApplication().getLocalSettings();
    rpath = localSettings.getProperty(LocalSettings.R_PATH);
    // does R exist?
    if (rpath == null || rpath.isEmpty() || !new File(rpath).exists()) {
        useR = false; // force false if R is not installed
    }
    icdLabel = ConfigFieldsReader.findConfig("ICD_groups_labels", configList);
    icd10GroupDescriptions = ConfigFieldsReader.findConfig("ICD10_groups", configList);
    cancerGroupsLocal = EditorialTableTools.generateICD10Groups(icd10GroupDescriptions);
    // indexes
    keyGroupsMap = new EnumMap<KeyCancerGroupsEnum, Integer>(KeyCancerGroupsEnum.class);
    keyGroupsMap.put(KeyCancerGroupsEnum.allCancerGroupsIndex,
            EditorialTableTools.getICD10index("ALL", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.leukemiaNOSCancerGroupIndex,
            EditorialTableTools.getICD10index(950, cancerGroupsLocal));
    keyGroupsMap.put(KeyCancerGroupsEnum.skinCancerGroupIndex,
            EditorialTableTools.getICD10index("C44", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.bladderCancerGroupIndex,
            EditorialTableTools.getICD10index("C67", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.mesotheliomaCancerGroupIndex,
            EditorialTableTools.getICD10index("C45", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.kaposiSarkomaCancerGroupIndex,
            EditorialTableTools.getICD10index("C46", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.myeloproliferativeDisordersCancerGroupIndex,
            EditorialTableTools.getICD10index("MPD", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.myelodysplasticSyndromesCancerGroupIndex,
            EditorialTableTools.getICD10index("MDS", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.allCancerGroupsButSkinIndex,
            EditorialTableTools.getICD10index("ALLbC44", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.brainAndCentralNervousSystemCancerGroupIndex,
            EditorialTableTools.getICD10index("C70-72", icd10GroupDescriptions));
    keyGroupsMap.put(KeyCancerGroupsEnum.ovaryCancerGroupIndex,
            EditorialTableTools.getICD10index(569, cancerGroupsLocal));
    keyGroupsMap.put(KeyCancerGroupsEnum.otherCancerGroupsIndex,
            EditorialTableTools.getICD10index("O&U", icd10GroupDescriptions));
    otherCancerGroupsIndex = keyGroupsMap.get(KeyCancerGroupsEnum.otherCancerGroupsIndex);
    skinCancerGroupIndex = keyGroupsMap.get(KeyCancerGroupsEnum.skinCancerGroupIndex);
    allCancerGroupsIndex = keyGroupsMap.get(KeyCancerGroupsEnum.allCancerGroupsIndex);
    allCancerGroupsButSkinIndex = keyGroupsMap.get(KeyCancerGroupsEnum.allCancerGroupsButSkinIndex);
    numberOfCancerGroups = cancerGroupsLocal.length;
    double[] countsRow;
    if (populations != null && populations.length > 0) {
        if (populations[0].getPopulationDatasetID() < 0) {
            countType = CountType.CASES;
        } else {
            // calculate period pop
            periodPop = new PopulationDataset();
            periodPop.setAgeGroupStructure(populations[0].getAgeGroupStructure());
            periodPop.setReferencePopulation(populations[0].getReferencePopulation());
            for (PopulationDatasetsEntry pde : populations[0].getAgeGroups()) {
                int count = 0;
                for (PopulationDataset pds : populations) {
                    count += pds.getAgeGroupCount(pde.getSex(), pde.getAgeGroup());
                }
                periodPop.addAgeGroup(new PopulationDatasetsEntry(pde.getAgeGroup(), pde.getSex(), count));
            }
        }
    }
    if (incidenceData != null) {
        String sexString, icdString, morphologyString;
        double[][] countArray = new double[numberOfCancerGroups][numberOfSexes];
        int sex, icdIndex, numberOfCases, age;
        double adjustedCases;
        List<Integer> dontCount = new LinkedList<Integer>();
        // all sites but skin?
        if (Arrays.asList(engineParameters).contains("noC44")) {
            dontCount.add(skinCancerGroupIndex);
            tableHeader += ", excluding C44";
        }
        for (Object[] dataLine : incidenceData) {
            // Set default
            adjustedCases = 0.0;
            // Extract data
            sexString = (String) dataLine[SEX_COLUMN];
            sex = Integer.parseInt(sexString.trim());
            // sex = 3 is unknown sex
            if (sex > 2) {
                sex = 3;
            }
            morphologyString = (String) dataLine[MORPHOLOGY_COLUMN];
            icdString = (String) dataLine[ICD10_COLUMN];
            icdIndex = Tools.assignICDGroupIndex(keyGroupsMap, icdString, morphologyString, cancerGroupsLocal);
            age = (Integer) dataLine[AGE_COLUMN];
            if (!dontCount.contains(icdIndex) && icdIndex != DONT_COUNT) {
                // Extract cases
                numberOfCases = (Integer) dataLine[CASES_COLUMN];
                if (countType == CountType.PER_HUNDRED_THOUSAND) {
                    adjustedCases = (100000.0 * numberOfCases)
                            / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age));
                } else if (countType == CountType.ASR) {
                    try {
                        adjustedCases = 100.0
                                * (periodPop.getReferencePopulationForAgeGroupIndex(sex,
                                        periodPop.getAgeGroupIndex(age)) * numberOfCases)
                                / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age));
                    } catch (IncompatiblePopulationDataSetException ex) {
                        Logger.getLogger(TopNChartTableBuilder.class.getName()).log(Level.SEVERE, null, ex);
                    }
                } else if (countType == CountType.CUM64) {
                    if (age < 65) {
                        adjustedCases = (100000.0 * numberOfCases)
                                / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age)) * 5.0
                                / 1000.0;
                    }
                } else if (countType == CountType.CUM74) {
                    if (age < 75) {
                        adjustedCases = (100000.0 * numberOfCases)
                                / periodPop.getAgeGroupCount(sex, periodPop.getAgeGroupIndex(age)) * 5.0
                                / 1000.0;
                    }
                } else {
                    adjustedCases = numberOfCases;
                }
                if (sex <= numberOfSexes && icdIndex >= 0 && icdIndex <= cancerGroupsLocal.length) {
                    countArray[icdIndex][sex - 1] += adjustedCases;
                } else {
                    if (otherCancerGroupsIndex >= 0) {
                        countArray[otherCancerGroupsIndex][sex - 1] += adjustedCases;
                    }
                }
                if (allCancerGroupsIndex >= 0) {
                    countArray[allCancerGroupsIndex][sex - 1] += adjustedCases;
                }
                if (allCancerGroupsButSkinIndex >= 0 && skinCancerGroupIndex >= 0
                        && icdIndex != skinCancerGroupIndex) {
                    countArray[allCancerGroupsButSkinIndex][sex - 1] += adjustedCases;
                }
            }
        }
        // separate top 10 and the rest
        TreeSet<CancerCasesCount> topNMale = new TreeSet<CancerCasesCount>(new Comparator<CancerCasesCount>() {
            @Override
            public int compare(CancerCasesCount o1, CancerCasesCount o2) {
                if (o1.getCount().equals(o2.getCount())) {
                    return -o1.toString().compareTo(o2.toString());
                } else {
                    return -(o1.getCount().compareTo(o2.getCount()));
                }
            }
        });
        LinkedList<CancerCasesCount> theRestMale = new LinkedList<CancerCasesCount>();
        TreeSet<CancerCasesCount> topNFemale = new TreeSet<CancerCasesCount>(
                new Comparator<CancerCasesCount>() {
                    @Override
                    public int compare(CancerCasesCount o1, CancerCasesCount o2) {
                        if (o1.getCount().equals(o2.getCount())) {
                            return -o1.toString().compareTo(o2.toString());
                        } else {
                            return -(o1.getCount().compareTo(o2.getCount()));
                        }
                    }
                });
        LinkedList<CancerCasesCount> theRestFemale = new LinkedList<CancerCasesCount>();
        CancerCasesCount otherElement;
        CancerCasesCount thisElement;
        TreeSet<CancerCasesCount> topN;
        LinkedList<CancerCasesCount> theRest;
        for (int icdGroupNumber = 0; icdGroupNumber < countArray.length; icdGroupNumber++) {
            countsRow = countArray[icdGroupNumber];
            for (int sexNumber = 0; sexNumber < 2; sexNumber++) {
                if (sexNumber == 0) {
                    topN = topNMale;
                    theRest = theRestMale;
                } else {
                    topN = topNFemale;
                    theRest = theRestFemale;
                }
                if (countsRow[sexNumber] > 0) {
                    thisElement = new CancerCasesCount(icd10GroupDescriptions[icdGroupNumber],
                            icdLabel[icdGroupNumber].substring(3), countsRow[sexNumber], icdGroupNumber);
                    // if this is the "other" group - add it immediately to "the rest"
                    if (icdGroupNumber == otherCancerGroupsIndex) {
                        theRest.add(thisElement);
                        // if not we check if this is one of the collection groups
                    } else if (icdGroupNumber != allCancerGroupsButSkinIndex
                            && icdGroupNumber != allCancerGroupsIndex) {
                        // if it is less than N cancers in top N - add it
                        if (topN.size() < topNLimit) {
                            topN.add(thisElement);
                        } else {
                            // otherwise we need to compare it to the last element in the top 10
                            otherElement = topN.last();
                            if (thisElement.compareTo(otherElement) < 0) {
                                topN.remove(otherElement);
                                theRest.add(otherElement);
                                topN.add(thisElement);
                            } else {
                                theRest.add(thisElement);
                            }
                        }
                    }
                }
            }
        }
        for (int sexNumber : new int[] { 0, 1 }) {
            String fileName = reportFileName + "-" + sexLabel[sexNumber] + "." + fileType.toString();
            File file = new File(fileName);
            TreeSet<CancerCasesCount> casesCounts;
            Double restCount = Tools.sumUpTheRest(theRestMale, dontCount);
            if (sexNumber == 0) {
                casesCounts = topNMale;
            } else {
                casesCounts = topNFemale;
            }
            if (useR && !fileType.equals(FileTypes.jchart) && !fileType.equals(FileTypes.csv)) {
                String header = "Top 10 by " + countType + ", \n" + tableHeader + ", "
                        + TableBuilderInterface.sexLabel[sexNumber];
                generatedFiles.addAll(Tools.generateRChart(casesCounts, fileName, header, fileType, chartType,
                        includeOther, restCount, rpath, true, "Site"));
            } else {
                double allCount = countArray[allCancerGroupsIndex][sexNumber];
                Color color;
                if (sexNumber == 0) {
                    color = Color.BLUE;
                } else {
                    color = Color.RED;
                }
                String header = "Top 10 by " + countType + ", " + tableHeader + ", "
                        + TableBuilderInterface.sexLabel[sexNumber];
                charts[sexNumber] = Tools.generateJChart(casesCounts, fileName, header, fileType, chartType,
                        includeOther, legendOn, restCount, allCount, color, "Site");
                try {
                    generatedFiles.add(Tools.writeJChartToFile(charts[sexNumber], file, fileType));
                } catch (IOException ex) {
                    Logger.getLogger(TopNChartTableBuilder.class.getName()).log(Level.SEVERE, null, ex);
                } catch (DocumentException ex) {
                    Logger.getLogger(TopNChartTableBuilder.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
    }
    return generatedFiles;
}
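The top-N split above has one twist over the earlier examples: an element displaced from the set via last() is demoted to "the rest" instead of being discarded, and the comparator breaks count ties by label because a TreeSet silently drops elements its comparator reports as equal. A reduced sketch of that demotion logic (Java 16+; Group is a hypothetical stand-in for CancerCasesCount):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.TreeSet;

    public class TopNWithRest {
        // Hypothetical stand-in value type.
        record Group(String label, double count) {}

        public static void main(String[] args) {
            int limit = 3;
            // Descending count; the label tie-break keeps equal counts distinct.
            TreeSet<Group> topN = new TreeSet<>(
                    Comparator.comparingDouble(Group::count).reversed().thenComparing(Group::label));
            List<Group> theRest = new ArrayList<>();
            Group[] groups = { new Group("C50", 40), new Group("C61", 35), new Group("C18", 20),
                    new Group("C34", 30), new Group("C16", 10) };
            for (Group g : groups) {
                if (topN.size() < limit) {
                    topN.add(g);
                } else {
                    Group weakest = topN.last();
                    if (g.count() > weakest.count()) {
                        topN.remove(weakest); // the displaced entry is kept, not discarded
                        theRest.add(weakest);
                        topN.add(g);
                    } else {
                        theRest.add(g);
                    }
                }
            }
            System.out.println(topN);    // C50=40.0, C61=35.0, C34=30.0
            System.out.println(theRest); // C18=20.0, C16=10.0
        }
    }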
From source file: com.tasktop.c2c.server.scm.service.GitServiceBean.java
private List<RevCommit> getAllCommits(Repository repository, Region region, Set<ObjectId> visited) {
    TreeSet<RevCommit> result = new TreeSet<RevCommit>(new Comparator<RevCommit>() {
        @Override
        public int compare(RevCommit o1, RevCommit o2) {
            int ctDiff = o2.getCommitTime() - o1.getCommitTime();
            if (ctDiff != 0) {
                return ctDiff;
            }
            return o1.getId().compareTo(o2.getId());
        }
    });
    int maxResultsToConsider = -1;
    if (region != null) {
        maxResultsToConsider = region.getOffset() + region.getSize();
    }
    long minTime = -1;
    try {
        for (Ref ref : getRefsToAdd(repository)) {
            RevWalk revWal = new RevWalk(repository);
            revWal.markStart(revWal.parseCommit(ref.getObjectId()));
            int index = 0;
            for (RevCommit revCommit : revWal) {
                if (region == null
                        || (index >= region.getOffset() && index < region.getOffset() + region.getSize())) {
                    if (minTime > 0 && revCommit.getCommitTime() < minTime) {
                        break;
                    }
                    if (visited.add(revCommit.getId())) {
                        result.add(revCommit);
                        if (maxResultsToConsider > 0 && result.size() > maxResultsToConsider) {
                            RevCommit last = result.last();
                            result.remove(last);
                            minTime = last.getCommitTime();
                        }
                    } else {
                        break; // Done with this branch
                    }
                }
                index++;
                if (region != null && (index >= region.getOffset() + region.getSize())) {
                    break;
                }
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return new ArrayList<RevCommit>(result);
}
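Note that the comparator sorts newest first, so result.last() is the oldest commit retained; pruning it and remembering its time as minTime lets subsequent branch walks stop as soon as they reach commits older than anything that could still make the cut.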
From source file: net.sourceforge.fenixedu.domain.student.Student.java
final public Enrolment getDissertationEnrolment(DegreeCurricularPlan degreeCurricularPlan,
        final ExecutionYear executionYear) {
    TreeSet<Enrolment> enrolments = getDissertationEnrolments(degreeCurricularPlan);
    CollectionUtils.filter(enrolments, new Predicate() {
        @Override
        public boolean evaluate(Object enrolment) {
            return ((Enrolment) enrolment).getExecutionYear().equals(executionYear);
        }
    });
    return enrolments.isEmpty() ? null : enrolments.last();
}
From source file: edu.duke.cabig.c3pr.domain.StudySubject.java
@Transient
public StudyVersion getLastConsentedStudyVersion() {
    TreeSet uniqueStudyVersions = new TreeSet();
    List<StudySubjectConsentVersion> allStudySubjectConsentVersions = getAllConsents();
    for (StudySubjectConsentVersion studySubjectConsentVersion : allStudySubjectConsentVersions) {
        uniqueStudyVersions.add(studySubjectConsentVersion.getConsent().getStudyVersion());
    }
    return uniqueStudyVersions.isEmpty() ? null : (StudyVersion) uniqueStudyVersions.last();
}
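This last example uses a raw TreeSet, which only works if StudyVersion is Comparable; with a raw set the compiler cannot check that, and add() fails at runtime with ClassCastException otherwise. A typed sketch of the same dedupe-then-last() idea, with integer version numbers as a hypothetical stand-in:

    import java.util.List;
    import java.util.TreeSet;

    public class LatestVersion {
        public static void main(String[] args) {
            // Duplicates collapse automatically, since TreeSet is a set under its ordering.
            TreeSet<Integer> versions = new TreeSet<>();
            for (int v : List.of(1, 3, 2, 3, 1)) {
                versions.add(v);
            }
            // last() is the highest version seen.
            System.out.println(versions.isEmpty() ? null : versions.last()); // 3
        }
    }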