List of usage examples for java.util.concurrent.ForkJoinTask.get()
public final V get() throws InterruptedException, ExecutionException
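Before the real-world examples below, here is a minimal, self-contained sketch of the typical pattern: submit a Callable to a ForkJoinPool, then call get() on the returned ForkJoinTask and handle the two checked exceptions the method declares. The pool size and task body are illustrative assumptions, not taken from either example below.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;

public class ForkJoinTaskGetExample {
    public static void main(String[] args) {
        // Illustrative pool size; any positive parallelism works
        ForkJoinPool pool = new ForkJoinPool(4);
        try {
            // submit(Callable) returns a ForkJoinTask whose result is fetched with get()
            ForkJoinTask<Integer> task = pool.submit(() -> {
                int sum = 0;
                for (int i = 1; i <= 100; i++) {
                    sum += i;
                }
                return sum;
            });
            // get() blocks until the task completes, rethrowing task failures as ExecutionException
            Integer result = task.get();
            System.out.println("Sum = " + result); // 5050
        } catch (InterruptedException e) {
            // Restore the interrupt flag if the waiting thread was interrupted
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            // The task itself threw; the original failure is the cause
            throw new RuntimeException(e.getCause());
        } finally {
            pool.shutdown();
        }
    }
}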
From source file: MSUmpire.PeptidePeakClusterDetection.PDHandlerBase.java
protected void PeakCurveCorrClustering(XYData mzRange) throws IOException {
    Logger.getRootLogger().info("Grouping isotopic peak curves........");
    LCMSPeakBase.PeakClusters = new ArrayList<>();

    //Thread pool
    final ForkJoinPool fjp = new ForkJoinPool(NoCPUs);
    //   ArrayList<PeakCurveClusteringCorrKDtree> ResultList = new ArrayList<>();
    final ArrayList<ForkJoinTask<ArrayList<PeakCluster>>> ftemp = new ArrayList<>();
    final int end_idx = LCMSPeakBase.UnSortedPeakCurves.size();
    final ArrayList<PeakCluster> resultClusters = new ArrayList<>();

    //For each peak curve
    //   for (PeakCurve Peakcurve : LCMSPeakBase.UnSortedPeakCurves) {
    for (int i = 0; i < end_idx; ++i) {
        final PeakCurve Peakcurve = LCMSPeakBase.UnSortedPeakCurves.get(i);
        if (Peakcurve.TargetMz >= mzRange.getX() && Peakcurve.TargetMz <= mzRange.getY()) {
            //Create a thread unit for doing isotope clustering given a peak curve as the monoisotope peak
            PeakCurveClusteringCorrKDtree unit = new PeakCurveClusteringCorrKDtree(Peakcurve,
                    LCMSPeakBase.GetPeakCurveSearchTree(), parameter, IsotopePatternMap,
                    LCMSPeakBase.StartCharge, LCMSPeakBase.EndCharge,
                    LCMSPeakBase.MaxNoPeakCluster, LCMSPeakBase.MinNoPeakCluster);
            //   ResultList.add(unit);
            ftemp.add(fjp.submit(unit));
        }
        if (step_pccc == -1)
            step_pccc = fjp.getParallelism() * 32;
        final boolean last_iter = i + 1 == end_idx;
        if (ftemp.size() == step_pccc || last_iter) {
            final List<ForkJoinTask<ArrayList<PeakCluster>>> ftemp_sublist_view = last_iter ? ftemp
                    : ftemp.subList(0, step_pccc / 2);
            for (final ForkJoinTask<ArrayList<PeakCluster>> fut : ftemp_sublist_view)
                try {
                    resultClusters.addAll(fut.get());
                } catch (InterruptedException | ExecutionException ex) {
                    throw new RuntimeException(ex);
                }
            ftemp_sublist_view.clear();
            if (!last_iter && fjp.getActiveThreadCount() < fjp.getParallelism()) {
                //   System.out.println("PeakCurveSmoothingUnit: fjp.getActiveThreadCount()\t"+fjp.getActiveThreadCount()+"\t"+step_pccc);
                step_pccc *= 2;
            }
        }
    }
    assert ftemp.isEmpty() : "temp storage for futures should be empty by end of loop";
    fjp.shutdown();
    try {
        fjp.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    for (final PeakCluster peakCluster : resultClusters) {
        //Check if the monoisotope peak of the cluster has been grouped into another isotope cluster; if so, drop the peak cluster
        if (!parameter.RemoveGroupedPeaks ||
        //      !peakCluster.MonoIsotopePeak.ChargeGrouped.contains(peakCluster.Charge)
                !IonChargeHashSet.contains(peakCluster.MonoIsotopePeak.ChargeGrouped, peakCluster.Charge)) {
            peakCluster.Index = LCMSPeakBase.PeakClusters.size() + 1;
            peakCluster.GetConflictCorr();
            LCMSPeakBase.PeakClusters.add(peakCluster);
        }
    }
    System.gc();
    Logger.getRootLogger()
            .info("No of ion clusters:" + LCMSPeakBase.PeakClusters.size() + " (Memory usage:" + Math.round(
                    (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1048576) + "MB)");
}
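In this example get() is used to drain results in batches: clustering tasks are submitted to the ForkJoinPool until the batch size step_pccc is reached, then get() is called on each pending future and the returned PeakCluster lists are appended to resultClusters. InterruptedException and ExecutionException are both wrapped in a RuntimeException, and the batch size doubles whenever the pool still has idle workers, which bounds the number of outstanding futures without starving the pool.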
From source file: com.homeadvisor.kafdrop.service.CuratorKafkaMonitor.java
private Map<Integer, Long> getConsumerOffsets(String groupId, TopicVO topic) {
    try {
        // Kafka doesn't really give us an indication of whether a consumer is
        // using Kafka or Zookeeper based offset tracking. So look up the offsets
        // for both and assume that the largest offset is the correct one.
        ForkJoinTask<Map<Integer, Long>> kafkaTask = threadPool
                .submit(() -> getConsumerOffsets(groupId, topic, false));

        ForkJoinTask<Map<Integer, Long>> zookeeperTask = threadPool
                .submit(() -> getConsumerOffsets(groupId, topic, true));

        Map<Integer, Long> zookeeperOffsets = zookeeperTask.get();
        Map<Integer, Long> kafkaOffsets = kafkaTask.get();

        zookeeperOffsets.entrySet()
                .forEach(entry -> kafkaOffsets.merge(entry.getKey(), entry.getValue(), Math::max));

        return kafkaOffsets;
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(ex);
    } catch (ExecutionException ex) {
        throw Throwables.propagate(ex.getCause());
    }
}
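Here two offset lookups run concurrently and get() is called on both tasks, blocking until the Kafka-based and ZooKeeper-based results are available before they are merged with Math::max. Note the exception handling around get(): on InterruptedException the current thread is re-interrupted before the exception is propagated, and ExecutionException is unwrapped so the original cause is rethrown.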