Usage examples for java.util.concurrent.CompletionService.take()
Future<V> take() throws InterruptedException;
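take() blocks until one of the submitted tasks has completed and returns its Future, so results arrive in completion order rather than submission order. The recurring pattern in every example below is to count submissions and then call take() exactly that many times. A minimal self-contained sketch of that pattern (Java 8+; the pool size, task count, and payload are illustrative, not taken from any source file below):

import java.util.concurrent.*;

public class CompletionServiceTakeExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CompletionService<Integer> cs = new ExecutorCompletionService<>(pool);
        int tasks = 8;
        // Submit N tasks and remember N; take() must then be called exactly N times.
        for (int i = 0; i < tasks; i++) {
            final int n = i;
            cs.submit(() -> n * n); // Callable<Integer>
        }
        try {
            for (int i = 0; i < tasks; i++) {
                Future<Integer> done = cs.take(); // blocks until some task completes
                try {
                    System.out.println("result: " + done.get());
                } catch (ExecutionException e) {
                    // take() also delivers failed tasks; the cause is wrapped here
                    System.err.println("task failed: " + e.getCause());
                }
            }
        } finally {
            pool.shutdown();
        }
    }
}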
From source file:org.apache.hadoop.hbase.regionserver.HStore.java
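HStore opens store files in parallel and drains them with take(): sizes are accumulated as each Future completes, and the first InterruptedException or ExecutionException is remembered and rethrown as an IOException after all tasks are drained and the pool is shut down: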
/**
 * Creates an unsorted list of StoreFile loaded in parallel
 * from the given directory.
 * @throws IOException
 */
private List<StoreFile> loadStoreFiles() throws IOException {
    Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
    if (files == null || files.size() == 0) {
        return new ArrayList<StoreFile>();
    }

    // initialize the thread pool for opening store files in parallel..
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region
            .getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.getColumnFamilyName());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<StoreFile>(
            storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (final StoreFileInfo storeFileInfo : files) {
        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {
            @Override
            public StoreFile call() throws IOException {
                StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }

    ArrayList<StoreFile> results = new ArrayList<StoreFile>(files.size());
    IOException ioe = null;
    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            try {
                Future<StoreFile> future = completionService.take();
                StoreFile storeFile = future.get();
                long length = storeFile.getReader().length();
                this.storeSize += length;
                this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("loaded " + storeFile.toStringDetailed());
                }
                results.add(storeFile);
            } catch (InterruptedException e) {
                if (ioe == null) ioe = new InterruptedIOException(e.getMessage());
            } catch (ExecutionException e) {
                if (ioe == null) ioe = new IOException(e.getCause());
            }
        }
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }
    if (ioe != null) {
        // close StoreFile readers
        for (StoreFile file : results) {
            try {
                if (file != null) file.closeReader(true);
            } catch (IOException e) {
                LOG.warn(e.getMessage());
            }
        }
        throw ioe;
    }
    return results;
}
From source file:nl.systemsgenetics.eqtlinteractionanalyser.eqtlinteractionanalyser.TestEQTLDatasetForInteractions.java
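Here a fixed thread pool runs one interaction-analysis task per covariate; pool.take().get() collects each task's z-score array, which is folded into a chi-square sum to track the top covariate: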
public final String performInteractionAnalysis(String[] covsToCorrect, String[] covsToCorrect2,
        TextFile outputTopCovs, File snpsToSwapFile, HashMultimap<String, String> qtlProbeSnpMultiMap,
        String[] covariatesToTest, HashMap hashSamples, int numThreads, final TIntHashSet snpsToTest,
        boolean skipNormalization, boolean skipCovariateNormalization,
        HashMultimap<String, String> qtlProbeSnpMultiMapCovariates) throws IOException, Exception {

    //hashSamples = excludeOutliers(hashSamples);

    HashMap<String, Integer> covariatesToLoad = new HashMap();
    if (covariatesToTest != null) {
        for (String c : covariatesToTest) {
            covariatesToLoad.put(c, null);
        }
        for (String c : covsToCorrect) {
            covariatesToLoad.put(c, null);
        }
        for (String c : covsToCorrect2) {
            covariatesToLoad.put(c, null);
        }
        for (int i = 1; i <= 50; ++i) {
            covariatesToLoad.put("Comp" + i, null);
        }
    } else {
        covariatesToLoad = null;
    }

    ExpressionDataset datasetExpression = new ExpressionDataset(
            inputDir + "/bigTableLude.txt.Expression.binary", '\t', null, hashSamples);
    ExpressionDataset datasetCovariates = new ExpressionDataset(
            inputDir + "/covariateTableLude.txt.Covariates.binary", '\t', covariatesToLoad, hashSamples);

    org.apache.commons.math3.stat.regression.OLSMultipleLinearRegression regression =
            new org.apache.commons.math3.stat.regression.OLSMultipleLinearRegression();

    int nrSamples = datasetGenotypes.nrSamples;

    correctDosageDirectionForQtl(snpsToSwapFile, datasetGenotypes, datasetExpression);

    if (!skipNormalization) {
        correctExpressionData(covsToCorrect2, datasetGenotypes, datasetCovariates, datasetExpression);
    }

    ExpressionDataset datasetCovariatesPCAForceNormal = new ExpressionDataset(
            inputDir + "/covariateTableLude.txt.Covariates.binary", '\t', covariatesToLoad, hashSamples);

    if (!skipNormalization && !skipCovariateNormalization) {
        correctCovariateDataPCA(covsToCorrect2, covsToCorrect, datasetGenotypes,
                datasetCovariatesPCAForceNormal);
    }

    if (1 == 1) {
        if (!skipNormalization && !skipCovariateNormalization && covsToCorrect2.length != 0
                && covsToCorrect.length != 0) {
            correctCovariateData(covsToCorrect2, covsToCorrect, datasetGenotypes, datasetCovariates);
        }
        if (!skipNormalization && !skipCovariateNormalization && !qtlProbeSnpMultiMapCovariates.isEmpty()) {
            correctCovariatesForQtls(datasetCovariates, datasetGenotypes, qtlProbeSnpMultiMapCovariates);
        }
        if (1 == 2) {
            saveCorrectedCovariates(datasetCovariates);
        }
        if (1 == 2) {
            icaCovariates(datasetCovariates);
        }
        if (!skipNormalization) {
            forceNormalCovariates(datasetCovariates, datasetGenotypes);
        }
    }

    ExpressionDataset datasetExpressionBeforeEQTLCorrection = new ExpressionDataset(
            datasetExpression.nrProbes, datasetExpression.nrSamples);
    for (int p = 0; p < datasetExpression.nrProbes; p++) {
        for (int s = 0; s < datasetExpression.nrSamples; s++) {
            datasetExpressionBeforeEQTLCorrection.rawData[p][s] = datasetExpression.rawData[p][s];
        }
    }

    if (!skipNormalization && covsToCorrect.length != 0) {
        correctExpressionDataForInteractions(covsToCorrect, datasetCovariates, datasetGenotypes, nrSamples,
                datasetExpression, regression, qtlProbeSnpMultiMap);
    }

    if (!skipNormalization) {
        forceNormalExpressionData(datasetExpression);
    }

    datasetExpression.save(outputDir + "/expressionDataRound_" + covsToCorrect.length + ".txt");
    datasetExpression.save(outputDir + "/expressionDataRound_" + covsToCorrect.length + ".binary");
    datasetCovariates.save(outputDir + "/covariateData_" + covsToCorrect.length + ".binary");

    if (1 == 1) {

        ExpressionDataset datasetZScores = new ExpressionDataset(datasetCovariates.nrProbes,
                datasetExpression.nrProbes);
        datasetZScores.probeNames = datasetCovariates.probeNames;
        datasetZScores.sampleNames = new String[datasetGenotypes.probeNames.length];
        for (int i = 0; i < datasetGenotypes.probeNames.length; ++i) {
            datasetZScores.sampleNames[i] = datasetGenotypes.probeNames[i]
                    + datasetExpression.probeNames[i]
                            .substring(datasetExpression.probeNames[i].lastIndexOf('_'));
        }
        datasetZScores.recalculateHashMaps();

        SkippedInteractionWriter skippedWriter = new SkippedInteractionWriter(
                new File(outputDir + "/skippedInteractionsRound_" + covsToCorrect.length + ".txt"));

        java.util.concurrent.ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
        CompletionService<DoubleArrayIntegerObject> pool =
                new ExecutorCompletionService<DoubleArrayIntegerObject>(threadPool);
        int nrTasks = 0;
        for (int cov = 0; cov < datasetCovariates.nrProbes; cov++) {
            double stdev = JSci.maths.ArrayMath.standardDeviation(datasetCovariates.rawData[cov]);
            if (stdev > 0) {
                PerformInteractionAnalysisPermutationTask task = new PerformInteractionAnalysisPermutationTask(
                        datasetGenotypes, datasetExpression, datasetCovariates,
                        datasetCovariatesPCAForceNormal, cov, skippedWriter, snpsToTest);
                pool.submit(task);
                nrTasks++;
            }
        }

        String maxChi2Cov = "";
        int maxChi2CovI = 0;
        double maxChi2 = 0;
        try {
            // If gene annotation provided, for chi2sum calculation use only genes that are 1mb apart
            //if (geneDistanceMap != null) {
            for (int task = 0; task < nrTasks; task++) {
                try {
                    //System.out.println("Waiting on thread for: " + datasetCovariates.probeNames[cov]);
                    DoubleArrayIntegerObject result = pool.take().get();
                    int cov = result.intValue;
                    double chi2Sum = 0;
                    double[] covZ = datasetZScores.rawData[cov];
                    for (int snp = 0; snp < datasetGenotypes.nrProbes; snp++) {
                        //if (genesFarAway(datasetZScores.sampleNames[snp], datasetZScores.probeNames[cov])) {
                        double z = result.doubleArray[snp];
                        covZ[snp] = z;
                        if (!Double.isNaN(z)) {
                            chi2Sum += z * z;
                        }
                        //}
                    }
                    if (chi2Sum > maxChi2 && !datasetCovariates.probeNames[cov].startsWith("Comp")
                            && !datasetCovariates.probeNames[cov].equals("LLS")
                            && !datasetCovariates.probeNames[cov].equals("LLdeep")
                            && !datasetCovariates.probeNames[cov].equals("RS")
                            && !datasetCovariates.probeNames[cov].equals("CODAM")) {
                        maxChi2 = chi2Sum;
                        maxChi2CovI = cov;
                        maxChi2Cov = datasetCovariates.probeNames[cov];
                    }
                    //System.out.println(covsToCorrect.length + "\t" + cov + "\t" + datasetCovariates.probeNames[cov] + "\t" + chi2Sum);
                    if ((task + 1) % 512 == 0) {
                        System.out.println(task + 1 + " tasks processed");
                    }
                } catch (ExecutionException ex) {
                    Logger.getLogger(PerformInteractionAnalysisPermutationTask.class.getName())
                            .log(Level.SEVERE, null, ex);
                }
            }
            /*}
            // If gene annotation not provided, use all gene pairs: the original source
            // carries a commented-out copy of the same take()/get() drain loop as above,
            // but without the covariate-name filter when updating maxChi2.
            */
            threadPool.shutdown();
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println(e.getMessage());
        }

        System.out.println("Top covariate:\t" + maxChi2 + "\t" + maxChi2Cov);
        outputTopCovs.writeln("Top covariate:\t" + maxChi2 + "\t" + maxChi2Cov);
        outputTopCovs.flush();
        skippedWriter.close();
        datasetZScores.save(outputDir + "/InteractionZScoresMatrix-" + covsToCorrect.length + "Covariates.txt");

        BufferedWriter writer = new BufferedWriter(
                new FileWriter(outputDir + "/" + "topCov" + maxChi2Cov + "_expression.txt"));
        double[] topCovExpression = datasetCovariates.rawData[maxChi2CovI];
        for (int i = 0; i < topCovExpression.length; ++i) {
            writer.append(datasetCovariates.sampleNames[i]);
            writer.append('\t');
            writer.append(String.valueOf(topCovExpression[i]));
            writer.append('\n');
        }
        writer.close();

        return maxChi2Cov;
    }
    return null;
}
From source file:org.openspaces.admin.internal.admin.DefaultAdmin.java
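generateDump() submits one dump task per provider, then calls cs.take().get() once per provider, ignoring individual failures so the compound result contains whatever dumps succeeded: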
@Override
public DumpResult generateDump(final Set<DumpProvider> dumpProviders, final DumpGeneratedListener listener,
        final String cause, final Map<String, Object> context, final String... processor)
        throws AdminException {
    CompoundDumpResult dumpResult = new CompoundDumpResult();

    ExecutorService es = Executors.newFixedThreadPool(dumpProviders.size());
    CompletionService<DumpResult> cs = new ExecutorCompletionService<DumpResult>(es);

    final AtomicInteger counter = new AtomicInteger();
    for (final DumpProvider dumpProvider : dumpProviders) {
        cs.submit(new Callable<DumpResult>() {
            @Override
            public DumpResult call() throws Exception {
                DumpResult result = dumpProvider.generateDump(cause, context, processor);
                synchronized (listener) {
                    listener.onGenerated(dumpProvider, result, counter.incrementAndGet(),
                            dumpProviders.size());
                }
                return result;
            }
        });
    }

    for (int i = 0; i < dumpProviders.size(); i++) {
        try {
            dumpResult.add(cs.take().get());
        } catch (Exception e) {
            // ignore it for now
        }
    }

    es.shutdown();
    return dumpResult;
}
From source file:com.spotify.docker.client.DefaultDockerClientTest.java
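This test exhausts a connection pool of size 1 with many concurrent waitContainer() calls; completion.take().get() is expected to surface a DockerTimeoutException wrapped in an ExecutionException: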
@Test(expected = DockerTimeoutException.class)
public void testConnectionRequestTimeout() throws Exception {
    final int connectionPoolSize = 1;
    final int callableCount = connectionPoolSize * 100;

    final ExecutorService executor = Executors.newCachedThreadPool();
    final CompletionService completion = new ExecutorCompletionService(executor);

    // Spawn and wait on many more containers than the connection pool size.
    // This should cause a timeout once the connection pool is exhausted.
    try (final DockerClient dockerClient = DefaultDockerClient.fromEnv()
            .connectionPoolSize(connectionPoolSize).build()) {
        // Create container
        final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
                .cmd("sh", "-c", "while :; do sleep 1; done").build();
        final String name = randomName();
        final ContainerCreation creation = dockerClient.createContainer(config, name);
        final String id = creation.id();

        // Start the container
        dockerClient.startContainer(id);

        // Submit a bunch of waitContainer requests
        for (int i = 0; i < callableCount; i++) {
            //noinspection unchecked
            completion.submit(new Callable<ContainerExit>() {
                @Override
                public ContainerExit call() throws Exception {
                    return dockerClient.waitContainer(id);
                }
            });
        }

        // Wait for the requests to complete or throw expected exception
        for (int i = 0; i < callableCount; i++) {
            try {
                completion.take().get();
            } catch (ExecutionException e) {
                Throwables.propagateIfInstanceOf(e.getCause(), DockerTimeoutException.class);
                throw e;
            }
        }
    } finally {
        executor.shutdown();
    }
}
From source file:org.apache.hadoop.hbase.regionserver.Store.java
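An older variant of HBase's parallel store-file loader: here the drain loop rethrows the first InterruptedException or ExecutionException as an IOException and shuts the opener pool down in a finally block: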
/**
 * Creates an unsorted list of StoreFile loaded in parallel
 * from the given directory.
 * @throws IOException
 */
private List<StoreFile> loadStoreFiles() throws IOException {
    ArrayList<StoreFile> results = new ArrayList<StoreFile>();
    FileStatus files[] = getStoreFiles();
    if (files == null || files.length == 0) {
        return results;
    }

    // initialize the thread pool for opening store files in parallel..
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region
            .getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.family.getNameAsString());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<StoreFile>(
            storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (int i = 0; i < files.length; i++) {
        // Skip directories.
        if (files[i].isDir()) {
            continue;
        }
        final Path p = files[i].getPath();
        // Check for empty hfile. Should never be the case but can happen
        // after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
        // NOTE: that the HFileLink is just a name, so it's an empty file.
        if (!HFileLink.isHFileLink(p) && this.fs.getFileStatus(p).getLen() <= 0) {
            LOG.warn("Skipping " + p + " because its empty. HBASE-646 DATA LOSS?");
            continue;
        }
        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {
            public StoreFile call() throws IOException {
                StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf, family.getBloomFilterType(),
                        dataBlockEncoder, isAssistant());
                passSchemaMetricsTo(storeFile);
                storeFile.createReader();
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }

    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            Future<StoreFile> future = completionService.take();
            StoreFile storeFile = future.get();
            long length = storeFile.getReader().length();
            this.storeSize += length;
            this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
            if (LOG.isDebugEnabled()) {
                LOG.debug("loaded " + storeFile.toStringDetailed());
            }
            results.add(storeFile);
        }
    } catch (InterruptedException e) {
        throw new IOException(e);
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }
    return results;
}
From source file:com.github.NearestNeighbors.java
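For each test region, one distance-calculation task is submitted per candidate repository, and cs.take().get() is called once per submission to gather the partial distance maps: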
public Map<String, Map<String, Collection<Float>>> evaluate(final Collection<Watcher> test_instances)
        throws IOException, InterruptedException, ExecutionException {
    log.info("knn-evaluate: Loading watchers.");
    log.debug(String.format("knn-evaluate: Total unique test watchers: %d", test_instances.size()));

    final Map<String, Map<String, Collection<Float>>> results = new HashMap<String, Map<String, Collection<Float>>>();
    final ExecutorService pool = Executors.newFixedThreadPool(THREAD_POOL_SIZE);

    // For each watcher in the test set . . .
    log.info("knn-evaluate: Starting evaluations");
    int test_watcher_count = 0;
    for (final Watcher watcher : test_instances) {
        test_watcher_count++;
        log.info(String.format("Processing watcher (%d/%d)", test_watcher_count, test_instances.size()));

        results.put(watcher.id, new HashMap<String, Collection<Float>>());

        // See if we have any training instances for the watcher. If not, we really can't guess anything.
        final Watcher training_watcher = training_watchers.get(watcher.id);
        if (training_watcher == null) {
            continue;
        }

        /***********************************
         *** Handling repository regions ***
         ***********************************/

        // Calculate the distance between the repository regions we know the test watcher is in,
        // to every other region in the training data.
        final Set<NeighborRegion> test_regions = watchers_to_regions.get(watcher.id);

        /*
        final List<NeighborRegion> related_regions = find_regions_with_most_cutpoints(watcher, test_regions);
        for (final NeighborRegion related_region : related_regions) {
            storeDistance(results, watcher, related_region.most_popular, 0.0f);
            storeDistance(results, watcher, related_region.most_forked, 0.0f);
        }
        */

        /* (commented-out Ruby notes from the original source)
        also_owned_counts = {}
        training_watcher.repositories.each do |repo_id|
            repo = @training_repositories[repo_id]
            also_owned_counts[repo.owner] ||= 0
            also_owned_counts[repo.owner] += 1
        end

        also_owned_counts.each do |owner, count|
            # If 5% or more of the test watcher's repositories are owned by the same person,
            # look at the owner's other repositories.
            if (also_owned_repos.size.to_f / training_watcher.repositories.size) > 0.05
                    || (also_owned_repos.size.to_f / @owners_to_repositories[owner].size) > 0.3
                repositories_to_check.merge(@owners_to_repositories[owner].collect {|r| r.id})
            end
        end
        */

        // Add in the most forked regions from similar watchers.
        /*
        final Set<NeighborRegion> related_regions = find_regions_containing_fellow_watchers(test_regions);
        for (final NeighborRegion region : related_regions) {
            repositories_to_check.add(region.most_forked);
        }
        */

        /*************************************
         **** Begin distance calculations ****
         *************************************/
        int test_region_count = 0;
        for (final NeighborRegion test_region : test_regions) {
            test_region_count++;

            final CompletionService<Map<Repository, Float>> cs =
                    new ExecutorCompletionService<Map<Repository, Float>>(pool);
            int training_region_count = 0;

            final Set<Repository> repositories_to_check = new HashSet<Repository>();

            // Add in the most forked repositories from each region we know the test watcher is in.
            for (final NeighborRegion region : test_regions) {
                repositories_to_check.add(region.most_forked);
            }

            for (final Repository repo : training_watcher.repositories) {
                if (repo.parent != null) {
                    repositories_to_check.add(repo.parent);
                }
            }

            /********************************************************************
             *** Handling repositories owned by owners we're already watching ***
             ********************************************************************/
            if (training_watcher.owner_counts.get(test_region.most_forked.owner) != null
                    && (((training_watcher.owner_counts.get(test_region.most_forked.owner).floatValue()
                            / owners_to_repositories.get(test_region.most_forked.owner).size()) > 0.25)
                        || (training_watcher.owner_distribution(test_region.most_forked.owner) > 0.25))) {
                for (final Repository also_owned : owners_to_repositories.get(test_region.most_forked.owner)) {
                    // Only add repos that are the most forked in their respective regions.
                    if (also_owned.region.most_forked.equals(also_owned)) {
                        repositories_to_check.add(also_owned);
                    }
                }
            }

            for (final Repository training_repository : repositories_to_check) {
                training_region_count++;
                if (log.isDebugEnabled()) {
                    log.debug(String.format("Processing watcher (%d/%d) - (%d/%d):(%d/%d)",
                            test_watcher_count, test_instances.size(), test_region_count,
                            test_regions.size(), training_region_count, repositories_to_check.size()));
                }

                // Submit distance calculation task if the test watcher isn't already watching the repository.
                cs.submit(new Callable<Map<Repository, Float>>() {
                    public Map<Repository, Float> call() throws Exception {
                        final Map<Repository, Float> ret = new HashMap<Repository, Float>();
                        if (!training_repository.watchers.contains(training_watcher)) {
                            float distance = euclidian_distance(training_watcher, test_region.most_forked,
                                    training_repository);
                            ret.put(training_repository, Float.valueOf(distance));
                        }
                        return ret;
                    }
                });
            }

            // Process the distance calculation results.
            for (int i = 0; i < repositories_to_check.size(); i++) {
                final Map<Repository, Float> distance = cs.take().get();
                for (final Map.Entry<Repository, Float> pair : distance.entrySet()) {
                    storeDistance(results, watcher, pair.getKey(), pair.getValue().floatValue());
                }
            }
        }
    }

    /* (commented-out Ruby notes from the original source)
    =begin
    # Find a set of repositories from fellow watchers that happen to watch a lot of
    # the same repositories as the test watcher; also add the most popular and most
    # forked regions the test watcher is in, plus similarly owned repositories,
    # into repositories_to_check before scoring.
    =end
    */
    return results;
}
From source file:org.apache.hadoop.hbase.client.transactional.TransactionManager.java
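prepareCommit() fans out one prepare request per participating region (or per region server in batch MVCC mode) and uses compPool.take().get() to tally the commit votes, with commit conflicts taking precedence over plain failures: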
/**
 * Prepare to commit a transaction.
 *
 * @param transactionState
 * @return commitStatusCode (see {@link TransactionalRegionInterface})
 * @throws IOException
 * @throws CommitUnsuccessfulException
 */
public int prepareCommit(final TransactionState transactionState)
        throws CommitUnsuccessfulException, IOException {
    if (LOG.isTraceEnabled())
        LOG.trace("Enter prepareCommit, txid: " + transactionState.getTransactionId());

    if (batchRegionServer && (TRANSACTION_ALGORITHM == AlgorithmType.MVCC)) {
        boolean allReadOnly = true;
        int loopCount = 0;

        if (transactionState.islocalTransaction()) {
            if (LOG.isTraceEnabled())
                LOG.trace("TransactionManager.prepareCommit local transaction "
                        + transactionState.getTransactionId());
        } else if (LOG.isTraceEnabled())
            LOG.trace("TransactionManager.prepareCommit global transaction "
                    + transactionState.getTransactionId());

        // (need one CompletionService per request for thread safety, can share pool of threads
        CompletionService<Integer> compPool = new ExecutorCompletionService<Integer>(threadPool);
        try {
            ServerName servername;
            List<TransactionRegionLocation> regionList;
            Map<ServerName, List<TransactionRegionLocation>> locations =
                    new HashMap<ServerName, List<TransactionRegionLocation>>();

            for (TransactionRegionLocation location : transactionState.getParticipatingRegions()) {
                servername = location.getServerName();
                if (!locations.containsKey(servername)) {
                    regionList = new ArrayList<TransactionRegionLocation>();
                    locations.put(servername, regionList);
                } else {
                    regionList = locations.get(servername);
                }
                regionList.add(location);
            }
            for (final Map.Entry<ServerName, List<TransactionRegionLocation>> entry : locations.entrySet()) {
                loopCount++;
                compPool.submit(new TransactionManagerCallable(transactionState,
                        entry.getValue().iterator().next(), connection) {
                    public Integer call() throws CommitUnsuccessfulException, IOException {
                        return doPrepareX(entry.getValue(), transactionState.getTransactionId());
                    }
                });
            }
        } catch (Exception e) {
            throw new CommitUnsuccessfulException(e);
        }

        // loop to retrieve replies
        int commitError = 0;
        try {
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                Integer canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    LOG.error("Unexpected value of canCommit in prepareCommit (during completion processing): "
                            + canCommit);
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                }
            }

            loopCount = 0;
            if (transactionState.getRegionsRetryCount() > 0) {
                for (TransactionRegionLocation location : transactionState.getRetryRegions()) {
                    loopCount++;
                    compPool.submit(new TransactionManagerCallable(transactionState, location, connection) {
                        public Integer call() throws CommitUnsuccessfulException, IOException {
                            return doPrepareX(location.getRegionInfo().getRegionName(),
                                    transactionState.getTransactionId(), location);
                        }
                    });
                }
                transactionState.clearRetryRegions();
            }
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                Integer canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                }
            }
        } catch (Exception e) {
            throw new CommitUnsuccessfulException(e);
        }
        if (commitError != 0)
            return commitError;

        return allReadOnly ? TransactionalReturn.COMMIT_OK_READ_ONLY : TransactionalReturn.COMMIT_OK;
    } else {
        boolean allReadOnly = true;
        int loopCount = 0;
        ServerName servername;
        List<TransactionRegionLocation> regionList;
        Map<ServerName, List<TransactionRegionLocation>> locations = null;

        if (transactionState.islocalTransaction()) {
            //System.out.println("prepare islocal");
            if (LOG.isTraceEnabled())
                LOG.trace("TransactionManager.prepareCommit local transaction "
                        + transactionState.getTransactionId());
        } else if (LOG.isTraceEnabled())
            LOG.trace("TransactionManager.prepareCommit global transaction "
                    + transactionState.getTransactionId());

        // (need one CompletionService per request for thread safety, can share pool of threads
        CompletionService<Integer> compPool = new ExecutorCompletionService<Integer>(threadPool);
        try {
            if (batchRSMetricsFlag)
                locations = new HashMap<ServerName, List<TransactionRegionLocation>>();

            for (TransactionRegionLocation location : transactionState.getParticipatingRegions()) {
                if (batchRSMetricsFlag) {
                    servername = location.getServerName();
                    if (!locations.containsKey(servername)) {
                        regionList = new ArrayList<TransactionRegionLocation>();
                        locations.put(servername, regionList);
                    } else {
                        regionList = locations.get(servername);
                    }
                    regionList.add(location);
                }

                loopCount++;
                final TransactionRegionLocation myLocation = location;
                final byte[] regionName = location.getRegionInfo().getRegionName();
                compPool.submit(new TransactionManagerCallable(transactionState, location, connection) {
                    public Integer call() throws IOException, CommitUnsuccessfulException {
                        return doPrepareX(regionName, transactionState.getTransactionId(), myLocation);
                    }
                });
            }

            if (batchRSMetricsFlag) {
                this.regions += transactionState.getParticipatingRegions().size();
                this.regionServers += locations.size();
                String rsToRegion = locations.size() + " RS / "
                        + transactionState.getParticipatingRegions().size() + " Regions";
                if (batchRSMetrics.get(rsToRegion) == null) {
                    batchRSMetrics.put(rsToRegion, 1L);
                } else {
                    batchRSMetrics.put(rsToRegion, batchRSMetrics.get(rsToRegion) + 1);
                }
                if (metricsCount >= 10000) {
                    metricsCount = 0;
                    if (LOG.isInfoEnabled())
                        LOG.info("---------------------- BatchRS metrics ----------------------");
                    if (LOG.isInfoEnabled())
                        LOG.info("Number of total Region calls: " + this.regions);
                    if (LOG.isInfoEnabled())
                        LOG.info("Number of total RegionServer calls: " + this.regionServers);
                    if (LOG.isInfoEnabled())
                        LOG.info("---------------- Total number of calls by ratio: ------------");
                    for (Map.Entry<String, Long> entry : batchRSMetrics.entrySet()) {
                        if (LOG.isInfoEnabled())
                            LOG.info(entry.getKey() + ": " + entry.getValue());
                    }
                    if (LOG.isInfoEnabled())
                        LOG.info("-------------------------------------------------------------");
                }
                metricsCount++;
            }
        } catch (Exception e) {
            LOG.error("exception in prepareCommit (during submit to pool): " + e);
            throw new CommitUnsuccessfulException(e);
        }

        // loop to retrieve replies
        int commitError = 0;
        try {
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                int canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    LOG.error("Unexpected value of canCommit in prepareCommit (during completion processing): "
                            + canCommit);
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                }
            }
        } catch (Exception e) {
            LOG.error("exception in prepareCommit (during completion processing): " + e);
            throw new CommitUnsuccessfulException(e);
        }
        if (commitError != 0)
            return commitError;

        // Before replying prepare success, check for DDL transaction.
        // If prepare already has errors (commitError != 0), an abort is automatically
        // triggered by TM which would take care of ddl abort.
        // If prepare is success up to this point, DDL operation needs to check if any
        // drop table requests were recorded as part of phase 0. If any drop table
        // requests is recorded, then those tables need to be disabled as part of prepare.
        if (transactionState.hasDDLTx()) {
            // If tables were created, then nothing else needs to be done.
            // If tables were recorded dropped, then they need to be disabled.
            // Disabled tables will ultimately be deleted in commit phase.
            ArrayList<String> createList = new ArrayList<String>(); // This list is ignored.
            ArrayList<String> dropList = new ArrayList<String>();
            ArrayList<String> truncateList = new ArrayList<String>();
            StringBuilder state = new StringBuilder();
            try {
                tmDDL.getRow(transactionState.getTransactionId(), state, createList, dropList, truncateList);
            } catch (Exception e) {
                LOG.error("exception in doPrepare getRow: " + e);
                if (LOG.isTraceEnabled())
                    LOG.trace("exception in doPrepare getRow: txID: " + transactionState.getTransactionId());
                state.append("INVALID"); // to avoid processing further down this path.
                commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
            }

            // Return if error at this point.
            if (commitError != 0)
                return commitError;

            if (state.toString().equals("VALID") && dropList.size() > 0) {
                Iterator<String> di = dropList.iterator();
                while (di.hasNext()) {
                    try {
                        // physical drop of table from hbase.
                        disableTable(transactionState, di.next());
                    } catch (Exception e) {
                        if (LOG.isTraceEnabled())
                            LOG.trace("exception in doPrepare disableTable: txID: "
                                    + transactionState.getTransactionId());
                        LOG.error("exception in doCommit, Step : DeleteTable: " + e);
                        // Any error at this point should be considered prepareCommit as unsuccessful.
                        // Retry logic can be added only if it is retryable error: TODO.
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                        break;
                    }
                }
            }
        }
        if (commitError != 0)
            return commitError;

        return allReadOnly ? TransactionalReturn.COMMIT_OK_READ_ONLY : TransactionalReturn.COMMIT_OK;
    }
}
From source file:org.ugent.caagt.genestacker.search.bb.BranchAndBound.java
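Each branch-and-bound iteration launches a fixed number of cross workers and registers their crossing schemes as extCompletionService.take() delivers them, in completion order: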
@Override
public ParetoFrontier runSearch(long runtimeLimit, int numThreads) throws GenestackerException {

    // create list to store previously generated schemes
    previousSchemes = new ArrayList<>();
    // create set to store previously generated scheme alternatives
    previousSchemeAlternatives = new HashSet<>();
    // create queue for schemes to be considered
    schemeQueue = new LinkedList<>();

    // reset ids
    SeedLotNode.resetIDs();
    PlantNode.resetIDs();
    CrossingNode.resetIDs();
    CrossingSchemeAlternatives.resetIDs();

    // create thread pool and completion service for scheme extension;
    // inform user about number of cross workers used (verbose)
    logger.info(VERBOSE, "Number of threads used for extending partial schemes: {}", numThreads);
    ExecutorService extPool = Executors.newFixedThreadPool(numThreads);
    CompletionService<List<CrossingSchemeAlternatives>> extCompletionService =
            new ExecutorCompletionService<>(extPool);

    // initialize solution manager
    BranchAndBoundSolutionManager solutionManager = new BranchAndBoundSolutionManager(dominatesRelation,
            ideotype, popSizeTools, maxNumSeedsPerCrossing, constraints, heuristics, seedLotFilters,
            homozygousIdeotypeParents);
    // set initial Pareto frontier, if any
    if (initialFrontier != null) {
        solutionManager.setFrontier(initialFrontier);
    }

    // apply initial plant filter, if any
    if (initialPlantFilter != null) {
        // verbose
        logger.info(VERBOSE, "Filtering initial plants ...");
        initialPlants = initialPlantFilter.filter(initialPlants);
        // verbose
        logger.info(VERBOSE, "Retained {} initial plants (see below)", initialPlants.size());
        for (Plant p : initialPlants) {
            logger.info(VERBOSE, "\n{}", p);
        }
    }

    // create initial partial schemes from initial plants
    List<CrossingSchemeAlternatives> initialParentSchemes = new ArrayList<>();
    for (Plant p : initialPlants) {
        // create uniform seed lot
        SeedLot sl = new SeedLot(p.getGenotype());
        // create seedlot node
        SeedLotNode sln = new SeedLotNode(sl, 0);
        // create and attach plant node
        PlantNode pn = new PlantNode(p, 0, sln);
        // create partial crossing scheme
        CrossingScheme s = new CrossingScheme(popSizeTools, pn);
        initialParentSchemes.add(new CrossingSchemeAlternatives(s));
    }
    registerNewSchemes(initialParentSchemes, solutionManager);

    // now iteratively cross schemes with previous schemes to create larger schemes,
    // until all solutions have been inspected or pruned
    while (!runtimeLimitExceeded() && !schemeQueue.isEmpty()) {

        // get next scheme from queue
        CrossingSchemeAlternatives cur = schemeQueue.poll();

        // fire progression message (verbose)
        logger.info(VERBOSE, "num solutions: {} ### prog: {} ({}) ### cur scheme: {} - T = {}",
                solutionManager.getFrontier().getNumSchemes(), previousSchemes.size(), schemeQueue.size(),
                cur, TimeFormatting.formatTime(System.currentTimeMillis() - getStart()));
        // debug: create diagram of current scheme (all alternatives)
        if (logger.isDebugEnabled()) {
            for (int i = 0; i < cur.nrOfAlternatives(); i++) {
                logger.debug("Cur scheme (alternative {}): {}", i + 1,
                        writeDiagram(cur.getAlternatives().get(i)));
            }
            // wait for enter
            DebugUtils.waitForEnter();
        }

        // delete possible pruned alternatives
        Iterator<CrossingScheme> it = cur.iterator();
        int numForCrossing = 0;
        int numForSelfing = 0;
        while (it.hasNext()) {
            CrossingScheme alt = it.next();
            // check if alternative should be removed
            if (previousSchemeAlternatives.contains(alt)) {
                // equivalent scheme alternative generated before, delete current alternative
                it.remove();
            } else if (solutionManager.pruneDequeueScheme(alt)) {
                // prune dequeued scheme (e.g. by the optimal subscheme heuristic)
                it.remove();
            } else {
                // check pruning for crossing/selfing
                boolean pruneCross = solutionManager.pruneCrossCurrentScheme(alt);
                boolean pruneSelf = solutionManager.pruneSelfCurrentScheme(alt);
                if (pruneCross && pruneSelf) {
                    // alternative not useful anymore
                    it.remove();
                } else {
                    // count nr of alternatives useful for crossing or selfing
                    if (!pruneCross) {
                        numForCrossing++;
                    }
                    if (!pruneSelf) {
                        numForSelfing++;
                    }
                }
            }
        }

        if (cur.nrOfAlternatives() > 0) {

            // if useful, self current scheme
            if (numForSelfing > 0) {
                registerNewSchemes(selfScheme(cur, map, solutionManager), solutionManager);
            }

            // if useful, cross with previous schemes
            if (numForCrossing > 0) {
                // launch workers to combine with previous schemes
                Iterator<CrossingSchemeAlternatives> previousSchemesIterator = previousSchemes.iterator();
                for (int w = 0; w < numThreads; w++) {
                    // submit worker
                    extCompletionService
                            .submit(new CrossWorker(previousSchemesIterator, cur, solutionManager, map));
                    // very verbose
                    logger.info(VERY_VERBOSE, "Launched cross worker {} of {}", w + 1, numThreads);
                }
                // handle results of completed workers in the order in which they complete
                for (int w = 0; w < numThreads; w++) {
                    try {
                        // wait for next worker to complete and register its solutions
                        registerNewSchemes(extCompletionService.take().get(), solutionManager);
                        // very verbose
                        logger.info(VERY_VERBOSE, "{}/{} cross workers finished", w + 1, numThreads);
                    } catch (InterruptedException | ExecutionException ex) {
                        // something went wrong with the cross workers
                        throw new SearchException("An error occured while extending the current scheme.", ex);
                    }
                }
            }

            // put the scheme in the sorted set with previously considered schemes
            // (only done if useful for later crossings)
            previousSchemes.add(cur);
            // register scheme alternatives
            previousSchemeAlternatives.addAll(cur.getAlternatives());
        }
    }

    if (runtimeLimitExceeded()) {
        // info
        logger.info("Runtime limit exceeded");
    }

    // shutdown thread pool
    extPool.shutdownNow();

    return solutionManager.getFrontier();
}
From source file:org.apache.hadoop.hbase.regionserver.HRegion.java
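Region initialization opens one HStore per column family in parallel; take() collects each opened store so maximum sequence IDs and memstore timestamps can be aggregated, and any already-opened stores are closed if initialization fails: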
private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
        throws IOException, UnsupportedEncodingException {
    // Load in all the HStores.

    long maxSeqId = -1;
    // initialized to -1 so that we pick up MemstoreTS from column families
    long maxMemstoreTS = -1;

    if (!htableDescriptor.getFamilies().isEmpty()) {
        // initialize the thread pool for opening stores in parallel.
        ThreadPoolExecutor storeOpenerThreadPool = getStoreOpenAndCloseThreadPool(
                "StoreOpener-" + this.getRegionInfo().getShortNameToLog());
        CompletionService<HStore> completionService = new ExecutorCompletionService<HStore>(
                storeOpenerThreadPool);

        // initialize each store in parallel
        for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
            status.setStatus("Instantiating store for column family " + family);
            completionService.submit(new Callable<HStore>() {
                @Override
                public HStore call() throws IOException {
                    return instantiateHStore(family);
                }
            });
        }
        boolean allStoresOpened = false;
        try {
            for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
                Future<HStore> future = completionService.take();
                HStore store = future.get();
                this.stores.put(store.getColumnFamilyName().getBytes(), store);

                long storeMaxSequenceId = store.getMaxSequenceId();
                maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId);
                if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
                    maxSeqId = storeMaxSequenceId;
                }
                long maxStoreMemstoreTS = store.getMaxMemstoreTS();
                if (maxStoreMemstoreTS > maxMemstoreTS) {
                    maxMemstoreTS = maxStoreMemstoreTS;
                }
            }
            allStoresOpened = true;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e.getCause());
        } finally {
            storeOpenerThreadPool.shutdownNow();
            if (!allStoresOpened) {
                // something went wrong, close all opened stores
                LOG.error("Could not initialize all stores for the region=" + this);
                for (Store store : this.stores.values()) {
                    try {
                        store.close();
                    } catch (IOException e) {
                        LOG.warn(e.getMessage());
                    }
                }
            }
        }
    }
    mvcc.initialize(maxMemstoreTS + 1);
    // Recover any edits if available.
    maxSeqId = Math.max(maxSeqId,
            replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
    return maxSeqId;
}