List of usage examples for java.util.concurrent ExecutorService execute
void execute(Runnable command);
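Executes the given command at some time in the future. The command may execute in a new thread, in a pooled thread, or in the calling thread, at the discretion of the Executor implementation. A minimal self-contained sketch of the call (the names here are illustrative, not taken from any of the projects below):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class ExecuteExample {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            for (int i = 0; i < 10; i++) {
                final int taskId = i;
                // execute() is fire-and-forget: no Future is returned, so an exception
                // thrown by the task goes to the thread's UncaughtExceptionHandler,
                // not back to the caller.
                pool.execute(() -> System.out.println(
                        "task " + taskId + " on " + Thread.currentThread().getName()));
            }
            pool.shutdown();                             // stop accepting new tasks
            pool.awaitTermination(10, TimeUnit.SECONDS); // wait for the ten tasks to finish
        }
    }

Because execute() gives the caller no handle on the task, every example below pairs it with shutdown() plus some form of waiting to learn when the work is done.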
From source file:com.aerospike.benchmarks.Main.java
private void doAsyncRWTest(AsyncClient client) throws Exception {
    ExecutorService es = Executors.newFixedThreadPool(this.nThreads);
    RWTask[] tasks = new RWTask[this.nThreads];

    for (int i = 0; i < this.nThreads; i++) {
        RWTask rt;
        if (args.validate) {
            int tstart = this.startKey + ((int) (this.nKeys * (((float) i) / this.nThreads)));
            int tkeys = (int) (this.nKeys * (((float) (i + 1)) / this.nThreads))
                    - (int) (this.nKeys * (((float) i) / this.nThreads));
            rt = new RWTaskAsync(client, args, counters, tstart, tkeys);
        } else {
            rt = new RWTaskAsync(client, args, counters, this.startKey, this.nKeys);
        }
        tasks[i] = rt;
        es.execute(rt);
    }
    collectRWStats(tasks, client);
    es.shutdown();
}
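Note that shutdown() only stops the pool from accepting new work and returns immediately; here the method relies on collectRWStats to observe the running tasks. If the caller instead needed to block until every RWTask had finished, a minimal sketch (same es as above, assuming java.util.concurrent.TimeUnit is imported):

    es.shutdown();
    // shutdown() does not wait; awaitTermination() is what actually blocks.
    if (!es.awaitTermination(1, TimeUnit.HOURS)) {
        es.shutdownNow(); // interrupt tasks that overran the deadline
    }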
From source file:org.dllearner.scripts.evaluation.EnrichmentEvaluationMultithreaded.java
private void evaluateObjectProperties(final SparqlEndpointKS ks)
        throws IllegalArgumentException, SecurityException, InstantiationException, IllegalAccessException,
        InvocationTargetException, NoSuchMethodException, ComponentInitException, InterruptedException {
    Set<ObjectProperty> properties = new SPARQLTasks(ks.getEndpoint()).getAllObjectProperties();
    logger.info("Evaluating " + properties.size() + " object properties...");
    for (final Class<? extends AxiomLearningAlgorithm> algorithmClass : objectPropertyAlgorithms) {
        Thread.sleep(5000);
        Set<OWLAxiom> axioms = new HashSet<OWLAxiom>();
        algorithm2Ontology.put(algorithmClass, axioms);
        int propCnt = 0;
        ExecutorService threadPool = Executors.newFixedThreadPool(maxNrOfThreads);
        for (final ObjectProperty property : properties) {
            threadPool.execute(new Runnable() {
                @Override
                public void run() {
                    String algName = "";
                    try {
                        AxiomLearningAlgorithm learner = algorithmClass.getConstructor(SparqlEndpointKS.class)
                                .newInstance(ks);
                        ((AbstractAxiomLearningAlgorithm) learner).setReasoner(sparqlReasoner);
                        ((AbstractAxiomLearningAlgorithm) learner).addFilterNamespace(NAMESPACE);
                        ConfigHelper.configure(learner, "propertyToDescribe", property.toString());
                        ConfigHelper.configure(learner, "maxExecutionTimeInSeconds", maxExecutionTimeInSeconds);
                        learner.init();
                        algName = AnnComponentManager.getName(learner);
                        boolean emptyEntity = sparqlReasoner.getPopularity(property) == 0;
                        if (emptyEntity) {
                            logger.warn("Empty entity: " + property);
                            writeToDB(property.toManchesterSyntaxString(baseURI, prefixes), algName,
                                    "EMPTY_ENTITY", 0, 0, false);
                        } else {
                            applyLearningAlgorithm(learner, property);
                        }
                    } catch (Exception e) {
                        logger.error("Error occurred for object property " + property.getName()
                                + " with algorithm " + algName, e);
                    }
                }
            });
            propCnt++;
            if (maxObjectProperties != 0 && propCnt == maxObjectProperties) {
                break;
            }
        }
        threadPool.shutdown();
        // Busy-waits until the pool terminates; see the awaitTermination() sketch after the next example.
        while (!threadPool.isTerminated()) {
        }
    }
}
From source file:org.dllearner.scripts.evaluation.EnrichmentEvaluationMultithreaded.java
private void evaluateDataProperties(final SparqlEndpointKS ks)
        throws IllegalArgumentException, SecurityException, InstantiationException, IllegalAccessException,
        InvocationTargetException, NoSuchMethodException, ComponentInitException, InterruptedException {
    Set<DatatypeProperty> properties = new SPARQLTasks(ks.getEndpoint()).getAllDataProperties();
    logger.info("Evaluating " + properties.size() + " data properties...");
    for (final Class<? extends AxiomLearningAlgorithm> algorithmClass : dataPropertyAlgorithms) {
        Thread.sleep(5000);
        int propCnt = 0;
        Set<OWLAxiom> axioms = new HashSet<OWLAxiom>();
        algorithm2Ontology.put(algorithmClass, axioms);
        ExecutorService threadPool = Executors.newFixedThreadPool(maxNrOfThreads);
        for (final DatatypeProperty property : properties) {
            threadPool.execute(new Runnable() {
                @Override
                public void run() {
                    String algName = "";
                    try {
                        AxiomLearningAlgorithm learner = algorithmClass.getConstructor(SparqlEndpointKS.class)
                                .newInstance(ks);
                        ((AbstractAxiomLearningAlgorithm) learner).setReasoner(sparqlReasoner);
                        ((AbstractAxiomLearningAlgorithm) learner).addFilterNamespace(NAMESPACE);
                        ConfigHelper.configure(learner, "propertyToDescribe", property.toString());
                        ConfigHelper.configure(learner, "maxExecutionTimeInSeconds", maxExecutionTimeInSeconds);
                        learner.init();
                        algName = AnnComponentManager.getName(learner);
                        boolean emptyEntity = sparqlReasoner.getPopularity(property) == 0;
                        if (emptyEntity) {
                            logger.warn("Empty entity: " + property);
                            writeToDB(property.toManchesterSyntaxString(baseURI, prefixes), algName,
                                    "EMPTY_ENTITY", 0, 0, false);
                        } else {
                            applyLearningAlgorithm(learner, property);
                        }
                    } catch (Exception e) {
                        logger.error("Error occurred for data property " + property.getName()
                                + " with algorithm " + algName, e);
                    }
                }
            });
            propCnt++;
            if (maxDataProperties != 0 && propCnt == maxDataProperties) {
                break;
            }
        }
        threadPool.shutdown();
        // Busy-waits until the pool terminates; see the sketch below.
        while (!threadPool.isTerminated()) {
        }
    }
}
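Both methods above wait for the pool with an empty while (!threadPool.isTerminated()) loop, which spins a CPU core at full load until the last task finishes. Since each method already declares InterruptedException, a minimal replacement (same threadPool as above, assuming java.util.concurrent.TimeUnit is imported):

    threadPool.shutdown();
    // Blocks the calling thread until every queued task has completed, without the
    // spin loop; the effectively unbounded timeout makes this an indefinite wait.
    threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);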
From source file:org.openrdf.http.server.ProtocolTest.java
/**
 * Test for SES-1861
 *
 * @throws Exception
 */
@Test
public void testConcurrentNamespaceUpdates() throws Exception {
    int limitCount = 1000;
    int limitPrefix = 50;

    Random prng = new Random();

    // String repositoryLocation =
    // Protocol.getRepositoryLocation("http://localhost:8080/openrdf-sesame", "Test-NativeStore");
    String repositoryLocation = TestServer.REPOSITORY_URL;

    ExecutorService threadPool = Executors.newFixedThreadPool(20);

    for (int count = 0; count < limitCount; count++) {
        final int number = count;
        final int i = prng.nextInt(limitPrefix);
        final String prefix = "prefix" + i;
        final String ns = "http://example.org/namespace" + i;

        final String location = Protocol.getNamespacePrefixLocation(repositoryLocation, prefix);

        Runnable runner = new Runnable() {
            public void run() {
                try {
                    if (number % 2 == 0) {
                        putNamespace(location, ns);
                    } else {
                        deleteNamespace(location);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    fail("Failed in test: " + number);
                }
            }
        };
        threadPool.execute(runner);
    }
    threadPool.shutdown();
    threadPool.awaitTermination(30000, TimeUnit.MILLISECONDS);
    threadPool.shutdownNow();
}
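The test ignores the boolean that awaitTermination returns, so a 30-second timeout falls through to shutdownNow() and the test can pass with updates still in flight. A minimal hardening sketch (JUnit's fail is already in scope in the test above):

    threadPool.shutdown();
    if (!threadPool.awaitTermination(30, TimeUnit.SECONDS)) {
        threadPool.shutdownNow();
        fail("Namespace updates still running after 30 seconds");
    }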
From source file:MSUmpire.LCMSPeakStructure.LCMSPeakDIAMS2.java
private void PrepareMGF_UnfragmentIon() throws IOException {
    String mgffile4 = FilenameUtils.getFullPath(ParentmzXMLName) + GetQ3Name() + ".mgf.temp";
    final BufferedWriter mgfWriter4 = DIAPack.get_file(DIAPack.OutputFile.Mgf_Q3, mgffile4);
    final BufferedWriter mapwriter3 = DIAPack.get_file(DIAPack.OutputFile.ScanClusterMapping_Q3,
            FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName)
                    + ".ScanClusterMapping_Q3");

    ArrayList<PseudoMSMSProcessing> ScanList = new ArrayList<>();
    ExecutorService executorPool = Executors.newFixedThreadPool(NoCPUs);
    for (PeakCluster ms2cluster : PeakClusters) {
        ArrayList<PrecursorFragmentPairEdge> frags = UnFragIonClu2Cur.get(ms2cluster.Index);
        if (frags != null && DIA_MZ_Range.getX() <= ms2cluster.TargetMz()
                && DIA_MZ_Range.getY() >= ms2cluster.TargetMz()) {
            ms2cluster.GroupedFragmentPeaks.addAll(frags);
            PseudoMSMSProcessing mSMSProcessing = new PseudoMSMSProcessing(ms2cluster, parameter);
            executorPool.execute(mSMSProcessing);
            ScanList.add(mSMSProcessing);
        }
    }
    executorPool.shutdown();
    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    // Results are read back only after the pool has terminated.
    for (PseudoMSMSProcessing mSMSProcessing : ScanList) {
        if (MatchedFragmentMap.size() > 0) {
            mSMSProcessing.RemoveMatchedFrag(MatchedFragmentMap);
        }
        XYPointCollection Scan = mSMSProcessing.GetScan();
        if (Scan != null && Scan.PointCount() > parameter.MinFrag) {
            parentDIA.Q3Scan++;
            mgfWriter4.append("BEGIN IONS\n")
                    .append("PEPMASS=" + mSMSProcessing.Precursorcluster.TargetMz() + "\n")
                    .append("CHARGE=" + mSMSProcessing.Precursorcluster.Charge + "+\n")
                    .append("RTINSECONDS=" + mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f + "\n")
                    .append("TITLE=").append(GetQ3Name()).append(".")
                    .append(Integer.toString(parentDIA.Q3Scan)).append(".")
                    .append(Integer.toString(parentDIA.Q3Scan)).append(".")
                    .append(Integer.toString(mSMSProcessing.Precursorcluster.Charge)).append("\n");
            for (int i = 0; i < Scan.PointCount(); i++) {
                mgfWriter4.append(Float.toString(Scan.Data.get(i).getX())).append(" ")
                        .append(Float.toString(Scan.Data.get(i).getY())).append("\n");
            }
            mgfWriter4.append("END IONS\n\n");
            mapwriter3.write(parentDIA.Q3Scan + ";" + WindowID + ";"
                    + mSMSProcessing.Precursorcluster.Index + "\n");
        }
        mSMSProcessing.Precursorcluster.GroupedFragmentPeaks.clear();
    }
    // mgfWriter4.close();
    // mapwriter3.close();
}
From source file:com.wso2telco.Endpoints.java
/**
 * Mepin confirm.
 *
 * @param identifier the identifier
 * @param transactionId the transaction id
 * @param allow the allow
 * @param transactionStatus the transaction status
 * @return the response
 * @throws SQLException the SQL exception
 */
@POST
@Path("/mepin/response")
@Consumes("application/x-www-form-urlencoded")
public Response mepinConfirm(@FormParam("identifier") String identifier,
        @FormParam("transaction_id") String transactionId, @FormParam("allow") String allow,
        @FormParam("transaction_status") String transactionStatus) throws SQLException {
    if (log.isDebugEnabled()) {
        log.debug("MePIN transactionID : " + transactionId);
        log.debug("MePIN identifier : " + identifier);
        log.debug("MePIN transactionStatus : " + transactionStatus);
    }
    MePinStatusRequest mePinStatus = new MePinStatusRequest(transactionId);
    FutureTask<String> futureTask = new FutureTask<String>(mePinStatus);
    ExecutorService executor = Executors.newFixedThreadPool(1);
    executor.execute(futureTask);
    return Response.status(200).build();
}
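Note that mepinConfirm creates a fresh single-thread pool on every request and never shuts it down, so each call leaves a live non-daemon thread behind; the FutureTask's result is also never read. A minimal sketch of the same fire-off-a-status-check with cleanup; MePinStatusRequest must implement Callable<String>, since the FutureTask<String> constructor above requires it:

    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<String> future = executor.submit(mePinStatus); // submit() returns the Future directly
    executor.shutdown(); // the pool's one thread exits as soon as the status call completes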
From source file:edu.cmu.tetrad.data.DataUtils.java
public static ICovarianceMatrix covarianceNonparanormalDrton(DataSet dataSet) {
    final CovarianceMatrix covMatrix = new CovarianceMatrix(dataSet);
    final TetradMatrix data = dataSet.getDoubleData();
    final int NTHREADS = Runtime.getRuntime().availableProcessors() * 10;
    final int EPOCH_COUNT = 100000;

    ExecutorService executor = Executors.newFixedThreadPool(NTHREADS);
    int runnableCount = 0;

    for (int _i = 0; _i < dataSet.getNumColumns(); _i++) {
        for (int _j = _i; _j < dataSet.getNumColumns(); _j++) {
            final int i = _i;
            final int j = _j;

            // double tau = StatUtils.rankCorrelation(data.viewColumn(i).toArray(), data.viewColumn(j).toArray());
            Runnable worker = new Runnable() {
                @Override
                public void run() {
                    double tau = StatUtils.kendallsTau(data.getColumn(i).toArray(),
                            data.getColumn(j).toArray());
                    covMatrix.setValue(i, j, tau);
                    covMatrix.setValue(j, i, tau);
                }
            };

            executor.execute(worker);

            if (runnableCount < EPOCH_COUNT) {
                runnableCount++;
            } else {
                // Drain the pool every EPOCH_COUNT tasks so the work queue cannot grow without bound.
                executor.shutdown();
                try {
                    // Wait until all threads are finished.
                    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
                    System.out.println("Finished all threads");
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                executor = Executors.newFixedThreadPool(NTHREADS);
                runnableCount = 0;
            }
        }
    }

    executor.shutdown();
    try {
        // Wait until all threads are finished.
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        System.out.println("Finished all threads");
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    return covMatrix;
}
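The epoch bookkeeping above exists because a fixed thread pool queues submitted Runnables without limit; shutting down and recreating the pool every EPOCH_COUNT tasks caps how much pending work accumulates. If queue memory were not a concern, a minimal alternative would enqueue everything and wait once; a sketch, where worker(i, j) stands in for constructing the tau worker above and InterruptedException handling is elided:

    ExecutorService executor = Executors.newFixedThreadPool(NTHREADS);
    for (int i = 0; i < dataSet.getNumColumns(); i++) {
        for (int j = i; j < dataSet.getNumColumns(); j++) {
            executor.execute(worker(i, j)); // hypothetical factory for the tau worker
        }
    }
    executor.shutdown();
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);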
From source file:com.comcast.cdn.traffic_control.traffic_router.core.dns.ZoneManager.java
@SuppressWarnings("PMD.CyclomaticComplexity") private static List<Record> createZone(final String domain, final Map<String, List<Record>> zoneMap, final Map<String, DeliveryService> dsMap, final TrafficRouter tr, final LoadingCache<ZoneKey, Zone> zc, final LoadingCache<ZoneKey, Zone> dzc, final ExecutorService initExecutor, final String hostname) throws IOException { final DeliveryService ds = dsMap.get(domain); final CacheRegister data = tr.getCacheRegister(); final JSONObject trafficRouters = data.getTrafficRouters(); final JSONObject config = data.getConfig(); JSONObject ttl = null;/*from w w w . j a v a 2 s .c o m*/ JSONObject soa = null; if (ds != null) { ttl = ds.getTtls(); soa = ds.getSoa(); } else { ttl = config.optJSONObject("ttls"); soa = config.optJSONObject("soa"); } final Name name = newName(domain); final List<Record> list = zoneMap.get(domain); final Name admin = newName(ZoneUtils.getAdminString(soa, "admin", "traffic_ops", domain)); list.add(new SOARecord(name, DClass.IN, ZoneUtils.getLong(ttl, "SOA", 86400), getGlueName(ds, trafficRouters.optJSONObject(hostname), name, hostname), admin, ZoneUtils.getLong(soa, "serial", ZoneUtils.getSerial(data.getStats())), ZoneUtils.getLong(soa, "refresh", 28800), ZoneUtils.getLong(soa, "retry", 7200), ZoneUtils.getLong(soa, "expire", 604800), ZoneUtils.getLong(soa, "minimum", 60))); addTrafficRouters(list, trafficRouters, name, ttl, domain, ds); addStaticDnsEntries(list, ds, domain); final List<Record> records = new ArrayList<Record>(); try { final long maxTTL = ZoneUtils.getMaximumTTL(list); records.addAll(signatureManager.generateDSRecords(name, maxTTL)); list.addAll(signatureManager.generateDNSKEYRecords(name, maxTTL)); initExecutor.execute(new Runnable() { @Override public void run() { try { final Zone zone = zc.get(signatureManager.generateZoneKey(name, list)); // cause the zone to be loaded into the new cache final boolean primeDynCache = config.optBoolean("dynamic.cache.primer.enabled", true); final int primerLimit = config.optInt("dynamic.cache.primer.limit", DEFAULT_PRIMER_LIMIT); // prime the dynamic zone cache if (primeDynCache && ds != null && ds.isDns()) { final DNSRequest request = new DNSRequest(); final Name edgeName = newName(getDnsRoutingName(), domain); request.setHostname(edgeName.toString(true)); // Name.toString(true) - omit the trailing dot for (final CacheLocation cacheLocation : data.getCacheLocations()) { final List<Cache> caches = tr.selectCachesByCZ(ds, cacheLocation); if (caches == null) { continue; } // calculate number of permutations if maxDnsIpsForLocation > 0 and we're not using consistent DNS routing int p = 1; if (ds.getMaxDnsIps() > 0 && !tr.isConsistentDNSRouting() && caches.size() > ds.getMaxDnsIps()) { for (int c = caches.size(); c > (caches.size() - ds.getMaxDnsIps()); c--) { p *= c; } } final Set<List<InetRecord>> pset = new HashSet<List<InetRecord>>(); for (int i = 0; i < primerLimit; i++) { final List<InetRecord> records = tr.inetRecordsFromCaches(ds, caches, request); if (!pset.contains(records)) { fillDynamicZone(dzc, zone, edgeName, records, signatureManager.isDnssecEnabled()); pset.add(records); LOGGER.debug("Primed " + ds.getId() + " @ " + cacheLocation.getId() + "; permutation " + pset.size() + "/" + p); } if (pset.size() == p) { break; } } } } } catch (ExecutionException ex) { LOGGER.fatal("Unable to load zone into cache: " + ex.getMessage(), ex); } catch (TextParseException ex) { // only occurs due to newName above LOGGER.fatal("Unable to prime dynamic zone " + domain, ex); } } }); } 
catch (NoSuchAlgorithmException ex) { LOGGER.fatal("Unable to create zone: " + ex.getMessage(), ex); } return records; }
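Here execute() only schedules the cache-priming work: createZone returns its DS records immediately, and whoever injected initExecutor decides when all zones have been primed. A sketch of that caller-side pattern with hypothetical names (the actual ZoneManager wiring is not shown in this excerpt; IOException and InterruptedException handling elided):

    ExecutorService initExecutor = Executors.newFixedThreadPool(initPoolSize);
    for (String domain : zoneMap.keySet()) {
        createZone(domain, zoneMap, dsMap, tr, zc, dzc, initExecutor, hostname); // schedules priming
    }
    initExecutor.shutdown(); // the pool's owner, not createZone, controls its lifecycle
    initExecutor.awaitTermination(10, TimeUnit.MINUTES);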
From source file:org.eclipse.rdf4j.http.server.ProtocolTest.java
/**
 * Test for SES-1861
 *
 * @throws Exception
 */
@Test
public void testConcurrentNamespaceUpdates() throws Exception {
    int limitCount = 1000;
    int limitPrefix = 50;

    Random prng = new Random();

    // String repositoryLocation =
    // Protocol.getRepositoryLocation("http://localhost:8080/openrdf-sesame", "Test-NativeStore");
    String repositoryLocation = TestServer.REPOSITORY_URL;

    ExecutorService threadPool = Executors.newFixedThreadPool(20,
            new ThreadFactoryBuilder().setNameFormat("rdf4j-protocoltest-%d").build());

    for (int count = 0; count < limitCount; count++) {
        final int number = count;
        final int i = prng.nextInt(limitPrefix);
        final String prefix = "prefix" + i;
        final String ns = "http://example.org/namespace" + i;

        final String location = Protocol.getNamespacePrefixLocation(repositoryLocation, prefix);

        Runnable runner = new Runnable() {
            public void run() {
                try {
                    if (number % 2 == 0) {
                        putNamespace(location, ns);
                    } else {
                        deleteNamespace(location);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                    fail("Failed in test: " + number);
                }
            }
        };
        threadPool.execute(runner);
    }
    threadPool.shutdown();
    threadPool.awaitTermination(30000, TimeUnit.MILLISECONDS);
    threadPool.shutdownNow();
}
From source file:org.sakaiproject.contentreview.turnitin.oc.ContentReviewServiceTurnitinOC.java
public void processQueue() {
    log.info("Processing Turnitin OC submission queue");
    // Create new session object to ensure permissions are carried correctly to each new thread
    final Session session = sessionManager.getCurrentSession();
    ExecutorService executor = Executors.newFixedThreadPool(2);
    executor.execute(new Runnable() {
        @Override
        public void run() {
            sessionManager.setCurrentSession(session);
            processUnsubmitted();
        }
    });
    executor.execute(new Runnable() {
        @Override
        public void run() {
            sessionManager.setCurrentSession(session);
            checkForReport();
        }
    });
    executor.shutdown();
    // wait:
    try {
        if (!executor.awaitTermination(30, TimeUnit.MINUTES)) {
            log.error("ContentReviewServiceTurnitinOC.processQueue: time out waiting for executor to complete");
        }
    } catch (InterruptedException e) {
        log.error(e.getMessage(), e);
    }
}
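This is the cleanest wait pattern in the list: shutdown(), a bounded awaitTermination(), and a log line when the deadline passes. What it does not do is reclaim the stuck threads after the timeout; a sketch of the extra step, assuming the two workers respond to interruption:

    if (!executor.awaitTermination(30, TimeUnit.MINUTES)) {
        log.error("processQueue: timed out waiting for executor to complete");
        executor.shutdownNow(); // interrupts the two workers so their threads are not left running
    }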