List of usage examples for java.util.concurrent.ExecutorService.shutdownNow()
List<Runnable> shutdownNow();
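shutdownNow() attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution. Most of the examples below pair it with shutdown() and awaitTermination(). As a reference point before the project-specific examples, here is a minimal sketch of the two-phase shutdown idiom recommended by the ExecutorService javadoc; the ShutdownExample class name and the fixed-size pool are illustrative, not taken from any of the source files below.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownExample {
    static void shutdownAndAwaitTermination(ExecutorService pool) {
        pool.shutdown(); // Disable new tasks from being submitted
        try {
            // Wait a while for existing tasks to terminate
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                // Cancel currently executing tasks and collect the ones that never started
                List<Runnable> dropped = pool.shutdownNow();
                System.out.println(dropped.size() + " task(s) never commenced execution");
                // Wait a while for tasks to respond to being cancelled
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException ie) {
            // (Re-)cancel if the current thread was interrupted while waiting
            pool.shutdownNow();
            // Preserve the interrupt status for callers
            Thread.currentThread().interrupt();
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.submit(() -> System.out.println("task running"));
        shutdownAndAwaitTermination(pool);
    }
}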
From source file: learn.jersey.services.MultiThreadedClientExample.java

@Override
public int run(String[] args) throws Exception {
    if (args.length < 1 || args.length > 2) {
        System.out.println("Usage: " + this.getClass().getName() + " tableName [num_operations]");
        return -1;
    }
    final TableName tableName = TableName.valueOf(args[0]);
    int numOperations = DEFAULT_NUM_OPERATIONS;

    // The second arg is the number of operations to send.
    if (args.length == 2) {
        numOperations = Integer.parseInt(args[1]);
    }

    // Threads for the client only.
    //
    // We don't want to mix hbase and business logic.
    // ('threads' and 'internalPool' are fields of the enclosing class, not shown in this excerpt.)
    ExecutorService service = new ForkJoinPool(threads * 2);

    // Create two different connections showing how it's possible to
    // separate different types of requests onto different connections.
    final Connection writeConnection = ConnectionFactory.createConnection(getConf(), service);
    final Connection readConnection = ConnectionFactory.createConnection(getConf(), service);

    // At this point the entire cache for the region locations is full.
    // Only do this if the number of regions in a table is easy to fit into memory.
    //
    // If you are interacting with more than 25k regions on a client, then
    // it's probably not good to do this at all.
    warmUpConnectionCache(readConnection, tableName);
    warmUpConnectionCache(writeConnection, tableName);

    List<Future<Boolean>> futures = new ArrayList<>(numOperations);
    for (int i = 0; i < numOperations; i++) {
        double r = ThreadLocalRandom.current().nextDouble();
        Future<Boolean> f;
        // For the sake of generating some synthetic load this queues some different callables.
        // These callables are meant to represent real work done by your application.
        if (r < .30) {
            f = internalPool.submit(new WriteExampleCallable(writeConnection, tableName));
        } else if (r < .50) {
            f = internalPool.submit(new SingleWriteExampleCallable(writeConnection, tableName));
        } else {
            // Reads use the dedicated read connection.
            f = internalPool.submit(new ReadExampleCallable(readConnection, tableName));
        }
        futures.add(f);
    }

    // Wait a long time for all the reads/writes to complete.
    for (Future<Boolean> f : futures) {
        f.get(10, TimeUnit.MINUTES);
    }

    // Clean up after ourselves for cleanliness.
    internalPool.shutdownNow();
    service.shutdownNow();
    return 0;
}
From source file: org.apache.metamodel.jdbc.JdbcDataContextTest.java

public void testReleaseConnectionsInCompiledQuery() throws Exception {
    final int connectionPoolSize = 2;
    final int threadCount = 4;
    final int noOfCallsPerThreads = 30;

    final BasicDataSource ds = new BasicDataSource();
    ds.setDriverClassName("org.hsqldb.jdbcDriver");
    ds.setUrl("jdbc:hsqldb:res:metamodel");
    ds.setInitialSize(connectionPoolSize);
    ds.setMaxActive(connectionPoolSize);
    ds.setMaxWait(10000);
    ds.setMinEvictableIdleTimeMillis(1800000);
    ds.setMinIdle(0);
    ds.setMaxIdle(connectionPoolSize);
    ds.setNumTestsPerEvictionRun(3);
    ds.setTimeBetweenEvictionRunsMillis(-1);
    ds.setDefaultTransactionIsolation(java.sql.Connection.TRANSACTION_READ_COMMITTED);

    final JdbcDataContext dataContext = new JdbcDataContext(ds,
            new TableType[] { TableType.TABLE, TableType.VIEW }, null);

    final JdbcCompiledQuery compiledQuery = (JdbcCompiledQuery) dataContext.query().from("CUSTOMERS")
            .select("CUSTOMERNAME").where("CUSTOMERNUMBER").eq(new QueryParameter()).compile();
    assertEquals(0, compiledQuery.getActiveLeases());
    assertEquals(0, compiledQuery.getIdleLeases());

    final String compiledQueryString = compiledQuery.toSql();
    assertEquals(
            "SELECT _CUSTOMERS_._CUSTOMERNAME_ FROM PUBLIC._CUSTOMERS_ WHERE _CUSTOMERS_._CUSTOMERNUMBER_ = ?",
            compiledQueryString.replace('\"', '_'));
    assertEquals(0, compiledQuery.getActiveLeases());
    assertEquals(0, compiledQuery.getIdleLeases());

    ExecutorService executorService = Executors.newFixedThreadPool(threadCount);
    final CountDownLatch latch = new CountDownLatch(threadCount);
    final List<Throwable> errors = new ArrayList<Throwable>();
    final Runnable runnable = new Runnable() {
        @Override
        public void run() {
            try {
                for (int i = 0; i < noOfCallsPerThreads; i++) {
                    final DataSet dataSet = dataContext.executeQuery(compiledQuery, new Object[] { 103 });
                    try {
                        assertTrue(dataSet.next());
                        Row row = dataSet.getRow();
                        assertNotNull(row);
                        assertEquals("Atelier graphique", row.getValue(0).toString());
                        assertFalse(dataSet.next());
                    } finally {
                        dataSet.close();
                    }
                }
            } catch (Throwable e) {
                errors.add(e);
            } finally {
                latch.countDown();
            }
        }
    };

    for (int i = 0; i < threadCount; i++) {
        executorService.execute(runnable);
    }

    try {
        latch.await(60000, TimeUnit.MILLISECONDS);
        if (errors.size() > 0) {
            throw new IllegalStateException(errors.get(0));
        }
        assertTrue(true);
    } finally {
        executorService.shutdownNow();
    }

    assertEquals(0, compiledQuery.getActiveLeases());
    compiledQuery.close();
    assertEquals(0, compiledQuery.getActiveLeases());
    assertEquals(0, compiledQuery.getIdleLeases());
}
From source file: org.apache.phoenix.monitoring.PhoenixMetricsIT.java

@Test
public void testGetConnectionsForSameUrlConcurrently() throws Exception {
    // Establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver.
    String zkQuorum = "localhost:" + getUtility().getZkCluster().getClientPort();
    String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
    ExecutorService exec = Executors.newFixedThreadPool(10);
    try {
        GLOBAL_HCONNECTIONS_COUNTER.getMetric().reset();
        GLOBAL_QUERY_SERVICES_COUNTER.getMetric().reset();
        assertEquals(0, GLOBAL_HCONNECTIONS_COUNTER.getMetric().getValue());
        assertEquals(0, GLOBAL_QUERY_SERVICES_COUNTER.getMetric().getValue());
        List<Callable<Connection>> callables = new ArrayList<>(100);
        List<Future<Connection>> futures = new ArrayList<>(100);
        int expectedHConnections = numConnections.get() > 0 ? 0 : 1;
        for (int i = 1; i <= 100; i++) {
            Callable<Connection> c = new GetConnectionCallable(url);
            callables.add(c);
            futures.add(exec.submit(c));
        }
        for (int i = 0; i < futures.size(); i++) {
            Connection c = futures.get(i).get();
            try {
                c.close();
            } catch (Exception ignore) {
            }
        }
        assertEquals(expectedHConnections, GLOBAL_HCONNECTIONS_COUNTER.getMetric().getValue());
        assertEquals(expectedHConnections, GLOBAL_QUERY_SERVICES_COUNTER.getMetric().getValue());
    } finally {
        exec.shutdownNow();
    }
}
From source file: org.apache.hadoop.hbase.client.ConnectionImplementation.java

private void shutdownBatchPool(ExecutorService pool) {
    pool.shutdown();
    try {
        // Give in-flight batch operations a grace period before forcing termination.
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow();
        }
    } catch (InterruptedException e) {
        pool.shutdownNow();
    }
}
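This is a compact version of the two-phase idiom sketched at the top of this page. Note that the javadoc variant additionally calls Thread.currentThread().interrupt() in the catch block so that callers can still observe the interruption.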
From source file: io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testParallelGetCurrentSegments() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<StreamSegments> streamSegments;
                    streamSegments = controllerClient.getCurrentSegments("scope1", "streamparallel");
                    assertTrue(streamSegments.get().getSegments().size() == 2);
                    assertEquals(new Segment("scope1", "streamparallel", 0),
                            streamSegments.get().getSegmentForKey(0.2));
                    assertEquals(new Segment("scope1", "streamparallel", 1),
                            streamSegments.get().getSegmentForKey(0.6));
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when getting segments", e);
                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}
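The semaphore starts at -19 permits so that the main thread's createCount.acquire() only proceeds after all 20 releases (10 threads, 2 iterations each) have brought the permit count up to 1. On failure, a thread releases 20 permits at once, unblocking the main thread immediately so shutdownNow() can tear down the pool without waiting for the remaining workers.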
From source file: nl.privacybarometer.privacyvandaag.service.FetcherService.java

private int refreshFeeds(final long keepDateBorderTime) {
    ContentResolver cr = getContentResolver();
    final Cursor cursor = cr.query(FeedColumns.CONTENT_URI, FeedColumns.PROJECTION_ID, null, null, null);
    int nbFeed = (cursor != null) ? cursor.getCount() : 0;

    ExecutorService executor = Executors.newFixedThreadPool(THREAD_NUMBER, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setPriority(Thread.MIN_PRIORITY);
            return t;
        }
    });
    CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);

    while (cursor != null && cursor.moveToNext()) {
        final String feedId = cursor.getString(0);
        completionService.submit(new Callable<Integer>() {
            @Override
            public Integer call() {
                int result = 0;
                try {
                    result = refreshFeed(feedId, keepDateBorderTime);
                } catch (Exception e) {
                    Log.e(TAG, "Error refreshing feed " + e.getMessage());
                }
                return result;
            }
        });
    }
    if (cursor != null) {
        cursor.close();
    }

    int globalResult = 0;
    for (int i = 0; i < nbFeed; i++) {
        try {
            // ModPrivacyVandaag: the count of new articles after a feed is refreshed
            Future<Integer> f = completionService.take();
            globalResult += f.get();
        } catch (Exception e) {
            Log.e(TAG, "Error counting new articles " + e.getMessage());
        }
    }
    executor.shutdownNow(); // To purge all threads

    // ModPrivacyVandaag: As far as I can see, this contains the number of new articles from a refresh of the feeds.
    return globalResult;
}
From source file: io.druid.segment.realtime.firehose.EventReceiverFirehoseTest.java

@Test
public void testMultipleThreads()
        throws InterruptedException, IOException, TimeoutException, ExecutionException {
    EasyMock.expect(req.getContentType()).andReturn("application/json").times(2 * NUM_EVENTS);
    EasyMock.replay(req);

    final ExecutorService executorService = Execs.singleThreaded("single_thread");
    final Future<Boolean> future = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            for (int i = 0; i < NUM_EVENTS; ++i) {
                final InputStream inputStream = IOUtils.toInputStream(inputRow);
                firehose.addAll(inputStream, req);
                inputStream.close();
            }
            return true;
        }
    });

    for (int i = 0; i < NUM_EVENTS; ++i) {
        final InputStream inputStream = IOUtils.toInputStream(inputRow);
        firehose.addAll(inputStream, req);
        inputStream.close();
    }

    future.get(10, TimeUnit.SECONDS);
    EasyMock.verify(req);

    final Iterable<Map.Entry<String, EventReceiverFirehoseMetric>> metrics = register.getMetrics();
    Assert.assertEquals(1, Iterables.size(metrics));

    final Map.Entry<String, EventReceiverFirehoseMetric> entry = Iterables.getLast(metrics);
    Assert.assertEquals(SERVICE_NAME, entry.getKey());
    Assert.assertEquals(CAPACITY, entry.getValue().getCapacity());
    Assert.assertEquals(CAPACITY, firehose.getCapacity());
    Assert.assertEquals(2 * NUM_EVENTS, entry.getValue().getCurrentBufferSize());
    Assert.assertEquals(2 * NUM_EVENTS, firehose.getCurrentBufferSize());

    for (int i = 2 * NUM_EVENTS - 1; i >= 0; --i) {
        Assert.assertTrue(firehose.hasMore());
        Assert.assertNotNull(firehose.nextRow());
        Assert.assertEquals(i, firehose.getCurrentBufferSize());
    }

    Assert.assertEquals(CAPACITY, entry.getValue().getCapacity());
    Assert.assertEquals(CAPACITY, firehose.getCapacity());
    Assert.assertEquals(0, entry.getValue().getCurrentBufferSize());
    Assert.assertEquals(0, firehose.getCurrentBufferSize());

    firehose.close();
    Assert.assertFalse(firehose.hasMore());
    Assert.assertEquals(0, Iterables.size(register.getMetrics()));

    executorService.shutdownNow();
}
From source file: com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run queries at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. The main thread inserts queries into the
 * queue at the target QPS, and <code>numThreads</code> worker threads fetch queries from the queue and send them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *          them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }
        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info(
                        "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                        startQPS, timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queued queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            startQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
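Note the two-stage teardown in this runner: shutdown() is called as soon as the fixed set of workers is submitted, so the pool accepts no further tasks while the workers keep draining the query queue, and shutdownNow() is only issued once the queue is empty, interrupting the workers' polling loops.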
From source file: com.mirth.connect.server.controllers.DonkeyEngineController.java

protected void shutdownExecutor(String channelId) {
    ExecutorService engineExecutor = engineExecutors.get(channelId);

    if (engineExecutor != null) {
        List<Runnable> tasks = engineExecutor.shutdownNow();

        // Cancel any tasks that had not yet started. Otherwise those tasks would be blocked at future.get() indefinitely.
        for (Runnable task : tasks) {
            ((Future<?>) task).cancel(true);
        }
    }
}
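The cast works because executor services based on AbstractExecutorService (such as ThreadPoolExecutor) wrap each task passed to submit() in a RunnableFuture (typically a FutureTask). The Runnable objects that shutdownNow() returns are therefore those same futures, and cancelling them unblocks any threads parked in Future.get().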
From source file: org.apache.phoenix.monitoring.PhoenixMetricsIT.java

@Test
public void testGetConnectionsForDifferentTenantsConcurrently() throws Exception {
    // Establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver.
    String zkQuorum = "localhost:" + getUtility().getZkCluster().getClientPort();
    String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
    ExecutorService exec = Executors.newFixedThreadPool(10);
    try {
        GLOBAL_HCONNECTIONS_COUNTER.getMetric().reset();
        GLOBAL_QUERY_SERVICES_COUNTER.getMetric().reset();
        assertEquals(0, GLOBAL_HCONNECTIONS_COUNTER.getMetric().getValue());
        assertEquals(0, GLOBAL_QUERY_SERVICES_COUNTER.getMetric().getValue());
        int expectedHConnections = numConnections.get() > 0 ? 0 : 1;
        List<Callable<Connection>> callables = new ArrayList<>(100);
        List<Future<Connection>> futures = new ArrayList<>(100);
        for (int i = 1; i <= 100; i++) {
            String tenantUrl = url + ';' + TENANT_ID_ATTRIB + '=' + i;
            Callable<Connection> c = new GetConnectionCallable(tenantUrl + ";");
            callables.add(c);
            futures.add(exec.submit(c));
        }
        for (int i = 0; i < futures.size(); i++) {
            Connection c = futures.get(i).get();
            try {
                c.close();
            } catch (Exception ignore) {
            }
        }
        assertEquals(expectedHConnections, GLOBAL_HCONNECTIONS_COUNTER.getMetric().getValue());
        assertEquals(expectedHConnections, GLOBAL_QUERY_SERVICES_COUNTER.getMetric().getValue());
    } finally {
        exec.shutdownNow();
    }
}