List of usage examples for java.util.concurrent.ExecutorService.execute(Runnable)
void execute(Runnable command);
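Before the project-specific examples, here is a minimal sketch of the pattern they all share: submit Runnable tasks with execute(), then shut the pool down and wait for termination. The pool size, task count, and task body are illustrative assumptions, not taken from any of the source files below. Note that execute() returns nothing, so unlike submit() there is no Future to inspect; exceptions must be handled inside run().

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecuteExample {
    public static void main(String[] args) throws InterruptedException {
        // Illustrative pool size; real code sizes this for the workload.
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            // execute() takes a Runnable; there is no return value to check.
            executor.execute(new Runnable() {
                public void run() {
                    System.out.println("Task " + taskId + " on " + Thread.currentThread().getName());
                }
            });
        }
        // Stop accepting new tasks, then wait for the submitted ones to finish.
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}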
From source file:org.apache.activemq.broker.jmx.ConcurrentMoveTest.java
public void testConcurrentMove() throws Exception {
    // Send some messages
    connection = connectionFactory.createConnection();
    connection.start();
    Session session = connection.createSession(transacted, authMode);
    destination = createDestination();
    MessageProducer producer = session.createProducer(destination);
    for (int i = 0; i < messageCount; i++) {
        Message message = session.createTextMessage("Message: " + i);
        producer.send(message);
    }

    long usageBeforMove = broker.getPersistenceAdapter().size();
    LOG.info("Store usage:" + usageBeforMove);

    // Now get the QueueViewMBean and purge
    String objectNameStr = broker.getBrokerObjectName().toString();
    objectNameStr += ",destinationType=Queue,destinationName=" + getDestinationString();
    ObjectName queueViewMBeanName = assertRegisteredObjectName(objectNameStr);
    final QueueViewMBean proxy = (QueueViewMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer,
            queueViewMBeanName, QueueViewMBean.class, true);

    final ActiveMQQueue to = new ActiveMQQueue("TO");
    ((RegionBroker) broker.getRegionBroker()).addDestination(broker.getAdminConnectionContext(), to, false);

    ExecutorService executorService = Executors.newCachedThreadPool();
    for (int i = 0; i < 50; i++) {
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    proxy.moveMatchingMessagesTo(null, to.getPhysicalName());
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
    }
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.MINUTES);

    long count = proxy.getQueueSize();
    assertEquals("Queue size", count, 0);
    assertEquals("Browse size", proxy.browseMessages().size(), 0);

    objectNameStr = broker.getBrokerObjectName().toString();
    objectNameStr += ",destinationType=Queue,destinationName=" + to.getQueueName();
    queueViewMBeanName = assertRegisteredObjectName(objectNameStr);
    QueueViewMBean toProxy = (QueueViewMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer,
            queueViewMBeanName, QueueViewMBean.class, true);

    count = toProxy.getQueueSize();
    assertEquals("Queue size", count, messageCount);

    long usageAfterMove = broker.getPersistenceAdapter().size();
    LOG.info("Store usage, before: " + usageBeforMove + ", after:" + usageAfterMove);
    LOG.info("Store size increase:" + FileUtils.byteCountToDisplaySize(usageAfterMove - usageBeforMove));

    assertTrue("Usage not more than doubled", usageAfterMove < (usageBeforMove * 3));

    producer.close();
}
From source file:com.stimulus.archiva.incoming.IAPService.java
public void testConnection(MailboxConnection connection, IAPRunnable.IAPTestCallback testCallback) {
    Config config = Config.getConfig();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    IAPRunnable worker = new IAPRunnable("iap test", testCallback, connection,
            config.getMailboxConnections().getPollingIntervalSecs(), null);
    executor.execute(worker);
}
From source file:org.apache.zeppelin.sap.UniverseInterpreter.java
private UniverseCompleter createOrUpdateUniverseCompleter(InterpreterContext interpreterContext,
        final String buf, final int cursor) throws UniverseException {
    final UniverseCompleter completer;
    if (universeCompleter == null) {
        completer = new UniverseCompleter(3600);
    } else {
        completer = universeCompleter;
    }
    try {
        final String token = client.getToken(interpreterContext.getParagraphId());
        ExecutorService executorService = Executors.newFixedThreadPool(1);
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                completer.createOrUpdate(client, token, buf, cursor);
            }
        });
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        logger.warn("Completion timeout", e);
    } finally {
        try {
            client.closeSession(interpreterContext.getParagraphId());
        } catch (Exception e) {
            logger.error("Error close SAP session", e);
        }
    }
    return completer;
}
From source file:org.olegz.uuid.TimeBasedUUIDGeneratorTests.java
@Test
public void performanceTestAsynch() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(100);
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    for (int i = 0; i < 1000000; i++) {
        executor.execute(new Runnable() {
            public void run() {
                TimeBasedUUIDGenerator.generateId();
            }
        });
    }
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);
    stopWatch.stop();
    System.out.println("Generated 1000000 UUID (async) via TimeBasedUUIDGenerator.generateId(): in "
            + stopWatch.getTotalTimeSeconds() + " seconds");

    executor = Executors.newFixedThreadPool(100);
    stopWatch = new StopWatch();
    stopWatch.start();
    for (int i = 0; i < 1000000; i++) {
        executor.execute(new Runnable() {
            public void run() {
                UUID.randomUUID();
            }
        });
    }
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);
    stopWatch.stop();
    System.out.println("Generated 1000000 UUID (async) via UUID.randomUUID(): in "
            + stopWatch.getTotalTimeSeconds() + " seconds");
}
From source file:org.wso2.carbon.inbound.localfile.LocalFileOneTimePolling.java
/**
 * Start the ExecutorService for watchDirectory.
 */
private void startWatch() {
    ExecutorService executorService = Executors.newFixedThreadPool(LocalFileConstants.THREAD_SIZE);
    executorService.execute(new Runnable() {
        public void run() {
            try {
                watchDirectory();
            } catch (IOException e) {
                log.error("Error while watching the directory." + e.getMessage(), e);
            }
        }
    });
    executorService.shutdown();
}
From source file:ubic.gemma.core.loader.util.fetcher.FtpArchiveFetcher.java
protected void unPack(final File toUnpack) {
    FutureTask<Boolean> future = new FutureTask<>(new Callable<Boolean>() {
        @Override
        @SuppressWarnings("synthetic-access")
        public Boolean call() {
            File extractedFile = new File(FileTools.chompExtension(toUnpack.getAbsolutePath()));
            /*
             * Decide if an existing file is plausibly usable. Err on the side of caution.
             */
            if (allowUseExisting && extractedFile.canRead() && extractedFile.length() >= toUnpack.length()
                    && !FileUtils.isFileNewer(toUnpack, extractedFile)) {
                AbstractFetcher.log.warn("Expanded file exists, skipping re-expansion: " + extractedFile);
                return Boolean.TRUE;
            }

            if (expander != null) {
                expander.setSrc(toUnpack);
                expander.setDest(toUnpack.getParentFile());
                expander.perform();
            } else if (toUnpack.getAbsolutePath().toLowerCase().endsWith("zip")) {
                try {
                    FileTools.unZipFiles(toUnpack.getAbsolutePath());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            } else { // gzip.
                try {
                    FileTools.unGzipFile(toUnpack.getAbsolutePath());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
            return Boolean.TRUE;
        }
    });

    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.execute(future);
    executor.shutdown();

    StopWatch s = new StopWatch();
    s.start();
    while (!future.isDone() && !future.isCancelled()) {
        try {
            Thread.sleep(AbstractFetcher.INFO_UPDATE_INTERVAL);
        } catch (InterruptedException ie) {
            future.cancel(true);
            return;
        }
        AbstractFetcher.log
                .info("Unpacking archive ... " + Math.floor(s.getTime() / 1000.0) + " seconds elapsed");
    }
}
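The fetcher above wraps a Callable in a FutureTask and hands it to execute(); because FutureTask implements Runnable as well as Future, execute() accepts it even though execute() itself returns nothing. A stripped-down sketch of that pattern follows; the task body is a placeholder, not the real unpacking code.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;

public class FutureTaskWithExecute {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // The Callable stands in for long-running work such as expanding an archive.
        FutureTask<Boolean> future = new FutureTask<>(new Callable<Boolean>() {
            public Boolean call() {
                return Boolean.TRUE;
            }
        });
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Submitted via execute(); the FutureTask itself carries the result.
        executor.execute(future);
        executor.shutdown();
        // Callers can poll isDone()/isCancelled() as the fetcher does, or simply block on get().
        System.out.println("result: " + future.get());
    }
}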
From source file:FullReindexer.java
public void reindex() throws IOException {
    ExecutorService executor = Executors.newFixedThreadPool(totalThreads);
    for (int i = 0; i < totalThreads; i++) {
        Worker worker = new Worker(i, totalThreads, readClient, writeClient);
        executor.execute(worker);
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        // Wait until done
    }
    readClient.close();
    writeClient.close();
}
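The empty while loop above busy-waits on isTerminated(). A common alternative, shown below as a sketch rather than the FullReindexer author's code, is to block on awaitTermination with a timeout; the timeout value and fallback here are illustrative assumptions.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        executor.execute(new Runnable() {
            public void run() {
                System.out.println("work");
            }
        });
        executor.shutdown();
        // Blocks until tasks finish or the timeout elapses, instead of spinning on isTerminated().
        if (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
            executor.shutdownNow(); // give up and interrupt any stragglers
        }
    }
}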
From source file:gridool.communication.transport.nio.GridNioServer.java
private static void handleRead(final SocketChannel channel, final SelectionKey key,
        final ByteBuffer sharedReadBuf, final GridTransportListener notifier, final ExecutorService exec) {
    sharedReadBuf.clear();
    final SocketAddress remoteAddr = channel.socket().getRemoteSocketAddress();
    final int bytesRead;
    try {
        bytesRead = channel.read(sharedReadBuf);
    } catch (IOException e) {
        LOG.warn("Failed to read data from client: " + remoteAddr, e);
        NIOUtils.close(key);
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Read " + bytesRead + " bytes from a client socket: " + remoteAddr);
    }
    if (bytesRead == -1) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Remote client closed connection: " + remoteAddr);
        }
        NIOUtils.close(key);
        return;
    } else if (bytesRead == 0) {
        return;
    }

    final GridMessageBuffer msgBuf = (GridMessageBuffer) key.attachment();
    sharedReadBuf.flip();
    while (sharedReadBuf.remaining() > 0) {
        msgBuf.read(sharedReadBuf);
        if (msgBuf.isFilled()) {
            exec.execute(new Runnable() {
                public void run() {
                    final GridCommunicationMessage msg = msgBuf.toMessage();
                    msgBuf.reset();
                    if (LOG.isInfoEnabled()) {
                        LOG.info("Recieved a GridCommunicationMessage [" + msg.getMessageId() + "]");
                    }
                    notifier.notifyListener(msg);
                }
            });
            break;
        }
    }
}
From source file:org.yamj.core.service.ArtworkProcessScheduler.java
@Scheduled(initialDelay = 30000, fixedDelay = 60000)
public void processArtwork() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.artworkprocess.maxThreads", 1);
    if (maxThreads <= 0) {
        if (!messageDisabled) {
            messageDisabled = Boolean.TRUE;
            LOG.info("Artwork processing is disabled");
        }
        return;
    } else {
        messageDisabled = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.artworkprocess.maxResults", 20);
    List<QueueDTO> queueElements = artworkStorageService.getArtworLocatedQueue(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No artwork found to process");
        return;
    }

    LOG.info("Found {} artwork objects to process; process with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        ArtworkProcessRunner worker = new ArtworkProcessRunner(queue, artworkProcessorService);
        executor.execute(worker);
    }
    executor.shutdown();

    // run until all workers have finished
    while (!executor.isTerminated()) {
        try {
            TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException ignore) {
        }
    }

    LOG.debug("Finished artwork processing");
}
From source file:org.openhab.binding.modbus.internal.SimultaneousReadWriteTestCase.java
/**
 * Testing how binding handles simultaneous read and writes coming in.
 *
 * Even though the server in this test is able to handle at most one client at a time the binding
 * queues requests.
 *
 * Note higher artificialServerWait in constructor
 *
 * @throws Exception
 */
@Test
public void testSimultaneousReadWrite() throws Exception {
    binding = new ModbusBinding();
    binding.updated(addSlave(addSlave(newLongPollBindingConfig(), SLAVE_NAME, type, null, 0, READ_COUNT),
            SLAVE2_NAME, type, null, 0, READ_COUNT));
    configureItems(SLAVE_NAME);
    configureItems(SLAVE2_NAME);
    /*
     * - both slaves read twice -> 4 read requests
     * - followed by write (slave1) -> 1 write request
     * - both slaves read once -> 2 read requests.
     * - Finally three writes (slave2) -> 3 write requets
     */
    int expectedRequests = 10;
    ExecutorService pool = Executors.newFixedThreadPool(expectedRequests);
    binding.execute();
    pool.execute(new UpdateThread(binding));
    pool.execute(new WriteCommandThread(binding, SLAVE_NAME, command));
    pool.execute(new UpdateThread(binding));
    pool.execute(new WriteCommandThread(binding, SLAVE2_NAME, command));
    pool.execute(new WriteCommandThread(binding, SLAVE2_NAME, command));
    pool.execute(new WriteCommandThread(binding, SLAVE2_NAME, command));

    pool.shutdown();
    pool.awaitTermination(artificialServerWait * 7 + 5000, TimeUnit.MILLISECONDS);
    waitForRequests(expectedRequests);

    ArrayList<ModbusRequest> values = modbustRequestCaptor.getAllReturnValues();
    System.err.println(values);
    int readCount = 0;
    int writeCount = 0;
    for (ModbusRequest request : values) {
        if (request instanceof ReadMultipleRegistersRequest) {
            readCount++;
        } else if (request instanceof WriteSingleRegisterRequest) {
            writeCount++;
        }
    }
    Assert.assertEquals(6, readCount);
    Assert.assertEquals(4, writeCount);
}