Example usage for java.util.concurrent ExecutorService execute

List of usage examples for java.util.concurrent ExecutorService execute

Introduction

In this page you can find the example usage for java.util.concurrent ExecutorService execute.

Prototype

void execute(Runnable command);

Source Link

Document

Executes the given command at some time in the future.

Usage

From source file:org.yamj.core.service.ScanningScheduler.java

@Scheduled(initialDelay = 5000, fixedDelay = 45000)
public void scanMediaData() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.mediadatascan.maxThreads", 1);
    if (maxThreads <= 0) {
        // Log the "disabled" message only once until scanning is re-enabled.
        if (!messageDisabledMediaData) {
            messageDisabledMediaData = Boolean.TRUE;
            LOG.info("Media data scanning is disabled");
        }
        return;
    } else {
        messageDisabledMediaData = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.mediadatascan.maxResults", 20);
    List<QueueDTO> queueElements = metadataStorageService.getMediaQueueForScanning(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No media data found to scan");
        return;
    }

    LOG.info("Found {} media data objects to process; scan with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    // Each worker drains the shared queue until it is empty.
    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        PluginMetadataRunner worker = new PluginMetadataRunner(queue, pluginMetadataService);
        executor.execute(worker);
    }
    executor.shutdown();

    // Block until all workers have finished. awaitTermination() parks the
    // thread instead of the previous busy-poll/sleep loop, and the interrupt
    // flag is restored rather than swallowed.
    while (!executor.isTerminated()) {
        try {
            executor.awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }

    LOG.debug("Finished media data scanning");
}

From source file:org.yamj.core.service.ScanningScheduler.java

@Scheduled(initialDelay = 15000, fixedDelay = 45000)
public void scanArtwork() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.artworkscan.maxThreads", 1);
    if (maxThreads <= 0) {
        // Log the "disabled" message only once until scanning is re-enabled.
        if (!messageDisabledArtwork) {
            messageDisabledArtwork = Boolean.TRUE;
            LOG.info("Artwork scanning is disabled");
        }
        return;
    } else {
        messageDisabledArtwork = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.artworkscan.maxResults", 30);
    List<QueueDTO> queueElements = artworkStorageService.getArtworkQueueForScanning(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No artwork found to scan");
        return;
    }

    LOG.info("Found {} artwork objects to process; scan with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    // Each worker drains the shared queue until it is empty.
    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        ArtworkScannerRunner worker = new ArtworkScannerRunner(queue, artworkScannerService);
        executor.execute(worker);
    }
    executor.shutdown();

    // Block until all workers have finished. awaitTermination() parks the
    // thread instead of the previous busy-poll/sleep loop, and the interrupt
    // flag is restored rather than swallowed.
    while (!executor.isTerminated()) {
        try {
            executor.awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }

    LOG.debug("Finished artwork scanning");
}

From source file:org.hyperic.hq.agent.server.CommandListener.java

/**
 * The main loop which blocks, waiting for connections.  Connections are
 * handled in a synchronous manner -- one connection is not processed
 * until the previous one is finished.
 */
void listenLoop() {
    boolean logDebug = log.isDebugEnabled();
    // Clear the flag before entering the loop; another thread sets it to
    // request that this loop terminate.
    shutdown.set(false);
    while (!shutdown.get()) {
        try {
            try {
                final AgentServerConnection conn = listener.get().getNewConnection();
                if (logDebug)
                    log.debug("Opened new connection");
                final AgentCommand cmd = conn.readCommand();
                // Pool selection depends on the command type (see getPool).
                final ExecutorService pool = getPool(cmd);
                if (logDebug)
                    log.debug("Dispatching command " + cmd.getCommand() + " to pool: " + pool);
                pool.execute(new AgentDispatchTask(conn, cmd));
                if (logDebug)
                    log.debug("Done dispatching command " + cmd.getCommand() + " to pool " + pool);
            } catch (EOFException e) {
                // Peer closed the connection before a full command arrived;
                // routine enough to warrant only debug logging.
                log.debug(e, e);
            } catch (InterruptedIOException e) {
                // A blocking read timed out or was interrupted: if we are
                // shutting down, clean up the listener and exit; otherwise
                // go back to accepting connections.
                if (shutdown.get()) {
                    listener.get().cleanup();
                    return;
                }
                continue;
            } catch (AgentConnectionException e) {
                // Suppress connection errors during shutdown; they are
                // expected when the listener is being torn down.
                if (!shutdown.get()) {
                    log.error("Failed handling new connection: " + e, e);
                }
                continue;
            }
        } catch (Throwable t) {
            // only log to error if the agent isn't shutting down
            if (!shutdown.get()) {
                log.error(t, t);
            } else {
                log.debug(t, t);
            }
        }
    }
}

From source file:org.yamj.core.service.ScanningScheduler.java

@Scheduled(initialDelay = 10000, fixedDelay = 45000)
public void scanPeopleData() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.peoplescan.maxThreads", 1);
    if (maxThreads <= 0) {
        // Log the "disabled" message only once until scanning is re-enabled.
        if (!messageDisabledPeople) {
            messageDisabledPeople = Boolean.TRUE;
            LOG.info("People scanning is disabled");
        }
        return;
    } else {
        messageDisabledPeople = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.peoplescan.maxResults", 50);
    List<QueueDTO> queueElements = metadataStorageService.getPersonQueueForScanning(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No people data found to scan");
        return;
    }

    LOG.info("Found {} people objects to process; scan with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    // Each worker drains the shared queue until it is empty.
    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        PluginMetadataRunner worker = new PluginMetadataRunner(queue, pluginMetadataService);
        executor.execute(worker);
    }
    executor.shutdown();

    // Block until all workers have finished. awaitTermination() parks the
    // thread instead of the previous busy-poll/sleep loop, and the interrupt
    // flag is restored rather than swallowed.
    while (!executor.isTerminated()) {
        try {
            executor.awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }

    LOG.debug("Finished people data scanning");
}

From source file:com.cisco.oss.foundation.monitoring.service.TestMultiService.java

@Ignore
@Test
public void testHistogram() {
    // Sliding-window length in seconds; also used to derive a TPS figure
    // from the snapshot size below.
    final int tpsTime = 10;
    final Histogram tpsHistogram = new Histogram(new SlidingTimeWindowReservoir(tpsTime, TimeUnit.SECONDS));

    ExecutorService threadPool = Executors.newFixedThreadPool(35);

    int numOfServices = 600000;

    final CountDownLatch latch = new CountDownLatch(numOfServices);

    for (int i = 0; i < numOfServices; i++) {

        final int index = i;
        threadPool.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    // Fixed: use short-circuit && instead of the bitwise &
                    // that was used for this boolean range check.
                    if (index >= 10000 && index < 15000) {
                        Thread.sleep(50);
                    } else if (index % 100 == 0) {
                        Thread.sleep(1000);
                        tpsHistogram.update(index);
                    } else if (index % 1000 == 0) {
                        // NOTE(review): unreachable -- every multiple of 1000
                        // is also a multiple of 100 and is caught above.
                        // Left in place to preserve existing timing behavior.
                        Thread.sleep(15000);
                        tpsHistogram.update(index);
                    } else if (index % 2 == 0) {
                        Thread.sleep(10);
                        tpsHistogram.update(index);
                    } else {
                        Thread.sleep(50);
                        tpsHistogram.update(index);
                    }
                } catch (InterruptedException e) {
                    // Restore the interrupt flag instead of swallowing it.
                    Thread.currentThread().interrupt();
                    e.printStackTrace();
                }

                if (index % 1000 == 0) {
                    int total = tpsHistogram.getSnapshot().getValues().length;
                    System.out.println("index: " + index + ", count: " + tpsHistogram.getCount()
                            + ", snapshot count: " + total);
                    int tps = total / tpsTime;
                    System.out.println("TPS: " + tps);
                }

                latch.countDown();
            }
        });
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }

    // All tasks have completed (or the wait was interrupted); release the
    // pool's threads instead of leaking them.
    threadPool.shutdown();
}

From source file:org.kie.workbench.common.services.datamodel.backend.server.ProjectDataModelConcurrencyTest.java

@Test
public void testConcurrentResourceUpdates() throws URISyntaxException {
    final URL pomUrl = this.getClass().getResource("/DataModelBackendTest1/pom.xml");
    final org.uberfire.java.nio.file.Path nioPomPath = ioService.get(pomUrl.toURI());
    final Path pomPath = paths.convert(nioPomPath);

    final URL resourceUrl = this.getClass().getResource("/DataModelBackendTest1/src/main/resources/empty.rdrl");
    final org.uberfire.java.nio.file.Path nioResourcePath = ioService.get(resourceUrl.toURI());
    final Path resourcePath = paths.convert(nioResourcePath);

    //Force full build before attempting incremental changes
    final KieProject project = projectService.resolveProject(resourcePath);
    final BuildResults buildResults = buildService.build(project);
    assertNotNull(buildResults);
    assertEquals(0, buildResults.getErrorMessages().size());
    assertEquals(1, buildResults.getInformationMessages().size());

    //Perform incremental build: interleave POM updates, resource updates and
    //DataModel reads across many threads; any thread-level failure is
    //recorded in 'result' and reported after all threads finish.
    final int THREADS = 200;
    final Result result = new Result();
    ExecutorService es = Executors.newCachedThreadPool();
    for (int i = 0; i < THREADS; i++) {
        final int operation = (i % 3);

        switch (operation) {
        case 0:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request to update POM received");
                        invalidateCaches(project, pomPath);
                        buildChangeListener.updateResource(pomPath);
                        logger.debug("[Thread: " + Thread.currentThread().getName() + "] POM update completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
            break;
        case 1:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request to update Resource received");
                        invalidateCaches(project, resourcePath);
                        buildChangeListener.addResource(resourcePath);
                        logger.debug(
                                "[Thread: " + Thread.currentThread().getName() + "] Resource update completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
            break;
        case 2:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request for DataModel received");
                        dataModelService.getDataModel(resourcePath);
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] DataModel request completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
            break; // added: guard against fall-through if cases are appended later
        }
    }

    es.shutdown();
    try {
        // Fail fast if the workers do not finish in time: the original code
        // ignored awaitTermination's return value and could assert against a
        // still-running pool.
        if (!es.awaitTermination(5, TimeUnit.MINUTES)) {
            fail("Timed out waiting for concurrent updates to complete");
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        fail("Interrupted while waiting for concurrent updates to complete");
    }
    if (result.isFailed()) {
        fail(result.getMessage());
    }

}

From source file:pt.webdetails.cdc.core.MondrianOverseer.java

public int reloadMondrianCache() {
    // Hazelcast distributed executor: the reinsertion task is sent to every
    // member of the cluster, not run only locally.
    ExecutorService execService = HazelcastManager.INSTANCE.getHazelcast().getExecutorService();

    MultiTask<Integer> reloadMondrianCache = new MultiTask<Integer>(
            new DistributedMapReinsertion(ICdcConfig.CacheMaps.MONDRIAN_MAP),
            HazelcastManager.INSTANCE.getHazelcast().getCluster().getMembers());
    execService.execute(reloadMondrianCache);
    //TODO: add timeout -- get() below blocks indefinitely if a member hangs
    try {
        // One Integer per cluster member; presumably each is that member's
        // reinserted-entry count -- TODO confirm against
        // DistributedMapReinsertion.
        Collection<Integer> counts = reloadMondrianCache.get();
        int total = 0;
        for (int count : counts) {
            total += count;
        }
        return total;
    } catch (Exception e) {
        // Best-effort: -1 signals the reload could not be confirmed.
        logger.error(e);
        return -1;
    }
}

From source file:de.elomagic.mag.AbstractTest.java

/**
 * Creates and starts a background task that polls the given servlet mock
 * until it reports a received request, then completes with that flag.
 *
 * @param putServlet the servlet mock whose {@code received} flag is polled
 * @return a future that completes with {@code true} once a request arrives
 */
protected Future<Boolean> createPutServletFuture(ServletMock putServlet) {

    // Only one task is ever submitted, so a single worker thread suffices
    // (the original allocated a pool of 2 and never shut it down).
    ExecutorService executor = Executors.newSingleThreadExecutor();

    FutureTask<Boolean> futureTask = new FutureTask<>(() -> {
        // Poll every 100 ms until the servlet observes a request.
        while (!putServlet.received) {
            Thread.sleep(100);
        }

        return putServlet.received;
    });

    executor.execute(futureTask);
    // No more tasks will be submitted; let the worker thread terminate once
    // the polling task completes instead of leaking the executor.
    executor.shutdown();

    return futureTask;
}

From source file:com.linkedin.pinot.tools.segment.converter.DictionaryToRawIndexConverter.java

/**
 * Method to perform the conversion for a set of segments in the {@link #_dataDir}
 *
 * @return True if successful, False otherwise
 * @throws Exception
 */
public boolean convert() throws Exception {
    if (_help) {
        printUsage();
        return true;
    }

    File dataDir = new File(_dataDir);
    File outputDir = new File(_outputDir);

    if (!dataDir.exists()) {
        LOGGER.error("Data directory '{}' does not exist.", _dataDir);
        return false;
    } else if (outputDir.exists()) {
        if (_overwrite) {
            LOGGER.info("Overwriting existing output directory '{}'", _outputDir);
            FileUtils.deleteQuietly(outputDir);
            outputDir = new File(_outputDir);
            outputDir.mkdir();
        } else {
            LOGGER.error("Output directory '{}' already exists, use -overwrite to overwrite", outputDir);
            return false;
        }
    }

    File[] segmentFiles = dataDir.listFiles();
    if (segmentFiles == null || segmentFiles.length == 0) {
        LOGGER.error("Empty data directory '{}'.", _dataDir);
        return false;
    }

    // Single-element array lets the anonymous Runnables record a failure;
    // the original always returned true even when a segment conversion threw.
    final boolean[] failed = new boolean[1];
    final File outDir = outputDir;
    ExecutorService executorService = Executors.newFixedThreadPool(_numThreads);
    for (final File segmentDir : segmentFiles) {
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    convertSegment(segmentDir, _columns.split("\\s*,\\s*"), outDir, _compressOutput);
                } catch (Exception e) {
                    // LOGGER.error already includes the stack trace; the
                    // redundant printStackTrace was dropped.
                    LOGGER.error("Exception caught while converting segment {}", segmentDir.getName(), e);
                    failed[0] = true;
                }
            }
        });
    }

    executorService.shutdown();
    // Treat a timeout as failure instead of ignoring awaitTermination's
    // return value as the original did.
    if (!executorService.awaitTermination(1, TimeUnit.HOURS)) {
        LOGGER.error("Timed out waiting for segment conversion to complete");
        return false;
    }
    // Write to failed[0] happens-before this read via executor termination.
    return !failed[0];
}

From source file:com.netflix.aegisthus.io.sstable.SSTableColumnScanner.java

public rx.Observable<AtomWritable> observable() {
    // NOTE(review): a fresh single-thread executor is created per call and
    // never shut down, so its worker thread lingers after the subscription
    // completes. Shutting it down is non-trivial because the Observable may
    // be subscribed more than once -- confirm intended subscription model.
    final ExecutorService service = Executors.newSingleThreadExecutor();
    rx.Observable<AtomWritable> ret = rx.Observable.create(new OnSubscribe<AtomWritable>() {
        @Override
        public void call(final Subscriber<? super AtomWritable> subscriber) {
            // Run deserialization on the dedicated thread so subscribing does
            // not block the caller; completion is signalled when it finishes.
            service.execute(new Runnable() {
                @Override
                public void run() {
                    deserialize(subscriber);
                    subscriber.onCompleted();
                }
            });
        }
    });
    LOG.info("created observable");
    return ret;
}