Example usage for java.util.concurrent ThreadPoolExecutor setRejectedExecutionHandler

Introduction

This page collects example usages of java.util.concurrent.ThreadPoolExecutor.setRejectedExecutionHandler from open source projects.

Prototype

public void setRejectedExecutionHandler(RejectedExecutionHandler handler) 

Document

Sets a new handler for unexecutable tasks.
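
Before the project examples below, here is a minimal, self-contained sketch of the call; the pool sizes, queue capacity, and the CallerRunsPolicy choice are illustrative only.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RejectedHandlerExample {
    public static void main(String[] args) {
        // Small pool with a bounded queue so that rejections can actually happen.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(10));

        // When the queue is full and all threads are busy, run the task on the submitting thread
        // instead of throwing RejectedExecutionException (the default AbortPolicy behavior).
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());

        for (int i = 0; i < 100; i++) {
            final int taskId = i;
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("Task " + taskId + " on " + Thread.currentThread().getName());
                }
            });
        }

        executor.shutdown();
    }
}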

Usage

From source file:dk.dbc.opensearch.datadock.DatadockPool.java

/**
 * Constructs a DatadockPool that runs datadock jobs on the supplied thread pool.
 *
 * @param threadpool the executor used to run datadock jobs
 * @param harvester the harvester that supplies jobs to the pool
 * @param flowMap map of flow identifiers to their plugin tasks
 * @throws ConfigurationException if the pool cannot be configured
 */
public DatadockPool(ThreadPoolExecutor threadpool, IHarvest harvester, Map<String, List<PluginTask>> flowMap)
        throws ConfigurationException {
    log.debug("DatadockPool constructor called");
    this.flowMap = flowMap;
    this.harvester = harvester;
    this.threadpool = threadpool;

    jobs = new HashMap<IIdentifier, FutureTask<Boolean>>();

    threadpool.setRejectedExecutionHandler(new BlockingRejectedExecutionHandler());
}
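
The BlockingRejectedExecutionHandler installed above is a project-specific class whose source is not shown here. As a rough sketch of the general idea - assuming the handler simply blocks the submitting thread until the task fits into the executor's queue - it might look like the following (hypothetical, not the DatadockPool implementation):

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

// Hypothetical blocking handler: instead of dropping or aborting a rejected task,
// block the submitting thread until space frees up in the executor's work queue.
public class BlockingRejectedExecutionHandler implements RejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        if (executor.isShutdown()) {
            throw new RejectedExecutionException("Executor has been shut down");
        }
        try {
            executor.getQueue().put(r); // blocks until the queue accepts the task
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new RejectedExecutionException("Interrupted while waiting to enqueue task", ie);
        }
    }
}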

From source file:com.liferay.portal.search.elasticsearch.internal.connection.EmbeddedElasticsearchConnection.java

@Override
public void close() {
    super.close();

    if (_node == null) {
        return;
    }

    try {
        Class.forName(ByteBufferUtil.class.getName());
    } catch (ClassNotFoundException cnfe) {
        if (_log.isWarnEnabled()) {
            _log.warn(
                    StringBundler.concat("Unable to preload ", String.valueOf(ByteBufferUtil.class),
                            " to prevent Netty shutdown concurrent class loading ", "interruption issue"),
                    cnfe);
        }
    }

    if (PortalRunMode.isTestMode()) {
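        // Note: settingsBuilder is an Elasticsearch Settings builder created elsewhere in the
        // original class; that part of the code is not included in this excerpt.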
        settingsBuilder.put("index.refresh_interval", "-1");
        settingsBuilder.put("index.translog.flush_threshold_ops", Integer.MAX_VALUE);
        settingsBuilder.put("index.translog.interval", "1d");

        Settings settings = settingsBuilder.build();

        Injector injector = _node.injector();

        IndicesService indicesService = injector.getInstance(IndicesService.class);

        Iterator<IndexService> iterator = indicesService.iterator();

        while (iterator.hasNext()) {
            IndexService indexService = iterator.next();

            injector = indexService.injector();

            IndexSettingsService indexSettingsService = injector.getInstance(IndexSettingsService.class);

            indexSettingsService.refreshSettings(settings);
        }

        ThreadPool threadPool = injector.getInstance(ThreadPool.class);

        ScheduledExecutorService scheduledExecutorService = threadPool.scheduler();

        if (scheduledExecutorService instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) scheduledExecutorService;

            threadPoolExecutor.setRejectedExecutionHandler(_REJECTED_EXECUTION_HANDLER);
        }

        scheduledExecutorService.shutdown();

        try {
            scheduledExecutorService.awaitTermination(1, TimeUnit.HOURS);
        } catch (InterruptedException ie) {
            if (_log.isWarnEnabled()) {
                _log.warn("Thread pool shutdown wait was interrupted", ie);
            }
        }
    }

    _node.close();

    _node = null;

    _file.deltree(_jnaTmpDirName);
}
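
The _REJECTED_EXECUTION_HANDLER constant swapped in before shutdown is defined elsewhere in the class; the intent is to keep late task submissions from failing noisily while the scheduler winds down. A plausible stand-in - an assumption, not Liferay's actual field - is a discard-style handler:

// Hypothetical stand-in: silently drop tasks rejected once shutdown has started,
// rather than letting the default policy throw RejectedExecutionException.
private static final RejectedExecutionHandler _REJECTED_EXECUTION_HANDLER =
    new ThreadPoolExecutor.DiscardPolicy();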

From source file:com.attribyte.essem.ApplicationCache.java

ApplicationCache(final AsyncClient client, final RequestOptions requestOptions, final ESEndpoint esEndpoint,
        final Logger logger) {

    this.client = client;
    this.requestOptions = requestOptions;
    this.esEndpoint = esEndpoint;
    this.logger = logger;

    final BlockingQueue<Runnable> requestQueue = new ArrayBlockingQueue<>(4096);
    final Gauge<Integer> requestQueueSize = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return requestQueue.size();
        }
    };

    final ThreadPoolExecutor requestExecutor = new ThreadPoolExecutor(2, 8, 5L, TimeUnit.MINUTES, requestQueue,
            new ThreadFactoryBuilder().setNameFormat("application-cache-%d").build());
    requestExecutor.prestartAllCoreThreads();

    final Counter rejectedRequests = new Counter();
    requestExecutor.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
            rejectedRequests.inc();
        }
    });
    this.requestExecutor = MoreExecutors
            .listeningDecorator(MoreExecutors.getExitingExecutorService(requestExecutor));

    this.appRequestTimer = new Timer();
    this.appRequestErrors = new Counter();

    this.nameRequestTimer = new Timer();
    this.nameRequestErrors = new Counter();

    this.statsRequestTimer = new Timer();
    this.statsRequestErrors = new Counter();

    Gauge<Integer> appCacheSize = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return appCache.size();
        }
    };

    this.metrics = ImmutableMap.<String, com.codahale.metrics.Metric>builder()
            .put("request-queue-size", requestQueueSize).put("rejected-background-requests", rejectedRequests)
            .put("app-requests", appRequestTimer).put("app-request-errors", appRequestErrors)
            .put("name-requests", nameRequestTimer).put("name-request-errors", nameRequestErrors)
            .put("app-cache-size", appCacheSize).put("stats-requests", statsRequestTimer)
            .put("stats-request-errors", statsRequestErrors).build();
}

From source file:dk.netarkivet.harvester.indexserver.CrawlLogIndexCache.java

/** Combine a number of crawl.log files into one Lucene index.  This index
 * is placed as gzip files under the directory returned by getCacheFile().
 *
 * @param rawfiles The map from job ID into crawl.log contents. No
 * null values are allowed in this map.
 */
protected void combine(Map<Long, File> rawfiles) {
    indexingJobCount++;
    long datasetSize = rawfiles.values().size();
    log.info("Starting combine task #" + indexingJobCount + ". This combines a dataset with " + datasetSize
            + " crawl logs (thread = " + Thread.currentThread().getName() + ")");

    File resultDir = getCacheFile(rawfiles.keySet());
    Set<File> tmpfiles = new HashSet<File>();
    String indexLocation = resultDir.getAbsolutePath() + ".luceneDir";
    ThreadPoolExecutor executor = null;
    try {
        DigestIndexer indexer = createStandardIndexer(indexLocation);
        final boolean verboseIndexing = false;
        DigestOptions indexingOptions = new DigestOptions(this.useBlacklist, verboseIndexing, this.mimeFilter);
        long count = 0;
        Set<IndexingState> outstandingJobs = new HashSet<IndexingState>();
        final int maxThreads = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAXTHREADS);
        executor = new ThreadPoolExecutor(maxThreads, maxThreads, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>());

        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());

        for (Map.Entry<Long, File> entry : rawfiles.entrySet()) {
            Long jobId = entry.getKey();
            File crawlLog = entry.getValue();
            // Generate UUID to ensure a unique filedir for the index.
            File tmpFile = new File(FileUtils.getTempDir(), UUID.randomUUID().toString());
            tmpfiles.add(tmpFile);
            String localindexLocation = tmpFile.getAbsolutePath();
            Long cached = cdxcache.cache(jobId);
            if (cached == null) {
                log.warn("Skipping the ingest of logs for job " + entry.getKey()
                        + ". Unable to retrieve cdx-file for job.");
                continue;
            }
            File cachedCDXFile = cdxcache.getCacheFile(cached);

            // Dispatch this indexing task to a separate thread that 
            // handles the sorting of the logfiles and the generation
            // of a lucene index for this crawllog and cdxfile.
            count++;
            String taskID = count + " out of " + datasetSize;
            log.debug("Making subthread for indexing job " + jobId + " - task " + taskID);
            Callable<Boolean> task = new DigestIndexerWorker(localindexLocation, jobId, crawlLog, cachedCDXFile,
                    indexingOptions, taskID);
            Future<Boolean> result = executor.submit(task);
            outstandingJobs.add(new IndexingState(jobId, localindexLocation, result));
        }

        // wait for all the outstanding subtasks to complete.
        Set<Directory> subindices = new HashSet<Directory>();

        // Deadline for the combine-task
        long combineTimeout = Settings.getLong(HarvesterSettings.INDEXSERVER_INDEXING_TIMEOUT);
        long timeOutTime = System.currentTimeMillis() + combineTimeout;

        // The indexwriter for the totalindex.
        IndexWriter totalIndex = indexer.getIndex();
        int subindicesInTotalIndex = 0;
        // Max number of segments in totalindex.
        int maxSegments = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAX_SEGMENTS);

        final int ACCUMULATED_SUBINDICES_BEFORE_MERGING = 200;

        while (outstandingJobs.size() > 0) {
            log.info("Outstanding jobs in combine task #" + indexingJobCount + " is now "
                    + outstandingJobs.size());
            Iterator<IndexingState> iterator = outstandingJobs.iterator();
            if (timeOutTime < System.currentTimeMillis()) {
                log.warn("Max indexing time exceeded for one index ("
                        + TimeUtils.readableTimeInterval(combineTimeout) + "). Indexing stops here, although"
                        + " missing subindices for " + outstandingJobs.size() + " jobs");
                break;
            }
            while (iterator.hasNext() && subindices.size() < ACCUMULATED_SUBINDICES_BEFORE_MERGING) {
                Future<Boolean> nextResult;
                IndexingState next = iterator.next();
                if (next.getResultObject().isDone()) {
                    nextResult = next.getResultObject();
                    try {
                        // check, if the indexing failed
                        if (nextResult.get()) {
                            subindices.add(new SimpleFSDirectory(new File(next.getIndex())));
                        } else {
                            log.warn("Indexing of job " + next.getJobIdentifier() + " failed.");
                        }

                    } catch (InterruptedException e) {
                        log.warn("Unable to get Result back from " + "indexing thread", e);
                    } catch (ExecutionException e) {
                        log.warn("Unable to get Result back from " + "indexing thread", e);
                    }
                    //remove the done object from the set
                    iterator.remove();
                }
            }

            if (subindices.size() >= ACCUMULATED_SUBINDICES_BEFORE_MERGING) {

                log.info("Adding " + subindices.size()
                        + " subindices to main index. Forcing index to contain max " + maxSegments
                        + " files (related to combine task # " + indexingJobCount + ")");
                totalIndex.addIndexes(subindices.toArray(new Directory[0]));
                totalIndex.forceMerge(maxSegments);
                totalIndex.commit();
                for (Directory luceneDir : subindices) {
                    luceneDir.close();
                }
                subindicesInTotalIndex += subindices.size();
                log.info("Completed adding " + subindices.size() + " subindices to main index, now containing "
                        + subindicesInTotalIndex + " subindices" + "(related to combine task # "
                        + indexingJobCount + ")");
                subindices.clear();
            } else {
                sleepAwhile();
            }
        }

        log.info("Adding the final " + subindices.size()
                + " subindices to main index. Forcing index to contain max " + maxSegments + " files "
                + "(related to combine task # " + indexingJobCount + ")");

        totalIndex.addIndexes(subindices.toArray(new Directory[0]));
        totalIndex.forceMerge(maxSegments);
        totalIndex.commit();
        for (Directory luceneDir : subindices) {
            luceneDir.close();
        }
        subindices.clear();

        log.info("Adding operation completed (combine task # " + indexingJobCount + ")!");
        long docsInIndex = totalIndex.numDocs();

        indexer.close();
        log.info("Closed index (related to combine task # " + indexingJobCount);

        // Now the index is made, gzip it up.
        File totalIndexDir = new File(indexLocation);
        log.info("Gzip-compressing the individual " + totalIndexDir.list().length
                + " index files of combine task # " + indexingJobCount);
        ZipUtils.gzipFiles(totalIndexDir, resultDir);
        log.info("Completed combine task # " + indexingJobCount + " that combined a dataset with " + datasetSize
                + " crawl logs (entries in combined index: " + docsInIndex + ") - compressed index has size "
                + FileUtils.getHumanReadableFileSize(resultDir));
    } catch (IOException e) {
        throw new IOFailure("Error setting up crawl.log index framework for " + resultDir.getAbsolutePath(), e);
    } finally {
        // close down Threadpool-executor
        closeDownThreadpoolQuietly(executor);
        FileUtils.removeRecursively(new File(indexLocation));
        for (File temporaryFile : tmpfiles) {
            FileUtils.removeRecursively(temporaryFile);
        }
    }
}

From source file:co.paralleluniverse.galaxy.netty.UDPComm.java

private void configureThreadPool(String name, ThreadPoolExecutor executor) {
    executor.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardPolicy());
    executor.setThreadFactory(new ThreadFactoryBuilder().setNameFormat(name + "-%d").setDaemon(true)
            .setThreadFactory(new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    return new CommThread(r);
                }
            }).build());
    ThreadPoolExecutorMonitor.register(name, executor);
}

From source file:com.jkoolcloud.tnt4j.streams.inputs.TNTInputStream.java

/**
 * Creates a thread pool executor service for a given number of threads with a bounded task queue - the queue size
 * is 2x{@code threadsQty}. When the queue is full, new tasks are offered to the queue using the defined offer
 * timeout. If a task cannot be placed in the queue within that time, it is skipped and a warning is logged. This
 * keeps memory use from growing without bound when consumers cannot keep pace with the producers filling the
 * queue, effectively synchronizing producers with consumers.
 *
 * @param threadsQty
 *            the number of threads in the pool
 * @param offerTimeout
 *            how long to wait before giving up on offering task to queue
 *
 * @return the newly created thread pool executor
 *
 * @see ThreadPoolExecutor#ThreadPoolExecutor(int, int, long, TimeUnit, BlockingQueue, ThreadFactory)
 */
private ExecutorService getBoundedExecutorService(int threadsQty, final int offerTimeout) {
    StreamsThreadFactory stf = new StreamsThreadFactory("StreamBoundedExecutorThread-"); // NON-NLS
    stf.addThreadFactoryListener(new StreamsThreadFactoryListener());

    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadsQty, threadsQty, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(threadsQty * 2), stf);

    tpe.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    logger().log(OpLevel.WARNING,
                            StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                    "TNTInputStream.tasks.buffer.limit"),
                            offerTimeout);
                    notifyStreamTaskRejected(r);
                }
            } catch (InterruptedException exc) {
                halt(true);
            }
        }
    });

    return tpe;
}

From source file:mondrian.olap.Util.java

/**
 * Creates an {@link ExecutorService} object backed by a thread pool.
 *
 * @param maximumPoolSize Maximum number of concurrent threads.
 * @param corePoolSize Minimum number of concurrent threads to maintain in the pool, even if they are idle.
 * @param keepAliveTime Time, in seconds, for which to keep alive unused threads.
 * @param name The name of the threads.
 * @param rejectionPolicy The rejection policy to enforce.
 * @return A preconfigured executor service.
 */
public static ExecutorService getExecutorService(int maximumPoolSize, int corePoolSize, long keepAliveTime,
        final String name, RejectedExecutionHandler rejectionPolicy) {
    if (Util.PreJdk16) {
        // On JDK1.5, if you specify corePoolSize=0, nothing gets executed.
        // Bummer.
        corePoolSize = Math.max(corePoolSize, 1);
    }

    // We must create a factory where the threads
    // have the right name and are marked as daemon threads.
    final ThreadFactory factory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger(0);

        public Thread newThread(Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            t.setName(name + '_' + counter.incrementAndGet());
            return t;
        }
    };

    // Ok, create the executor
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(corePoolSize,
            maximumPoolSize > 0 ? maximumPoolSize : Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS,
            // we use a sync queue. any other type of queue
            // will prevent the tasks from running concurrently
            // because the executors API requires blocking queues.
            // Important to pass true here. This makes the
            // order of tasks deterministic.
            // TODO Write a non-blocking queue which implements
            // the blocking queue API so we can pass that to the
            // executor.
            new SynchronousQueue<Runnable>(true), factory);

    // Set the rejection policy if required.
    if (rejectionPolicy != null) {
        executor.setRejectedExecutionHandler(rejectionPolicy);
    }

    // Done
    return executor;
}
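
A hypothetical call site for this helper - the names, sizes, and policy are illustrative only - could look like this:

// Illustrative only: up to 10 daemon threads named "mondrian-worker_1", "mondrian-worker_2", ...
// With a SynchronousQueue behind the pool, CallerRunsPolicy makes the submitting thread
// run the task itself whenever no pool thread is immediately available.
ExecutorService service = Util.getExecutorService(10, // maximumPoolSize
        1, // corePoolSize
        30, // keepAliveTime, in seconds
        "mondrian-worker", // thread name prefix
        new ThreadPoolExecutor.CallerRunsPolicy());

service.execute(new Runnable() {
    public void run() {
        System.out.println("Running on " + Thread.currentThread().getName());
    }
});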

From source file:org.apache.hama.graph.GraphJobRunner.java

/**
 * Do the main logic of a superstep, namely checking if vertices are active,
 * feeding compute with messages and controlling combiners/aggregators. We
 * iterate over our messages and vertices in sorted order. That means that we
 * need to seek the first vertex that has the same ID as the iterated message.
 */
@SuppressWarnings("unchecked")
private void doSuperstep(GraphJobMessage currentMessage,
        BSPPeer<Writable, Writable, Writable, Writable, GraphJobMessage> peer) throws IOException {
    this.errorCount.set(0);
    long startTime = System.currentTimeMillis();

    this.changedVertexCnt = 0;
    vertices.startSuperstep();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);

    long loopStartTime = System.currentTimeMillis();
    while (currentMessage != null) {
        executor.execute(new ComputeRunnable(currentMessage));

        currentMessage = peer.getCurrentMessage();
    }
    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " looping: "
            + (System.currentTimeMillis() - loopStartTime) + " ms");

    executor.shutdown();
    try {
        executor.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (errorCount.get() > 0) {
        throw new IOException("there were " + errorCount + " exceptions during compute vertices.");
    }

    Iterator it = vertices.iterator();
    while (it.hasNext()) {
        Vertex<V, E, M> vertex = (Vertex<V, E, M>) it.next();
        if (!vertex.isHalted() && !vertex.isComputed()) {
            vertex.compute(Collections.<M>emptyList());
            vertices.finishVertexComputation(vertex);
        }
    }

    getAggregationRunner().sendAggregatorValues(peer, vertices.getActiveVerticesNum(), this.changedVertexCnt);
    this.iteration++;

    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " computing vertices: "
            + (System.currentTimeMillis() - startTime) + " ms");

    startTime = System.currentTimeMillis();
    finishSuperstep();
    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " synchronizing: "
            + (System.currentTimeMillis() - startTime) + " ms");
}
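
The retryHandler field used here (and in the following GraphJobRunner excerpts) is defined elsewhere in the class. A rough sketch of a retry-style handler - hypothetical, not Hama's actual implementation - would back off briefly and then resubmit the rejected task:

// Hypothetical retry handler: wait briefly, then hand the task back to the same executor.
private final RejectedExecutionHandler retryHandler = new RejectedExecutionHandler() {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        if (executor.isShutdown()) {
            return; // nothing to retry once the pool is shutting down
        }
        try {
            TimeUnit.MILLISECONDS.sleep(100);
            executor.execute(r); // may be rejected again, which re-enters this handler
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
    }
};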

From source file:org.apache.hama.graph.GraphJobRunner.java

/**
 * Seed the vertices first with their own values in compute. This is the first
 * superstep after the vertices have been loaded.
 */
private void doInitialSuperstep(BSPPeer<Writable, Writable, Writable, Writable, GraphJobMessage> peer)
        throws IOException {
    this.changedVertexCnt = 0;
    this.errorCount.set(0);
    vertices.startSuperstep();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);

    for (V v : vertices.keySet()) {
        executor.execute(new ComputeRunnable(v));
    }

    executor.shutdown();
    try {
        executor.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (errorCount.get() > 0) {
        throw new IOException("there were " + errorCount + " exceptions during compute vertices.");
    }

    getAggregationRunner().sendAggregatorValues(peer, 1, this.changedVertexCnt);
    iteration++;
    finishSuperstep();
}

From source file:org.apache.hama.graph.GraphJobRunner.java

/**
 * Loads vertices into memory of each peer.
 */
@SuppressWarnings("unchecked")
private void loadVertices(BSPPeer<Writable, Writable, Writable, Writable, GraphJobMessage> peer)
        throws IOException, SyncException, InterruptedException {

    for (int i = 0; i < peer.getNumPeers(); i++) {
        partitionMessages.put(i, new GraphJobMessage());
    }

    VertexInputReader<Writable, Writable, V, E, M> reader = (VertexInputReader<Writable, Writable, V, E, M>) ReflectionUtils
            .newInstance(conf.getClass(Constants.RUNTIME_PARTITION_RECORDCONVERTER, VertexInputReader.class));

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);

    KeyValuePair<Writable, Writable> next = null;

    while ((next = peer.readNext()) != null) {
        Vertex<V, E, M> vertex = GraphJobRunner.<V, E, M>newVertexInstance(VERTEX_CLASS);

        boolean vertexFinished = false;
        try {
            vertexFinished = reader.parseVertex(next.getKey(), next.getValue(), vertex);
        } catch (Exception e) {
            throw new IOException("Parse exception occured: " + e);
        }

        if (!vertexFinished) {
            continue;
        }

        Runnable worker = new Parser(vertex);
        executor.execute(worker);

    }

    executor.shutdown();
    executor.awaitTermination(60, TimeUnit.SECONDS);

    Iterator<Entry<Integer, GraphJobMessage>> it;
    it = partitionMessages.entrySet().iterator();
    while (it.hasNext()) {
        Entry<Integer, GraphJobMessage> e = it.next();
        it.remove();
        GraphJobMessage msg = e.getValue();
        msg.setFlag(GraphJobMessage.PARTITION_FLAG);
        peer.send(getHostName(e.getKey()), msg);
    }

    peer.sync();

    executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);

    GraphJobMessage msg;
    while ((msg = peer.getCurrentMessage()) != null) {
        executor.execute(new AddVertex(msg));
    }

    executor.shutdown();
    executor.awaitTermination(60, TimeUnit.SECONDS);

    LOG.info(vertices.size() + " vertices are loaded into " + peer.getPeerName());
}