Example usage for the java.util.concurrent.ThreadPoolExecutor constructor

Introduction

On this page you can find example usages of the java.util.concurrent.ThreadPoolExecutor constructor shown in the Prototype below.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters, the default thread factory and the default rejected execution handler.
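
A minimal, self-contained sketch of this constructor in use (the pool sizes, queue choice and tasks are illustrative assumptions, not taken from any of the projects below):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorBasics {
    public static void main(String[] args) throws InterruptedException {
        // 2 core threads, at most 4 threads, idle non-core threads exit after 60 seconds.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());

        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }

        pool.shutdown();                            // stop accepting new tasks
        pool.awaitTermination(5, TimeUnit.SECONDS); // wait for the queued tasks to finish
    }
}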

Usage

From source file:org.apache.hadoop.fs.azure.PageBlobOutputStream.java

/**
 * Constructs an output stream over the given page blob.
 *
 * @param blob the blob that this stream is associated with.
 * @param opContext an object used to track the execution of the operation
 * @throws StorageException if anything goes wrong creating the blob.
 */
public PageBlobOutputStream(final CloudPageBlobWrapper blob, final OperationContext opContext,
        final Configuration conf) throws StorageException {
    this.blob = blob;
    this.outBuffer = new ByteArrayOutputStream();
    this.opContext = opContext;
    this.lastQueuedTask = null;
    this.ioQueue = new LinkedBlockingQueue<Runnable>();

    // As explained above: the IO writes are not designed for parallelism,
    // so we only have one thread in this thread pool.
    this.ioThreadPool = new ThreadPoolExecutor(1, 1, 2, TimeUnit.SECONDS, ioQueue);

    // Make page blob files have a size that is the greater of a
    // minimum size, or the value of fs.azure.page.blob.size from configuration.
    long pageBlobConfigSize = conf.getLong("fs.azure.page.blob.size", 0);
    LOG.debug("Read value of fs.azure.page.blob.size as " + pageBlobConfigSize
            + " from configuration (0 if not present).");
    long pageBlobSize = Math.max(PAGE_BLOB_MIN_SIZE, pageBlobConfigSize);

    // Ensure that the pageBlobSize is a multiple of page size.
    if (pageBlobSize % PAGE_SIZE != 0) {
        pageBlobSize += PAGE_SIZE - pageBlobSize % PAGE_SIZE;
    }
    blob.create(pageBlobSize, new BlobRequestOptions(), opContext);
    currentBlobSize = pageBlobSize;

    // Set the page blob extension size. It must be a minimum of the default
    // value.
    configuredPageBlobExtensionSize = conf.getLong("fs.azure.page.blob.extension.size", 0);
    if (configuredPageBlobExtensionSize < PAGE_BLOB_DEFAULT_EXTENSION_SIZE) {
        configuredPageBlobExtensionSize = PAGE_BLOB_DEFAULT_EXTENSION_SIZE;
    }

    // make sure it is a multiple of the page size
    if (configuredPageBlobExtensionSize % PAGE_SIZE != 0) {
        configuredPageBlobExtensionSize += PAGE_SIZE - configuredPageBlobExtensionSize % PAGE_SIZE;
    }
}
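
The constructor above deliberately creates a one-thread pool so that page writes execute strictly one at a time, in submission order. A standalone sketch of that shape (the tasks and shutdown handling are illustrative, not part of PageBlobOutputStream):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SerializedIoPool {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Runnable> ioQueue = new LinkedBlockingQueue<Runnable>();
        // corePoolSize == maximumPoolSize == 1: tasks never run concurrently.
        ThreadPoolExecutor ioThreadPool = new ThreadPoolExecutor(1, 1, 2, TimeUnit.SECONDS, ioQueue);

        ioThreadPool.execute(() -> System.out.println("write page range 0-4095"));
        ioThreadPool.execute(() -> System.out.println("write page range 4096-8191"));

        ioThreadPool.shutdown();
        ioThreadPool.awaitTermination(10, TimeUnit.SECONDS);
    }
}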

From source file:dk.netarkivet.harvester.indexserver.CrawlLogIndexCache.java

/** Combine a number of crawl.log files into one Lucene index.  This index
 * is placed as gzip files under the directory returned by getCacheFile().
 *
 * @param rawfiles The map from job ID into crawl.log contents. No
 * null values are allowed in this map.
 */
protected void combine(Map<Long, File> rawfiles) {
    indexingJobCount++;
    long datasetSize = rawfiles.values().size();
    log.info("Starting combine task #" + indexingJobCount + ". This combines a dataset with " + datasetSize
            + " crawl logs (thread = " + Thread.currentThread().getName() + ")");

    File resultDir = getCacheFile(rawfiles.keySet());
    Set<File> tmpfiles = new HashSet<File>();
    String indexLocation = resultDir.getAbsolutePath() + ".luceneDir";
    ThreadPoolExecutor executor = null;
    try {
        DigestIndexer indexer = createStandardIndexer(indexLocation);
        final boolean verboseIndexing = false;
        DigestOptions indexingOptions = new DigestOptions(this.useBlacklist, verboseIndexing, this.mimeFilter);
        long count = 0;
        Set<IndexingState> outstandingJobs = new HashSet<IndexingState>();
        final int maxThreads = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAXTHREADS);
        executor = new ThreadPoolExecutor(maxThreads, maxThreads, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>());

        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());

        for (Map.Entry<Long, File> entry : rawfiles.entrySet()) {
            Long jobId = entry.getKey();
            File crawlLog = entry.getValue();
            // Generate UUID to ensure a unique filedir for the index.
            File tmpFile = new File(FileUtils.getTempDir(), UUID.randomUUID().toString());
            tmpfiles.add(tmpFile);
            String localindexLocation = tmpFile.getAbsolutePath();
            Long cached = cdxcache.cache(jobId);
            if (cached == null) {
                log.warn("Skipping the ingest of logs for job " + entry.getKey()
                        + ". Unable to retrieve cdx-file for job.");
                continue;
            }
            File cachedCDXFile = cdxcache.getCacheFile(cached);

            // Dispatch this indexing task to a separate thread that 
            // handles the sorting of the logfiles and the generation
            // of a lucene index for this crawllog and cdxfile.
            count++;
            String taskID = count + " out of " + datasetSize;
            log.debug("Making subthread for indexing job " + jobId + " - task " + taskID);
            Callable<Boolean> task = new DigestIndexerWorker(localindexLocation, jobId, crawlLog, cachedCDXFile,
                    indexingOptions, taskID);
            Future<Boolean> result = executor.submit(task);
            outstandingJobs.add(new IndexingState(jobId, localindexLocation, result));
        }

        // wait for all the outstanding subtasks to complete.
        Set<Directory> subindices = new HashSet<Directory>();

        // Deadline for the combine-task
        long combineTimeout = Settings.getLong(HarvesterSettings.INDEXSERVER_INDEXING_TIMEOUT);
        long timeOutTime = System.currentTimeMillis() + combineTimeout;

        // The indexwriter for the totalindex.
        IndexWriter totalIndex = indexer.getIndex();
        int subindicesInTotalIndex = 0;
        // Max number of segments in totalindex.
        int maxSegments = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAX_SEGMENTS);

        final int ACCUMULATED_SUBINDICES_BEFORE_MERGING = 200;

        while (outstandingJobs.size() > 0) {
            log.info("Outstanding jobs in combine task #" + indexingJobCount + " is now "
                    + outstandingJobs.size());
            Iterator<IndexingState> iterator = outstandingJobs.iterator();
            if (timeOutTime < System.currentTimeMillis()) {
                log.warn("Max indexing time exceeded for one index ("
                        + TimeUtils.readableTimeInterval(combineTimeout) + "). Indexing stops here, although"
                        + " missing subindices for " + outstandingJobs.size() + " jobs");
                break;
            }
            while (iterator.hasNext() && subindices.size() < ACCUMULATED_SUBINDICES_BEFORE_MERGING) {
                Future<Boolean> nextResult;
                IndexingState next = iterator.next();
                if (next.getResultObject().isDone()) {
                    nextResult = next.getResultObject();
                    try {
                        // check, if the indexing failed
                        if (nextResult.get()) {
                            subindices.add(new SimpleFSDirectory(new File(next.getIndex())));
                        } else {
                            log.warn("Indexing of job " + next.getJobIdentifier() + " failed.");
                        }

                    } catch (InterruptedException e) {
                        log.warn("Unable to get Result back from " + "indexing thread", e);
                    } catch (ExecutionException e) {
                        log.warn("Unable to get Result back from " + "indexing thread", e);
                    }
                    //remove the done object from the set
                    iterator.remove();
                }
            }

            if (subindices.size() >= ACCUMULATED_SUBINDICES_BEFORE_MERGING) {

                log.info("Adding " + subindices.size()
                        + " subindices to main index. Forcing index to contain max " + maxSegments
                        + " files (related to combine task # " + indexingJobCount + ")");
                totalIndex.addIndexes(subindices.toArray(new Directory[0]));
                totalIndex.forceMerge(maxSegments);
                totalIndex.commit();
                for (Directory luceneDir : subindices) {
                    luceneDir.close();
                }
                subindicesInTotalIndex += subindices.size();
                log.info("Completed adding " + subindices.size() + " subindices to main index, now containing "
                        + subindicesInTotalIndex + " subindices (related to combine task # "
                        + indexingJobCount + ")");
                subindices.clear();
            } else {
                sleepAwhile();
            }
        }

        log.info("Adding the final " + subindices.size()
                + " subindices to main index. Forcing index to contain max " + maxSegments + " files "
                + "(related to combine task # " + indexingJobCount + ")");

        totalIndex.addIndexes(subindices.toArray(new Directory[0]));
        totalIndex.forceMerge(maxSegments);
        totalIndex.commit();
        for (Directory luceneDir : subindices) {
            luceneDir.close();
        }
        subindices.clear();

        log.info("Adding operation completed (combine task # " + indexingJobCount + ")!");
        long docsInIndex = totalIndex.numDocs();

        indexer.close();
        log.info("Closed index (related to combine task # " + indexingJobCount);

        // Now the index is made, gzip it up.
        File totalIndexDir = new File(indexLocation);
        log.info("Gzip-compressing the individual " + totalIndexDir.list().length
                + " index files of combine task # " + indexingJobCount);
        ZipUtils.gzipFiles(totalIndexDir, resultDir);
        log.info("Completed combine task # " + indexingJobCount + " that combined a dataset with " + datasetSize
                + " crawl logs (entries in combined index: " + docsInIndex + ") - compressed index has size "
                + FileUtils.getHumanReadableFileSize(resultDir));
    } catch (IOException e) {
        throw new IOFailure("Error setting up craw.log index framework for " + resultDir.getAbsolutePath(), e);
    } finally {
        // close down Threadpool-executor
        closeDownThreadpoolQuietly(executor);
        FileUtils.removeRecursively(new File(indexLocation));
        for (File temporaryFile : tmpfiles) {
            FileUtils.removeRecursively(temporaryFile);
        }
    }
}
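
The pool above is sized to a fixed thread count, protected with CallerRunsPolicy, and drained by collecting Futures. A minimal sketch of that submit-and-collect pattern (the thread count and task body are placeholders, not the HarvesterSettings values):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SubmitAndCollectIndexJobs {
    public static void main(String[] args) throws Exception {
        int maxThreads = 4; // stand-in for HarvesterSettings.INDEXSERVER_INDEXING_MAXTHREADS
        ThreadPoolExecutor executor = new ThreadPoolExecutor(maxThreads, maxThreads, 0L,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
        // If a submission is ever rejected (for example because it races with shutdown),
        // run the task on the submitting thread instead of dropping it.
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());

        List<Future<Boolean>> results = new ArrayList<>();
        for (int jobId = 0; jobId < 10; jobId++) {
            final int id = jobId;
            results.add(executor.submit(() -> id % 2 == 0)); // placeholder for a DigestIndexerWorker
        }
        for (Future<Boolean> result : results) {
            System.out.println("indexing succeeded: " + result.get());
        }
        executor.shutdown();
    }
}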

From source file:com.pinterest.teletraan.ConfigHelper.java

public static TeletraanServiceContext setupContext(TeletraanServiceConfiguration configuration)
        throws Exception {
    TeletraanServiceContext context = new TeletraanServiceContext();

    BasicDataSource dataSource = configuration.getDataSourceFactory().build();
    context.setDataSource(dataSource);

    context.setUserRolesDAO(new DBUserRolesDAOImpl(dataSource));
    context.setGroupRolesDAO(new DBGroupRolesDAOImpl(dataSource));
    context.setTokenRolesDAO(new DBTokenRolesDAOImpl(dataSource));

    context.setBuildDAO(new DBBuildDAOImpl(dataSource));
    context.setEnvironDAO(new DBEnvironDAOImpl(dataSource));
    context.setDeployDAO(new DBDeployDAOImpl(dataSource));
    context.setHotfixDAO(new DBHotfixDAOImpl(dataSource));
    context.setRatingDAO(new DBRatingsDAOImpl(dataSource));
    context.setPromoteDAO(new DBPromoteDAOImpl(dataSource));

    context.setDataDAO(new DBDataDAOImpl(dataSource));
    context.setUtilDAO(new DBUtilDAOImpl(dataSource));

    context.setConfigHistoryDAO(new DBConfigHistoryDAOImpl(dataSource));
    context.setHostDAO(new DBHostDAOImpl(dataSource));
    context.setGroupDAO(new DBGroupDAOImpl(dataSource));
    context.setAgentDAO(new DBAgentDAOImpl(dataSource));
    context.setAgentErrorDAO(new DBAgentErrorDAOImpl(dataSource));

    context.setClusterDAO(new DBClusterDAOImpl(dataSource));
    context.setBaseImageDAO(new DBBaseImageDAOImpl(dataSource));
    context.setHostTypeDAO(new DBHostTypeDAOImpl(dataSource));
    context.setSecurityZoneDAO(new DBSecurityZoneDAOImpl(dataSource));
    context.setPlacementDAO(new DBPlacementDAOImpl(dataSource));

    // TODO Arcee specific
    context.setAlarmDAO(new DBAlarmDAOImpl(dataSource));
    context.setImageDAO(new DBImageDAOImpl(dataSource));
    context.setGroupInfoDAO(new DBGroupInfoDAOImpl(dataSource));
    context.setHealthCheckDAO(new DBHealthCheckDAOImpl(dataSource));
    context.setHealthCheckErrorDAO(new DBHealthCheckErrorDAOImpl(dataSource));
    context.setnewInstanceReportDAO(new DBNewInstanceReportDAOImpl(dataSource));
    context.setAsgLifecycleEventDAO(new DBAsgLifecycleEventDAOImpl(dataSource));
    context.setManagingGroupDAO(new DBManaginGroupDAOImpl(dataSource));

    // Inject proper implementation based on config
    context.setAuthorizer(configuration.getAuthorizationFactory().create(context));
    context.setSourceControlManager(configuration.getSourceControlFactory().create());
    context.setChatManager(configuration.getChatFactory().create());
    context.setMailManager(configuration.getEmailFactory().createMailManager());
    context.setHostGroupDAO(configuration.getHostGroupFactory().createHostGroupDAO());
    context.setMetricSource(configuration.getMetricSourceFactory().create());

    EventSenderFactory eventSenderFactory = configuration.getEventSenderFactory();
    if (eventSenderFactory != null) {
        context.setEventSender(eventSenderFactory.createEventSender());
    } else {
        context.setEventSender(new DefaultEventSender());
    }

    // AWS specific DAOs
    AWSFactory awsFactory = configuration.getAwsFactory();
    if (awsFactory != null) {
        context.setAutoScaleGroupManager(awsFactory.buildAwsAutoScalingManager());
        context.setAlarmManager(awsFactory.buildAwsAlarmManager());
        AmazonEC2Client ec2Client = awsFactory.buildEC2Client();
        context.setEc2Client(ec2Client);
        // TODO we should just use AwsConfigManager and get rid of the above 3
        context.setAwsConfigManager(awsFactory.buildAwsConfigManager());
        // TODO rename to manager
        context.setHostInfoDAO(new EC2HostInfoDAOImpl(ec2Client));
        context.setReservedInstanceInfoDAO(new ReservedInstanceFetcher(ec2Client));
        context.setClusterManager(new AwsVmManager(context.getAwsConfigManager()));
    } else {
        // TODO make sure if aws is null, all the workers related to aws still works
        context.setHostInfoDAO(new DefaultHostInfoDAOImpl());
        context.setClusterManager(new DefaultClusterManager());
    }

    /**
     Lastly, let us create the in-process background job executor, all transient, long
     running background jobs can be handled by this executor
     Currently the parameters are hard-coded as:

     corePoolSize - the number of threads to keep in the pool, even if they are idle, unless allowCoreThreadTimeOut is set
     maximumPoolSize - the maximum number of threads to allow in the pool
     keepAliveTime - when the number of threads is greater than the core, this is the maximum time that excess idle threads will wait for new tasks before terminating.
     unit - the time unit for the keepAliveTime argument
     workQueue - the queue to use for holding tasks before they are executed. This queue will hold only the Runnable tasks submitted by the execute method.
     */
    // TODO make the thread pool configurable
    ExecutorService jobPool = new ThreadPoolExecutor(1, 10, 30, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    context.setJobPool(jobPool);

    context.setDeployBoardUrlPrefix(configuration.getSystemFactory().getDashboardUrl());
    context.setChangeFeedUrl(configuration.getSystemFactory().getChangeFeedUrl());
    context.setQuboleAuthentication(configuration.getSystemFactory().getQuboleAuthentication());

    return context;
}
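
The comment block above spells out the five constructor parameters; a stripped-down sketch of the same job-pool shape for reference:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BackgroundJobPool {
    public static void main(String[] args) {
        // 1 core thread, at most 10 threads, excess idle threads exit after 30 seconds.
        ExecutorService jobPool = new ThreadPoolExecutor(1, 10, 30, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>());
        jobPool.execute(() -> System.out.println("transient background job"));
        jobPool.shutdown();
    }
}

One caveat: because the LinkedBlockingQueue is unbounded, the pool will in practice never grow beyond its single core thread, since extra threads are only created when the queue refuses an offer. A bounded queue (or a larger corePoolSize) is needed if up to ten concurrent jobs are really intended.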

From source file:org.wso2.carbon.device.mgt.output.adapter.http.HTTPEventAdapter.java

@Override
public void init() throws OutputEventAdapterException {

    tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId();

    // The ExecutorService will be assigned if it is null
    if (executorService == null) {
        int minThread;
        int maxThread;
        long defaultKeepAliveTime;
        int jobQueSize;

        // If global properties are available they are used; otherwise the constant defaults are assigned
        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME) != null) {
            minThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME));
        } else {
            minThread = HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME) != null) {
            maxThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME));
        } else {
            maxThread = HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME) != null) {
            defaultKeepAliveTime = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME));
        } else {
            defaultKeepAliveTime = HTTPEventAdapterConstants.DEFAULT_KEEP_ALIVE_TIME_IN_MILLIS;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME) != null) {
            jobQueSize = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME));
        } else {
            jobQueSize = HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE;
        }
        executorService = new ThreadPoolExecutor(minThread, maxThread, defaultKeepAliveTime,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(jobQueSize));

        //configurations for the httpConnectionManager which will be shared by every http adapter
        int defaultMaxConnectionsPerHost;
        int maxTotalConnections;

        if (globalProperties.get(HTTPEventAdapterConstants.DEFAULT_MAX_CONNECTIONS_PER_HOST) != null) {
            defaultMaxConnectionsPerHost = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.DEFAULT_MAX_CONNECTIONS_PER_HOST));
        } else {
            defaultMaxConnectionsPerHost = HTTPEventAdapterConstants.DEFAULT_DEFAULT_MAX_CONNECTIONS_PER_HOST;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.MAX_TOTAL_CONNECTIONS) != null) {
            maxTotalConnections = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.MAX_TOTAL_CONNECTIONS));
        } else {
            maxTotalConnections = HTTPEventAdapterConstants.DEFAULT_MAX_TOTAL_CONNECTIONS;
        }

        connectionManager = new MultiThreadedHttpConnectionManager();
        connectionManager.getParams().setDefaultMaxConnectionsPerHost(defaultMaxConnectionsPerHost);
        connectionManager.getParams().setMaxTotalConnections(maxTotalConnections);
    }
}
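
A compact sketch of the configure-with-fallback pattern used above, ending in a bounded job queue; the property keys and default values are invented for illustration and are not the adapter's real constants:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ConfiguredPoolFactory {
    // Hypothetical helper: use the configured value if present, otherwise the fallback.
    static int intProperty(Map<String, String> props, String key, int fallback) {
        String value = props.get(key);
        return value != null ? Integer.parseInt(value) : fallback;
    }

    public static ExecutorService fromProperties(Map<String, String> props) {
        int minThread = intProperty(props, "minThread", 8);
        int maxThread = intProperty(props, "maxThread", 100);
        long keepAliveMillis = intProperty(props, "keepAliveMillis", 20000);
        int jobQueueSize = intProperty(props, "jobQueueSize", 10000);
        // With a bounded queue the pool can actually grow toward maxThread once the
        // queue fills; further submissions are then rejected by the default AbortPolicy.
        return new ThreadPoolExecutor(minThread, maxThread, keepAliveMillis,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(jobQueueSize));
    }

    public static void main(String[] args) {
        ExecutorService pool = fromProperties(new HashMap<>());
        pool.execute(() -> System.out.println("event dispatched"));
        pool.shutdown();
    }
}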

From source file:com.linkedin.pinot.transport.perf.ScatterGatherPerfClient.java

private void setup() {
    MetricsRegistry registry = new MetricsRegistry();
    _timedExecutor = new ScheduledThreadPoolExecutor(1);
    _service = new ThreadPoolExecutor(10, 10, 10, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
    _eventLoopGroup = new NioEventLoopGroup(10);
    _timer = new HashedWheelTimer();

    NettyClientMetrics clientMetrics = new NettyClientMetrics(registry, "client_");
    PooledNettyClientResourceManager rm = new PooledNettyClientResourceManager(_eventLoopGroup, _timer,
            clientMetrics);
    _pool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(1, _maxActiveConnections, 300000, 10, rm,
            _timedExecutor, MoreExecutors.sameThreadExecutor(), registry);
    rm.setPool(_pool);
    _scatterGather = new ScatterGatherImpl(_pool, _service);
    for (AsyncReader r : _readerThreads) {
        r.start();
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer.java

protected ThreadPoolExecutor createNewThreadPoolService(Configuration conf) {
    int nThreads = conf.getInt(YarnConfiguration.RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT,
            YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT);

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("DelegationTokenRenewer #%d").build();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(nThreads, nThreads, 3L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    pool.setThreadFactory(tf);
    pool.allowCoreThreadTimeOut(true);
    return pool;
}
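
The same named-thread, core-timeout configuration can be reproduced without Guava's ThreadFactoryBuilder; a sketch using a plain ThreadFactory (the thread count is an arbitrary example):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedRenewerPool {
    public static ThreadPoolExecutor create(int nThreads) {
        AtomicInteger counter = new AtomicInteger();
        ThreadFactory tf = r -> new Thread(r, "DelegationTokenRenewer #" + counter.getAndIncrement());
        ThreadPoolExecutor pool = new ThreadPoolExecutor(nThreads, nThreads, 3L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        pool.setThreadFactory(tf);
        // Allow even the core threads to exit after the 3-second keep-alive when idle.
        pool.allowCoreThreadTimeOut(true);
        return pool;
    }

    public static void main(String[] args) {
        ThreadPoolExecutor pool = create(4);
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}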

From source file:com.espertech.esper.filter.TestIndexTreeBuilderMultithreaded.java

private void performMultithreadedTest(FilterHandleSetNode topNode, int numberOfThreads, int numberOfRunnables,
        int numberOfSecondsSleep) throws Exception {
    log.info(".performMultithreadedTest Loading thread pool work queue,numberOfRunnables=" + numberOfRunnables);

    ThreadPoolExecutor pool = new ThreadPoolExecutor(0, numberOfThreads, 99999, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());

    for (int i = 0; i < numberOfRunnables; i++) {
        IndexTreeBuilderRunnable runnable = new IndexTreeBuilderRunnable(eventType, topNode, testFilterSpecs,
                matchedEvents, unmatchedEvents);

        pool.execute(runnable);
    }

    log.info(".performMultithreadedTest Starting thread pool, threads=" + numberOfThreads);
    pool.setCorePoolSize(numberOfThreads);

    // Sleep X seconds
    sleep(numberOfSecondsSleep);

    log.info(".performMultithreadedTest Completed, numberOfRunnables=" + numberOfRunnables
            + "  numberOfThreads=" + numberOfThreads + "  completed=" + pool.getCompletedTaskCount());

    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.SECONDS);

    assertTrue(pool.getCompletedTaskCount() == numberOfRunnables);
}
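
The test above queues its runnables before any worker threads exist and only then raises the core pool size. A self-contained sketch of that queue-then-ramp-up pattern (the counts are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DeferredStartPool {
    public static void main(String[] args) throws InterruptedException {
        // corePoolSize 0 with an unbounded queue: at most one worker drains the queue
        // until the core size is raised.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(0, 4, 99999, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 20; i++) {
            final int n = i;
            pool.execute(() -> System.out.println("runnable " + n));
        }
        // Raising the core size starts additional threads to execute the queued tasks.
        pool.setCorePoolSize(4);
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("completed: " + pool.getCompletedTaskCount());
    }
}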

From source file:com.ngdata.sep.impl.SepConsumer.java

/**
 * @param subscriptionTimestamp timestamp of when the index subscription became active (or more accurately, not
 *        inactive)
 * @param listener listeners that will process the events
 * @param threadCnt number of worker threads that will handle incoming SEP events
 * @param hostName hostname to bind to
 * @param payloadExtractor extracts payloads to include in SepEvents
 */
public SepConsumer(String subscriptionId, long subscriptionTimestamp, EventListener listener, int threadCnt,
        String hostName, ZooKeeperItf zk, Configuration hbaseConf, PayloadExtractor payloadExtractor)
        throws IOException, InterruptedException {
    Preconditions.checkArgument(threadCnt > 0, "Thread count must be > 0");
    this.subscriptionId = SepModelImpl.toInternalSubscriptionName(subscriptionId);
    this.subscriptionTimestamp = subscriptionTimestamp;
    this.listener = listener;
    this.zk = zk;
    this.hbaseConf = hbaseConf;
    this.sepMetrics = new SepMetrics(subscriptionId);
    this.payloadExtractor = payloadExtractor;
    this.executors = Lists.newArrayListWithCapacity(threadCnt);

    InetSocketAddress initialIsa = new InetSocketAddress(hostName, 0);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    String name = "regionserver/" + initialIsa.toString();
    this.rpcServer = new RpcServer(this, name, getServices(),
            /*HBaseRPCErrorHandler.class, OnlineRegions.class},*/
            initialIsa, // BindAddress is IP we got for this server.
            hbaseConf.getInt("hbase.regionserver.handler.count", 10),
            hbaseConf.getInt("hbase.regionserver.metahandler.count", 10), hbaseConf, HConstants.QOS_THRESHOLD);
    this.serverName = new ServerName(hostName, rpcServer.getListenerAddress().getPort(),
            System.currentTimeMillis());
    this.zkWatcher = new ZooKeeperWatcher(hbaseConf, this.serverName.toString(), null);

    // login the zookeeper client principal (if using security)
    ZKUtil.loginClient(hbaseConf, "hbase.zookeeper.client.keytab.file",
            "hbase.zookeeper.client.kerberos.principal", hostName);

    // login the server principal (if using secure Hadoop)
    User.login(hbaseConf, "hbase.regionserver.keytab.file", "hbase.regionserver.kerberos.principal", hostName);

    for (int i = 0; i < threadCnt; i++) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1, 10, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100));
        executor.setRejectedExecutionHandler(new WaitPolicy());
        executors.add(executor);
    }
}
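
WaitPolicy above is the SEP library's own RejectedExecutionHandler for the bounded per-thread queues. The bounded single-thread pool pattern can be sketched with a hypothetical blocking handler (a simplification, not the hbase-sep implementation):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedWorkerPool {
    // Hypothetical stand-in for WaitPolicy: block the submitter until the
    // bounded queue has room instead of throwing. A production handler should
    // also check executor.isShutdown() before re-queuing the task.
    static class BlockOnFullPolicy implements RejectedExecutionHandler {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                executor.getQueue().put(r);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RejectedExecutionException("Interrupted while waiting to enqueue", e);
            }
        }
    }

    public static ThreadPoolExecutor create() {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 1, 10, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100));
        executor.setRejectedExecutionHandler(new BlockOnFullPolicy());
        return executor;
    }
}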

From source file:nl.uva.sne.disambiguators.WikipediaOnline.java

private Map<String, List<String>> getCategories(Set<Term> terms)
        throws MalformedURLException, InterruptedException, ExecutionException {
    int maxT = 3;
    BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(maxT);
    ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);

    //        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());
    Map<String, List<String>> cats = new HashMap<>();
    Set<Future<Map<String, List<String>>>> set = new HashSet<>();
    int count = 0;
    for (Term t : terms) {
        URL url = new URL(page + "?action=query&format=json&prop=categories&pageids=" + t.getUID());
        System.err.println(url);
        WikiRequestor req = new WikiRequestor(url, t.getUID(), 0);
        Future<Map<String, List<String>>> future = pool.submit(req);
        set.add(future);
    }
    pool.shutdown();

    for (Future<Map<String, List<String>>> future : set) {
        while (!future.isDone()) {
            //                Logger.getLogger(WikipediaOnline.class.getName()).log(Level.INFO, "Task is not completed yet....");
            Thread.sleep(10);
        }
        Map<String, List<String>> c = future.get();
        if (c != null) {
            cats.putAll(c);
        }
    }

    return cats;
}
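
Since Future.get() already blocks until its task completes, the isDone()/sleep polling loop above can usually be replaced by a plain collect loop. A sketch of that simpler shape, using the constructor overload that also takes a rejection handler (mirroring the commented-out variant above) and placeholder tasks instead of WikiRequestor:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CollectCategoryFutures {
    public static void main(String[] args) throws Exception {
        int maxT = 3;
        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());

        List<Future<String>> futures = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            final int pageId = i;
            futures.add(pool.submit(() -> "categories for page " + pageId)); // stand-in for a WikiRequestor call
        }
        pool.shutdown();

        for (Future<String> future : futures) {
            System.out.println(future.get()); // blocks until the task is done; no polling needed
        }
    }
}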

From source file:com.facebook.presto.accumulo.tools.RewriteMetricsTask.java

public int exec() throws Exception {
    // Validate the required parameters have been set
    int numErrors = checkParam(config, "config");
    numErrors += checkParam(schema, "schema");
    numErrors += checkParam(tableName, "tableName");
    if (numErrors > 0) {
        return 1;
    }

    // Create the instance and the connector
    Instance inst = new ZooKeeperInstance(config.getInstance(), config.getZooKeepers());
    Connector connector = inst.getConnector(config.getUsername(), new PasswordToken(config.getPassword()));

    if (auths == null) {
        auths = connector.securityOperations().getUserAuthorizations(config.getUsername());
    }

    // Fetch the table metadata
    ZooKeeperMetadataManager manager = new ZooKeeperMetadataManager(config, new TypeRegistry());

    LOG.info("Scanning Presto metadata for tables...");
    AccumuloTable table = manager.getTable(new SchemaTableName(schema, tableName));

    if (table == null) {
        LOG.error("Table is null, does it exist?");
        return 1;
    }

    reconfigureIterators(connector, table);

    if (!dryRun) {
        LOG.info("Truncating metrics table " + table.getIndexTableName() + "_metrics");
        connector.tableOperations().deleteRows(table.getIndexTableName() + "_metrics", null, null);
    } else {
        LOG.info("Would have truncated metrics table " + table.getIndexTableName() + "_metrics");
    }

    long start = System.currentTimeMillis();

    ExecutorService service = MoreExecutors.getExitingExecutorService(
            new ThreadPoolExecutor(2, 2, 0, TimeUnit.MILLISECONDS, new SynchronousQueue<>()));

    List<Future<Void>> tasks = service.invokeAll(ImmutableList.of(() -> {
        rewriteMetrics(connector, table, start);
        return null;
    }, () -> {
        rewriteNumRows(connector, table, start);
        return null;
    }));

    for (Future<Void> task : tasks) {
        task.get();
    }

    LOG.info("Finished");
    return 0;
}