Example usage for java.util.concurrent SynchronousQueue SynchronousQueue

Introduction

This page lists example usages of the java.util.concurrent SynchronousQueue constructor SynchronousQueue().

Prototype

public SynchronousQueue() 

Document

Creates a SynchronousQueue with nonfair access policy.
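
Before the real-world usages below, it helps to see the constructor's core behavior in isolation: a SynchronousQueue has no capacity, so every put() blocks until another thread take()s the element, and vice versa. A minimal, self-contained handoff sketch:

import java.util.concurrent.SynchronousQueue;

public class HandoffExample {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<String> queue = new SynchronousQueue<>();

        // Consumer: take() blocks until a producer hands off an element.
        Thread consumer = new Thread(() -> {
            try {
                System.out.println("Received: " + queue.take());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        // Producer: put() blocks until the consumer is ready to receive.
        queue.put("hello");
        consumer.join();
    }
}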

Usage

From source file:org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.java

@Override
public void init(Context context) throws IOException {
    super.init(context);
    this.conf = HBaseConfiguration.create(ctx.getConfiguration());
    decorateConf();
    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
    this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier",
            maxRetriesMultiplier);
    // TODO: This connection is replication specific or we should make it particular to
    // replication and make replication specific settings such as compression or codec to use
    // passing Cells.
    this.conn = (HConnection) ConnectionFactory.createConnection(this.conf);
    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
    this.metrics = context.getMetrics();
    // ReplicationQueueInfo parses the peerId out of the znode for us
    this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
    // per sink thread pool
    this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY,
            HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
    this.exec = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
}
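
The snippet above is the classic direct-handoff pool: since a SynchronousQueue never buffers tasks, execute() either hands the task to an idle worker or spawns a new thread up to maxThreads, and with the default AbortPolicy a submission beyond that fails fast. A small sketch of the saturation behavior (the pool sizes here are illustrative, not HBase's):

import java.util.concurrent.*;

public class DirectHandoffDemo {
    public static void main(String[] args) {
        // Same shape as the pool above: 1 core thread, small max, direct handoff.
        ThreadPoolExecutor exec = new ThreadPoolExecutor(1, 2, 60, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());

        Runnable slow = () -> {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };

        exec.execute(slow); // handed to the core thread
        exec.execute(slow); // spawns a second thread (maxThreads = 2)
        try {
            exec.execute(slow); // no idle worker, no queue slot: rejected
        } catch (RejectedExecutionException e) {
            System.out.println("Rejected, pool saturated: " + e);
        }
        exec.shutdown();
    }
}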

From source file:org.apache.hive.hcatalog.templeton.JobRequestExecutor.java

public JobRequestExecutor(JobRequestType requestType, String concurrentRequestsConfigName,
        String jobTimeoutConfigName, boolean enableCancelTask) {

    this.concurrentRequestsConfigName = concurrentRequestsConfigName;
    this.jobTimeoutConfigName = jobTimeoutConfigName;
    this.requestType = requestType;
    this.enableCancelTask = enableCancelTask;

    /*
     * The default number of threads is 0, which means no thread pool is used and the
     * operation is executed on the current thread.
     */
    int threads = !StringUtils.isEmpty(concurrentRequestsConfigName)
            ? appConf.getInt(concurrentRequestsConfigName, 0)
            : 0;

    if (threads > 0) {
        /*
         * Create a thread pool with no queue wait time to execute the operation. This ensures
         * that job requests are rejected when the maximum number of threads are already busy.
         */
        this.jobExecutePool = new ThreadPoolExecutor(threads, threads, threadKeepAliveTimeInHours,
                TimeUnit.HOURS, new SynchronousQueue<Runnable>());
        this.jobExecutePool.allowCoreThreadTimeOut(true);

        /*
         * Get the job request timeout value. If this configuration value is set to 0,
         * the job request waits until it finishes.
         */
        if (!StringUtils.isEmpty(jobTimeoutConfigName)) {
            this.requestExecutionTimeoutInSec = appConf.getInt(jobTimeoutConfigName, 0);
        }

        LOG.info("Configured " + threads + " threads for job request type " + this.requestType
                + " with time out " + this.requestExecutionTimeoutInSec + " s.");
    } else {
        /*
         * If no thread pool is configured, requests are executed on the current thread.
         */
        LOG.info("No thread pool configured for job request type " + this.requestType);
    }
}
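
One detail in the snippet above is easy to miss: with the core size equal to the maximum size, the keep-alive alone would never shrink the pool, so allowCoreThreadTimeOut(true) is what lets an idle pool drop to zero threads. A minimal sketch of that effect:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CoreTimeoutDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(4, 4, 1, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());
        // Let even core threads die after the keep-alive elapses.
        pool.allowCoreThreadTimeOut(true);

        pool.execute(() -> { });
        System.out.println("pool size after submit: " + pool.getPoolSize()); // 1

        Thread.sleep(2000); // longer than the 1-second keep-alive
        System.out.println("pool size after keep-alive: " + pool.getPoolSize()); // 0
        pool.shutdown();
    }
}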

From source file:com.blacklocus.qs.worker.QSAssembly.java

/**
 * @return a configured QueueReader to process tasks. The QueueReader must be started via {@link QueueReader#run()}.
 */
public QueueReader build() {
    validate();

    Runnable heartbeater = Runnables
            .newInfiniteLoggingRunnable(new QSWorkerHeartbeater(workerIdService, logService));
    new Thread(heartbeater, "WorkerHeartbeater").start();

    configuration.addConfiguration(QSConfig.DEFAULTS);

    QueueingStrategy<QSTaskModel> queueingStrategy = QueueingStrategies.newHeapQueueingStrategy(
            configuration.getDouble(QSConfig.PROP_HEAP_STRATEGY_TRIGGER),
            configuration.getLong(QSConfig.PROP_HEAP_STRATEGY_MAX_DELAY),
            configuration.getLong(QSConfig.PROP_HEAP_STRATEGY_HINT));
    QSTaskService taskService = new ThreadedFIFOQSTaskService(queueingStrategy, taskServices);
    TaskServiceIterable taskIterable = new TaskServiceIterable(taskService);

    ExecutorService workerExecutorService = StrategicExecutors.newBalancingThreadPoolExecutor(
            new ThreadPoolExecutor(configuration.getInt(QSConfig.PROP_WORKER_POOL_CORE),
                    configuration.getInt(QSConfig.PROP_WORKER_POOL_MAX), 1, TimeUnit.MINUTES,
                    new SynchronousQueue<Runnable>(), new CallerBlocksPolicy()),
            configuration.getFloat(QSConfig.PROP_WORKER_POOL_UTILIZATION), DEFAULT_SMOOTHING_WEIGHT,
            DEFAULT_BALANCE_AFTER);

    return new QueueReader<QSTaskModel, TaskKit, Object>(taskIterable,
            new WorkerQueueItemHandler(queueingStrategy, taskService, logService, workerIdService, workers),
            workerExecutorService, 0);
}
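
Here the SynchronousQueue is paired with CallerBlocksPolicy, a project-specific RejectedExecutionHandler that makes the submitting thread wait for a free worker instead of failing. A sketch of how such a policy is commonly written (an illustrative implementation, not the blacklocus one):

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

// Illustrative caller-blocks policy: on rejection, block on the work queue
// until a worker thread is ready to accept the task.
public class BlockingPolicy implements RejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        if (executor.isShutdown()) {
            throw new RejectedExecutionException("Executor has been shut down");
        }
        try {
            // With a SynchronousQueue, put() blocks until a worker take()s the task.
            executor.getQueue().put(r);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RejectedExecutionException(e);
        }
    }
}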

From source file:test.other.T_encrypt_password.java

@Test
public void testxx() {

    final Random random = new Random();

    final int pow = 15;
    final int loopNum = 30;
    final int taskNum = 10;

    class MyTask implements Runnable {

        public void run() {

            final Long[] elapsed = new Long[loopNum];

            for (int i = 0; i != loopNum; ++i) {

                String[] safes = new String[1 << pow];
                elapsed[i] = 0L;

                for (int k = 0; k < 1 << pow; ++k) {
                    safes[k] = getRandomString(random.nextInt(50));
                    //                  assert (safes[k] != null);
                }

                long t1 = System.nanoTime();
                for (int j = 0; j < 1 << pow; j++) {
                    String tmp = safes[j];
                    // assert(safes[j] != null) : " index:" + j;
                    safes[j] = EncryptionService.transformPassword(tmp);
                }
                long t2 = System.nanoTime();

                elapsed[i] += t2 - t1;
            }
            long total = 0;
            for (Long long1 : elapsed) {
                total += long1;
            }
            System.out.println(total); // the test reports its result via stdout
        }
    }

    //      Thread thread = new Thread(new MyTask());
    //      thread.run();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 16, 10, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), new ThreadPoolExecutor.CallerRunsPolicy());

    for (int i = 0; i < taskNum; ++i) {
        pool.execute(new MyTask());
    }

    // Wait for the submitted tasks to finish before the test method returns.
    pool.shutdown();
    try {
        pool.awaitTermination(10, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
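
This benchmark relies on ThreadPoolExecutor.CallerRunsPolicy: once all 16 workers are busy, execute() runs the overflow task on the submitting thread itself, which naturally throttles submission. A small self-contained sketch of that effect:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CallerRunsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Single worker, direct handoff: a second concurrent task overflows.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>(), new ThreadPoolExecutor.CallerRunsPolicy());

        Runnable slow = () -> {
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            System.out.println("ran on " + Thread.currentThread().getName());
        };

        pool.execute(slow); // taken by the single pool thread
        pool.execute(slow); // pool saturated: the caller (main) runs this one itself
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}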

From source file:org.apache.hadoop.hbase.util.TestHBaseFsckOneRS.java

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
            MasterSyncObserver.class.getName());

    conf.setInt("hbase.regionserver.handler.count", 2);
    conf.setInt("hbase.regionserver.metahandler.count", 30);

    conf.setInt("hbase.htable.threads.max", POOL_SIZE);
    conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE);
    conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT);
    TEST_UTIL.startMiniCluster(1);

    tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("testhbck"));

    hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE);

    AssignmentManager assignmentManager = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    regionStates = assignmentManager.getRegionStates();

    connection = (ClusterConnection) TEST_UTIL.getConnection();

    admin = connection.getAdmin();
    admin.setBalancerRunning(false, true);

    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
}

From source file:org.apache.hadoop.hbase.thrift.TBoundedThreadPoolServer.java

public TBoundedThreadPoolServer(Args options, ThriftMetrics metrics) {
    super(options);

    if (options.maxQueuedRequests > 0) {
        this.callQueue = new CallQueue(new LinkedBlockingQueue<Call>(options.maxQueuedRequests), metrics);
    } else {
        this.callQueue = new CallQueue(new SynchronousQueue<Call>(), metrics);
    }

    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
    tfb.setDaemon(true);
    tfb.setNameFormat("thrift-worker-%d");
    executorService = new ThreadPoolExecutor(options.minWorkerThreads, options.maxWorkerThreads,
            options.threadKeepAliveTimeSec, TimeUnit.SECONDS, this.callQueue, tfb.build());
    serverOptions = options;
}
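
The constructor above shows the usual trade-off: a bounded LinkedBlockingQueue buffers up to maxQueuedRequests calls, while a SynchronousQueue buffers nothing, so a request is accepted only when a worker is immediately available. The same selection pattern as a generic sketch (the helper name is illustrative):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;

// Illustrative helper: pick a work queue based on a configured bound.
// A positive bound buffers that many tasks; otherwise use a direct handoff.
public final class WorkQueues {
    public static BlockingQueue<Runnable> forBound(int maxQueued) {
        return maxQueued > 0
                ? new LinkedBlockingQueue<Runnable>(maxQueued)
                : new SynchronousQueue<Runnable>();
    }
}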

From source file:com.facebook.presto.accumulo.tools.TimestampCheckTask.java

public int exec() throws Exception {
    // Create the instance and the connector
    Instance inst = new ZooKeeperInstance(config.getInstance(), config.getZooKeepers());
    Connector connector = inst.getConnector(config.getUsername(), new PasswordToken(config.getPassword()));

    if (auths == null) {
        auths = connector.securityOperations().getUserAuthorizations(config.getUsername());
    }

    // Fetch the table metadata
    ZooKeeperMetadataManager manager = new ZooKeeperMetadataManager(config, new TypeRegistry());

    LOG.info("Scanning Presto metadata for tables...");
    AccumuloTable table = manager.getTable(new SchemaTableName(schema, tableName));

    if (table == null) {
        LOG.error("Table is null, does it exist?");
        return 1;
    }

    AccumuloRowSerializer serializer = new LexicoderRowSerializer();

    startBytes = serializer.encode(TimestampType.TIMESTAMP, PARSER.parseDateTime(start).getMillis());
    endBytes = serializer.encode(TimestampType.TIMESTAMP, PARSER.parseDateTime(end).getMillis());

    this.range = new Range(new Text(startBytes), new Text(endBytes));

    long timestamp = System.currentTimeMillis();

    Optional<AccumuloColumnHandle> columnHandle = table.getColumns().stream()
            .filter(handle -> handle.getName().equalsIgnoreCase(column)).findAny();
    checkArgument(columnHandle.isPresent(), "no column found");

    ExecutorService service = MoreExecutors.getExitingExecutorService(
            new ThreadPoolExecutor(3, 3, 0, TimeUnit.MILLISECONDS, new SynchronousQueue<>()));

    List<Future<Void>> tasks = service.invokeAll(ImmutableList.of(() -> {
        getDataCount(connector, table, columnHandle.get(), timestamp);
        return null;
    }, () -> {
        getIndexCount(connector, table, columnHandle.get(), timestamp);
        return null;
    }, () -> {
        getMetricCount(connector, table, columnHandle.get(), timestamp);
        return null;
    }));

    for (Future<Void> task : tasks) {
        task.get();
    }

    LOG.info("Finished");
    return 0;
}

From source file:com.serotonin.m2m2.Lifecycle.java

public synchronized void initialize(ClassLoader classLoader) {
    for (Module module : ModuleRegistry.getModules()) {
        module.preInitialize();
    }

    String tzId = Common.envProps.getString("timezone");
    if (!StringUtils.isEmpty(tzId)) {
        TimeZone tz = TimeZone.getTimeZone(tzId);
        if ((tz == null) || (!tz.getID().equals(tzId)))
            throw new RuntimeException("Time zone id '" + tzId + "' in env properties is not valid");
        this.LOG.info("Setting default time zone to " + tz.getID());
        TimeZone.setDefault(tz);
        DateTimeZone.setDefault(DateTimeZone.forID(tzId));
    }

    Common.timer.init(new ThreadPoolExecutor(0, 1000, 30L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>()));

    Providers.add(TimerProvider.class, new TimerProvider() {
        public AbstractTimer getTimer() {
            return Common.timer;
        }
    });
    Common.JSON_CONTEXT.addResolver(new EventTypeResolver(), new Class[] { EventType.class });
    Common.JSON_CONTEXT.addResolver(new BaseChartRenderer.Resolver(), new Class[] { ChartRenderer.class });
    Common.JSON_CONTEXT.addResolver(new BaseTextRenderer.Resolver(), new Class[] { TextRenderer.class });
    Common.JSON_CONTEXT.addResolver(new EmailRecipientResolver(), new Class[] { EmailRecipient.class });

    Providers.add(InputStreamEPollProvider.class, new InputStreamEPollProviderImpl());
    Providers.add(ProcessEPollProvider.class, new ProcessEPollProviderImpl());

    //    lic();
    freemarkerInitialize();
    databaseInitialize(classLoader);

    for (Module module : ModuleRegistry.getModules()) {
        module.postDatabase();
    }
    utilitiesInitialize();
    eventManagerInitialize();
    runtimeManagerInitialize();
    maintenanceInitialize();
    imageSetInitialize();
    webServerInitialize(classLoader);

    for (Module module : ModuleRegistry.getModules()) {
        module.postInitialize();
    }

    SystemEventType.raiseEvent(new SystemEventType("SYSTEM_STARTUP"), System.currentTimeMillis(), false,
            new TranslatableMessage("event.system.startup"));

    for (Runnable task : this.STARTUP_TASKS)
        Common.timer.execute(task);
}
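
The timer pool above (zero core threads, a large maximum, and a SynchronousQueue) is essentially the shape of Executors.newCachedThreadPool(), which the JDK builds the same way:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CachedPoolShape {
    public static void main(String[] args) {
        // Hand-rolled near-equivalent of Executors.newCachedThreadPool():
        // zero core threads, effectively unbounded max, direct handoff.
        ExecutorService handRolled = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
                60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
        ExecutorService library = Executors.newCachedThreadPool();

        handRolled.shutdown();
        library.shutdown();
    }
}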

From source file:org.lilyproject.util.hbase.LocalHTable.java

public LocalHTable(Configuration conf, byte[] tableName) throws IOException {
    this.conf = conf;
    this.tableName = tableName;
    this.tableNameString = Bytes.toString(tableName);
    this.pool = getHTablePool(conf);

    // HTable internally has an ExecutorService. Many of the HBase operations that Lily performs don't
    // make use of this ES, since they are not plain put or batch operations. The operations that do use
    // it each go through the ExecutorService of a different HTable instance, leading to very little
    // thread re-use and many short-lived threads. Therefore, we replace HTable's ExecutorService with a
    // shared one, which requires modifying a private field.
    // (This seems to be improved in HBase trunk.)

    synchronized (this) {
        if (EXECUTOR_SERVICE == null) {
            int maxThreads = Integer.MAX_VALUE;
            log.debug("Creating ExecutorService for HTable with max threads = " + maxThreads);

            EXECUTOR_SERVICE = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS,
                    new SynchronousQueue<Runnable>(), new CustomThreadFactory("hbase-batch", null, true),
                    new WaitPolicy());
            EXECUTOR_SERVICE_SHUTDOWN_PROTECTED = new ShutdownProtectedExecutor(EXECUTOR_SERVICE);

            try {
                POOL_FIELD = HTable.class.getDeclaredField("pool");
                POOL_FIELD.setAccessible(true);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }

    // Test the table is accessible
    runNoIE(new TableRunnable<Object>() {
        @Override
        public Object run(HTableInterface table) throws IOException {
            return null;
        }
    });
}

From source file:org.apache.hedwig.server.netty.TestPubSubServer.java

@Test(timeout = 60000)
public void testUncaughtExceptionInZKThread() throws Exception {

    SynchronousQueue<Throwable> queue = new SynchronousQueue<Throwable>();
    RecordingUncaughtExceptionHandler uncaughtExceptionHandler = new RecordingUncaughtExceptionHandler(queue);
    final int port = PortManager.nextFreePort();
    final String hostPort = "127.0.0.1:" + PortManager.nextFreePort();

    PubSubServer server = startServer(uncaughtExceptionHandler, port, new TopicManagerInstantiator() {

        @Override
        public TopicManager instantiateTopicManager() throws IOException {
            return new AbstractTopicManager(new ServerConfiguration(),
                    Executors.newSingleThreadScheduledExecutor()) {

                @Override
                protected void realGetOwner(ByteString topic, boolean shouldClaim,
                        Callback<HedwigSocketAddress> cb, Object ctx) {
                    ZooKeeper zookeeper;
                    try {
                        zookeeper = new ZooKeeper(hostPort, 60000, new Watcher() {
                            @Override
                            public void process(WatchedEvent event) {
                                // TODO Auto-generated method stub

                            }
                        });
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }

                    zookeeper.getData("/fake", false, new SafeAsyncZKCallback.DataCallback() {
                        @Override
                        public void safeProcessResult(int rc, String path, Object ctx, byte[] data,
                                org.apache.zookeeper.data.Stat stat) {
                            throw new RuntimeException("This should go to the uncaught exception handler");
                        }

                    }, null);
                }

                @Override
                protected void postReleaseCleanup(ByteString topic, Callback<Void> cb, Object ctx) {
                }
            };
        }
    });

    runPublishRequest(port);
    assertEquals(RuntimeException.class, queue.take().getClass());
    server.shutdown();
}
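
Unlike the other examples, this SynchronousQueue backs no thread pool at all: it is a one-shot rendezvous channel that lets the test thread block in queue.take() until the uncaught-exception handler on another thread hands the Throwable across. RecordingUncaughtExceptionHandler is project-specific; a sketch of the same idiom with a plain handler:

import java.util.concurrent.SynchronousQueue;

public class UncaughtHandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        final SynchronousQueue<Throwable> queue = new SynchronousQueue<Throwable>();

        Thread worker = new Thread(() -> {
            throw new RuntimeException("boom");
        });
        // Hand any uncaught exception to whoever is blocked in queue.take().
        worker.setUncaughtExceptionHandler((t, e) -> {
            try {
                queue.put(e);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();

        // Blocks until the worker's handler hands the Throwable across.
        Throwable caught = queue.take();
        System.out.println("Caught from " + worker.getName() + ": " + caught);
    }
}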