Example usage for java.util.concurrent Executors newSingleThreadExecutor

List of usage examples for java.util.concurrent Executors newSingleThreadExecutor

Introduction

On this page you can find example usages of java.util.concurrent Executors newSingleThreadExecutor.

Prototype

public static ExecutorService newSingleThreadExecutor(ThreadFactory threadFactory) 

Source Link

Document

Creates an Executor that uses a single worker thread operating off an unbounded queue, and uses the provided ThreadFactory to create a new thread when needed.

Usage

From source file:com.adaptris.hpcc.DfuPlusWrapper.java

@Override
public void init() throws CoreException {
    // Create the executor during init (not construction) so it follows the
    // component lifecycle. ManagedThreadFactory presumably lets the Adaptris
    // framework track/interrupt the worker thread — confirm against adaptris-core.
    executor = Executors.newSingleThreadExecutor(new ManagedThreadFactory());
}

From source file:com.clustercontrol.systemlog.util.SyslogReceiver.java

public synchronized void start() throws SocketException, UnknownHostException {
    log.info(String.format("starting SyslogReceiver. [address = %s, port = %s, charset = %s, handler = %s]",
            listenAddress, listenPort, charset.name(), _handler.getClass().getName()));

    // Startup order: socket (conditionally), executor, handler, receiver task.
    // (Original comments here were mojibake Japanese; intent inferred — TODO confirm.)

    // Bind the UDP socket only when this manager is NOT clustered; presumably
    // a clustered deployment receives syslog traffic elsewhere — verify.
    if (!HinemosManagerMain._isClustered) {
        socket = new DatagramSocket(listenPort, InetAddress.getByName(listenAddress));
        socket.setReceiveBufferSize(socketBufferSize);
        socket.setSoTimeout(socketTimeout);
    }

    // Single worker thread with a recognizable name for thread dumps/logs.
    _executor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "SystemLogReceiver");
        }
    });

    _handler.start();

    // The receive loop is only submitted when we actually own a socket (see above).
    if (!HinemosManagerMain._isClustered) {
        _executor.submit(new ReceiverTask(socket, _handler));
    }
}

From source file:gobblin.scheduler.Worker.java

/**
 * Start this worker.//from ww  w  .ja  va2 s  . com
 */
/**
 * Starts this worker: registers a {@link ServiceManager.Listener} that logs
 * lifecycle transitions (and exits the JVM on service failure), installs a
 * shutdown hook that stops all managed services, then starts the services
 * asynchronously.
 */
public void start() {
    this.serviceManager.addListener(new ServiceManager.Listener() {

        @Override
        public void stopped() {
            LOG.info("Worker has been stopped");
        }

        @Override
        public void healthy() {
            // Fixed typo: was "All services are health and running"
            LOG.info("All services are healthy and running");
            // Report services' uptimes
            Map<Service, Long> startupTimes = serviceManager.startupTimes();
            for (Map.Entry<Service, Long> entry : startupTimes.entrySet()) {
                LOG.info(String.format("Service %s is healthy with an uptime of %dms",
                        entry.getKey().toString(), entry.getValue()));
            }
        }

        @Override
        public void failure(Service service) {
            LOG.error(String.format("Service %s failed for the following reason:%n\t%s", service.toString(),
                    service.failureCause().toString()));
            // A failed service is considered fatal for the worker process.
            System.exit(1);
        }
    }, Executors.newSingleThreadExecutor(ExecutorsUtils.newThreadFactory(Optional.of(LOG))));

    // Add a shutdown hook so the task scheduler gets properly shutdown
    Runtime.getRuntime().addShutdownHook(new Thread() {

        @Override
        public void run() {
            // Give the services 5 seconds to stop to ensure that we are
            // responsive to shutdown requests
            LOG.info("Shutting down the worker");
            try {
                serviceManager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
            } catch (TimeoutException te) {
                LOG.error("Timeout in stopping the service manager", te);
            }
        }
    });

    LOG.info("Starting the worker with configured services");
    // Start the worker
    this.serviceManager.startAsync();
}

From source file:com.geekcap.javaworld.sparkexample.proxy.CustomClientBuilder.java

public CustomClientBuilder() {
    // GZip is enabled by default for hosebird connections.
    enableGZip = true;
    name = "hosebird-client-" + clientNum.getAndIncrement();

    // One daemon I/O thread, named so it is easy to spot in thread dumps.
    ThreadFactory ioThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("hosebird-client-io-thread-%d").build();
    executorService = Executors.newSingleThreadExecutor(ioThreadFactory);

    // The rate tracker runs on its own daemon scheduled-executor thread.
    ThreadFactory trackerFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("hosebird-client-rateTracker-thread-%d").build();
    ScheduledExecutorService trackerExecutor = Executors.newScheduledThreadPool(1, trackerFactory);
    rateTracker = new BasicRateTracker(30000, 100, true, trackerExecutor);

    reconnectionManager = new BasicReconnectionManager(5);

    // Default network timeouts, in milliseconds.
    socketTimeoutMillis = 60000;
    connectionTimeoutMillis = 4000;

    schemeRegistry = SchemeRegistryFactory.createDefault();
}

From source file:com.streamsets.pipeline.lib.jdbc.JdbcLoadRecordWriter.java

/**
 * Class constructor.
 *
 * <p>Output is dispatched to {@code loadOutputExecutor}, a dedicated
 * single-threaded executor whose thread is named after the target table.
 *
 * @param connectionString database connection string
 * @param dataSource JDBC {@link DataSource} to get a connection from
 * @param schema schema name
 * @param tableName table name
 * @param customMappings any custom mappings the user provided
 * @param duplicateKeyAction action to take for duplicate-key errors
 * @param recordReader base JdbcRecordReader, no CDC support
 * @param caseSensitive indicate whether to enclose the table name or not
 * @throws StageException if the base record writer cannot be initialized
 */
public JdbcLoadRecordWriter(String connectionString, DataSource dataSource, String schema, String tableName,
        List<JdbcFieldColumnParamMapping> customMappings, DuplicateKeyAction duplicateKeyAction,
        JdbcRecordReader recordReader, boolean caseSensitive) throws StageException {
    super(connectionString, dataSource, schema, tableName, false, // No rollback support
            customMappings, OperationType.LOAD_CODE, UnsupportedOperationAction.SEND_TO_ERROR, recordReader,
            null, caseSensitive);
    this.duplicateKeyAction = duplicateKeyAction;
    String threadName = "JDBC LOAD DATA Stream " + getTableName();
    loadOutputExecutor = Executors
            .newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat(threadName).build());
}

From source file:com.twitter.hbc.ClientBuilder.java

public ClientBuilder() {
    enableGZip = true;
    name = "hosebird-client-" + clientNum.getAndIncrement();

    // Single daemon I/O thread; the name format keeps thread dumps readable.
    executorService = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("hosebird-client-io-thread-%d").build());

    // Separate daemon scheduled executor dedicated to rate tracking.
    ScheduledExecutorService rateTrackerExecutor = Executors.newScheduledThreadPool(1,
            new ThreadFactoryBuilder().setDaemon(true)
                    .setNameFormat("hosebird-client-rateTracker-thread-%d").build());
    rateTracker = new BasicRateTracker(30000, 100, true, rateTrackerExecutor);

    reconnectionManager = new BasicReconnectionManager(5);

    // Timeouts are in milliseconds.
    socketTimeoutMillis = 60000;
    connectionTimeoutMillis = 4000;

    schemeRegistry = SchemeRegistryFactory.createDefault();
}

From source file:minium.script.rhinojs.RhinoEngine.java

public <T> RhinoEngine(final RhinoProperties properties) {
    // Single-threaded executor whose factory hands out exactly one thread over
    // the engine's lifetime — the Preconditions check makes a second request fail
    // fast. Presumably this is because Rhino execution must stay on one thread;
    // confirm against RhinoCallable/runWithContext.
    this.executorService = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Preconditions.checkState(executionThread == null, "Only one thread is supported");
            executionThread = FACTORY.newThread(r);
            return executionThread;
        }
    });

    // this ensures a single thread for this engine
    scope = runWithContext(new RhinoCallable<Scriptable, RuntimeException>() {
        @Override
        protected Scriptable doCall(Context cx, Scriptable scope) {
            try {
                Global global = new Global(cx);
                RequireProperties require = properties.getRequire();
                if (require != null) {
                    // Install CommonJS require() with the configured module paths.
                    List<String> modulePathURIs = getModulePathURIs(require);
                    LOGGER.debug("Module paths: {}", modulePathURIs);
                    global.installRequire(cx, modulePathURIs, require.isSandboxed());
                }
                ClassLoader classloader = Thread.currentThread().getContextClassLoader();
                // we need to load compat/timeout.js because rhino does not have setTimeout, setInterval, etc.
                cx.evaluateReader(global,
                        new InputStreamReader(classloader.getResourceAsStream("compat/timeout.js")),
                        "compat/timeout.js", 1, null);
                return global;
            } catch (IOException e) {
                // Wrap as unchecked; resource loading failures are unrecoverable here.
                throw Throwables.propagate(e);
            }
        }
    });
}

From source file:com.clustercontrol.util.CommandExecutor.java

/**
 * Creates a command executor that runs the given command on a dedicated
 * single worker thread.
 *
 * @param command the command and its arguments (must not be null)
 * @param charset charset used for the command's I/O (must not be null)
 * @param timeout execution timeout (units as used elsewhere in this class)
 * @param bufferSize size of the output buffer
 * @throws HinemosUnknown if initialization fails
 */
public CommandExecutor(String[] command, Charset charset, long timeout, int bufferSize) throws HinemosUnknown {
    this._command = command;
    this._charset = charset;
    this._timeout = timeout;
    this._bufferSize = bufferSize;

    log.debug("initializing " + this);

    if (_command == null) {
        throw new NullPointerException("command is not defined : " + this);
    }

    // Space-joined command line, kept for logging/diagnostics.
    // String.join also behaves correctly for an empty command array, where the
    // previous StringBuilder + substring(1) approach would have thrown.
    this._commandLine = String.join(" ", _command);

    if (_charset == null) {
        throw new NullPointerException("charset is not defined : " + this);
    }

    _commandExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        // AtomicInteger instead of "volatile int _count++": ++ on a volatile is
        // a non-atomic read-modify-write. Fully qualified to avoid a new import.
        private final java.util.concurrent.atomic.AtomicInteger _count = new java.util.concurrent.atomic.AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "CommandExecutor-" + _count.getAndIncrement());
        }
    });
}

From source file:com.aol.advertising.qiao.util.CommonUtils.java

/**
 * Builds an {@link ExecutorService} backed by a single worker thread whose
 * name is derived from the supplied logical name.
 *
 * @param threadName logical name, resolved via {@code CommonUtils.resolveThreadName}
 * @return a new single-threaded executor
 */
public static ExecutorService createSingleThreadExecutor(final String threadName) {
    ThreadFactory namedThreadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable runnable) {
            return new Thread(runnable, CommonUtils.resolveThreadName(threadName));
        }
    };
    return Executors.newSingleThreadExecutor(namedThreadFactory);
}

From source file:com.github.brandtg.switchboard.TestMysqlReplicationApplier.java

/**
 * Verifies that rows inserted into MySQL can be restored by replaying a copy
 * of the binlog through {@code MysqlReplicationApplier}.
 *
 * Flow: insert 10 rows -> copy the active binlog to a temp file -> wipe the
 * database -> replay the copied binlog on a background thread -> poll (up to
 * ~10s) until all 10 rows reappear; otherwise fail.
 */
@Test
public void testRestoreFromBinlog() throws Exception {
    MysqlReplicationApplier applier = null;
    try (Connection conn = DriverManager.getConnection(jdbc, "root", "")) {
        // Write some rows, so we have binlog entries
        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO simple VALUES(?, ?)");
        for (int i = 0; i < 10; i++) {
            pstmt.setInt(1, i);
            pstmt.setInt(2, i);
            pstmt.execute();
        }

        // Copy the binlog somewhere
        Statement stmt = conn.createStatement();
        ResultSet rset = stmt.executeQuery("SHOW BINARY LOGS");
        rset.next();
        String binlogName = rset.getString("Log_name");
        rset = stmt.executeQuery("SELECT @@datadir");
        rset.next();
        String dataDir = rset.getString("@@datadir");
        File copyFile = new File(System.getProperty("java.io.tmpdir"),
                TestMysqlReplicationApplier.class.getName());
        FileUtils.copyFile(new File(dataDir + binlogName), copyFile);

        // Clear everything in MySQL
        resetMysql();

        // Get input stream, skipping and checking binlog magic number
        InputStream inputStream = new FileInputStream(copyFile);
        byte[] magic = new byte[MySQLConstants.BINLOG_MAGIC.length];
        int bytesRead = inputStream.read(magic);
        Assert.assertEquals(bytesRead, MySQLConstants.BINLOG_MAGIC.length);
        Assert.assertTrue(CodecUtils.equals(magic, MySQLConstants.BINLOG_MAGIC));

        // Restore from binlog, applying events on a daemon background thread so
        // a hung applier cannot keep the JVM alive.
        PoolingDataSource<PoolableConnection> dataSource = getDataSource();
        applier = new MysqlReplicationApplier(inputStream, dataSource);
        ExecutorService executorService = Executors.newSingleThreadExecutor(new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setDaemon(true);
                return t;
            }
        });
        executorService.submit(applier);

        // Poll until we have restored
        long startTime = System.currentTimeMillis();
        long currentTime = startTime;
        do {
            stmt = conn.createStatement();
            rset = stmt.executeQuery("SELECT COUNT(*) FROM test.simple");
            rset.next();
            long count = rset.getLong(1);
            if (count == 10) {
                // Success: all rows restored. Returning here skips the final
                // Assert.fail; the finally block still shuts down the applier.
                return;
            }
            Thread.sleep(1000);
            currentTime = System.currentTimeMillis();
        } while (currentTime - startTime < 10000);
    } finally {
        if (applier != null) {
            applier.shutdown();
        }
    }

    // Reached only when the polling loop above timed out without seeing 10 rows.
    Assert.fail("Timed out when polling");
}