List of usage examples for java.util.concurrent.ThreadFactory
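All of the examples below follow the same basic pattern: an anonymous ThreadFactory that gives its threads a recognizable name (and often marks them as daemons) before handing them to an Executors factory method. As a point of reference, here is a minimal, self-contained sketch of that pattern; the class name NamedThreadFactory and the thread-name prefix are illustrative and do not come from any of the projects listed below.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative reusable factory: names each thread and optionally makes it a daemon.
public class NamedThreadFactory implements ThreadFactory {
    private final String prefix;
    private final boolean daemon;
    private final AtomicInteger counter = new AtomicInteger();

    public NamedThreadFactory(String prefix, boolean daemon) {
        this.prefix = prefix;
        this.daemon = daemon;
    }

    @Override
    public Thread newThread(Runnable r) {
        // AtomicInteger keeps the numbering correct even if newThread is called concurrently.
        Thread t = new Thread(r, prefix + "-" + counter.incrementAndGet());
        t.setDaemon(daemon);
        return t;
    }

    public static void main(String[] args) throws InterruptedException {
        // Usage: pass the factory to any of the Executors factory methods.
        ExecutorService pool = Executors.newFixedThreadPool(2, new NamedThreadFactory("example-worker", true));
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS); // daemon threads will not keep the JVM alive on their own
    }
}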
From source file:com.clustercontrol.util.CommandExecutor.java
public CommandExecutor(String[] command, Charset charset, long timeout, int bufferSize) throws HinemosUnknown {
    this._command = command;
    this._charset = charset;
    this._timeout = timeout;
    this._bufferSize = bufferSize;

    log.debug("initializing " + this);
    if (_command == null) {
        throw new NullPointerException("command is not defined : " + this);
    }
    StringBuilder commandStr = new StringBuilder();
    for (String arg : _command) {
        commandStr.append(' ');
        commandStr.append(arg);
    }
    this._commandLine = commandStr.substring(1);

    if (_charset == null) {
        throw new NullPointerException("charset is not defined : " + this);
    }

    _commandExecutor = Executors.newSingleThreadExecutor(new ThreadFactory() {
        private volatile int _count = 0;

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "CommandExecutor-" + _count++);
        }
    });
}
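The factory above numbers its threads with a volatile int and _count++. volatile only guarantees visibility, not atomicity, so concurrent calls to newThread could in principle produce duplicate numbers; with a single-thread executor this is mostly harmless, since replacement threads are created one at a time. A variant using AtomicInteger, shown here only as a sketch and not as the Hinemos project's actual code, avoids the issue entirely.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative variant of the factory above with a race-free counter.
class CommandExecutorThreads {
    static ExecutorService newCommandExecutor() {
        return Executors.newSingleThreadExecutor(new ThreadFactory() {
            private final AtomicInteger count = new AtomicInteger();

            @Override
            public Thread newThread(Runnable r) {
                return new Thread(r, "CommandExecutor-" + count.getAndIncrement());
            }
        });
    }
}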
From source file:com.reactivetechnologies.platform.interceptor.AbstractOutboundChannel.java
@PostConstruct
protected void init() {
    switch (strategy) {
    case PARALLEL_ORDERED:
        if (strategy.getComparator() != null) {
            Collections.sort(feeders, strategy.getComparator());
        }
        threads = Executors.newFixedThreadPool(strategy.getNoOfThreads(), new ThreadFactory() {
            int n = 0;

            @Override
            public Thread newThread(Runnable arg0) {
                Thread t = new Thread(arg0, "OutboundChannel-Worker-" + (n++));
                return t;
            }
        });
        parallel = true;
        break;
    case PARALLEL_RANDOM:
        threads = Executors.newFixedThreadPool(strategy.getNoOfThreads(), new ThreadFactory() {
            int n = 0;

            @Override
            public Thread newThread(Runnable arg0) {
                Thread t = new Thread(arg0, "OutboundChannel-Worker-" + (n++));
                return t;
            }
        });
        parallel = true;
        break;
    case SEQUENTIAL_ORDERED:
        if (strategy.getComparator() != null) {
            Collections.sort(feeders, strategy.getComparator());
        }
        break;
    case SEQUENTIAL_RANDOM:
        break;
    default:
        break;
    }
    log.info("-- New Outbound channel created [" + name() + "]");
    StringBuilder s = new StringBuilder(" { ");
    for (OutboundInterceptor<Serializable> out : feeders) {
        s.append("\n\t").append(out.name()).append(" - IN:").append(out.type());
    }
    s.append("\n}");
    log.info("[" + name() + "] Ready to outflow. No. of feeders " + feeders.size() + s);
}
From source file:com.aol.advertising.qiao.util.CommonUtils.java
public static ExecutorService createSingleThreadExecutor(final String threadName) {
    return Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, CommonUtils.resolveThreadName(threadName));
        }
    });
}
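A typical call site for a helper like this might look as follows; the thread name "qiao-flusher" and the task are made up for illustration and assume the CommonUtils class above is on the classpath.

import java.util.concurrent.ExecutorService;

// Hypothetical call site for the helper above.
class CommonUtilsUsageExample {
    public static void main(String[] args) {
        ExecutorService flusher = CommonUtils.createSingleThreadExecutor("qiao-flusher");
        flusher.execute(() -> System.out.println("running on " + Thread.currentThread().getName()));
        flusher.shutdown();
    }
}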
From source file:com.github.brandtg.switchboard.TestMysqlReplicationApplier.java
@Test
public void testRestoreFromBinlog() throws Exception {
    MysqlReplicationApplier applier = null;
    try (Connection conn = DriverManager.getConnection(jdbc, "root", "")) {
        // Write some rows, so we have binlog entries
        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO simple VALUES(?, ?)");
        for (int i = 0; i < 10; i++) {
            pstmt.setInt(1, i);
            pstmt.setInt(2, i);
            pstmt.execute();
        }

        // Copy the binlog somewhere
        Statement stmt = conn.createStatement();
        ResultSet rset = stmt.executeQuery("SHOW BINARY LOGS");
        rset.next();
        String binlogName = rset.getString("Log_name");
        rset = stmt.executeQuery("SELECT @@datadir");
        rset.next();
        String dataDir = rset.getString("@@datadir");
        File copyFile = new File(System.getProperty("java.io.tmpdir"),
                TestMysqlReplicationApplier.class.getName());
        FileUtils.copyFile(new File(dataDir + binlogName), copyFile);

        // Clear everything in MySQL
        resetMysql();

        // Get input stream, skipping and checking binlog magic number
        InputStream inputStream = new FileInputStream(copyFile);
        byte[] magic = new byte[MySQLConstants.BINLOG_MAGIC.length];
        int bytesRead = inputStream.read(magic);
        Assert.assertEquals(bytesRead, MySQLConstants.BINLOG_MAGIC.length);
        Assert.assertTrue(CodecUtils.equals(magic, MySQLConstants.BINLOG_MAGIC));

        // Restore from binlog
        PoolingDataSource<PoolableConnection> dataSource = getDataSource();
        applier = new MysqlReplicationApplier(inputStream, dataSource);
        ExecutorService executorService = Executors.newSingleThreadExecutor(new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setDaemon(true);
                return t;
            }
        });
        executorService.submit(applier);

        // Poll until we have restored
        long startTime = System.currentTimeMillis();
        long currentTime = startTime;
        do {
            stmt = conn.createStatement();
            rset = stmt.executeQuery("SELECT COUNT(*) FROM test.simple");
            rset.next();
            long count = rset.getLong(1);
            if (count == 10) {
                return;
            }
            Thread.sleep(1000);
            currentTime = System.currentTimeMillis();
        } while (currentTime - startTime < 10000);
    } finally {
        if (applier != null) {
            applier.shutdown();
        }
    }
    Assert.fail("Timed out when polling");
}
From source file:org.apache.activemq.usecases.ConcurrentProducerDurableConsumerTest.java
public void testSendRateWithActivatingConsumers() throws Exception {
    final Destination destination = createDestination();
    final ConnectionFactory factory = createConnectionFactory();
    startInactiveConsumers(factory, destination);

    Connection connection = factory.createConnection();
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageProducer producer = createMessageProducer(session, destination);

    // preload the durable consumers
    double[] inactiveConsumerStats = produceMessages(destination, 500, 10, session, producer, null);
    LOG.info("With inactive consumers: ave: " + inactiveConsumerStats[1] + ", max: " + inactiveConsumerStats[0]
            + ", multiplier: " + (inactiveConsumerStats[0] / inactiveConsumerStats[1]));

    // periodically start a durable sub that has a backlog
    final int consumersToActivate = 5;
    final Object addConsumerSignal = new Object();
    Executors.newCachedThreadPool(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "ActivateConsumer" + this);
        }
    }).execute(new Runnable() {
        @Override
        public void run() {
            try {
                MessageConsumer consumer = null;
                for (int i = 0; i < consumersToActivate; i++) {
                    LOG.info("Waiting for add signal from producer...");
                    synchronized (addConsumerSignal) {
                        addConsumerSignal.wait(30 * 60 * 1000);
                    }
                    TimedMessageListener listener = new TimedMessageListener();
                    consumer = createDurableSubscriber(factory.createConnection(), destination,
                            "consumer" + (i + 1));
                    LOG.info("Created consumer " + consumer);
                    consumer.setMessageListener(listener);
                    consumers.put(consumer, listener);
                }
            } catch (Exception e) {
                LOG.error("failed to start consumer", e);
            }
        }
    });

    double[] statsWithActive = produceMessages(destination, 500, 10, session, producer, addConsumerSignal);
    LOG.info(" with concurrent activate, ave: " + statsWithActive[1] + ", max: " + statsWithActive[0]
            + ", multiplier: " + (statsWithActive[0] / statsWithActive[1]));

    while (consumers.size() < consumersToActivate) {
        TimeUnit.SECONDS.sleep(2);
    }

    long timeToFirstAccumulator = 0;
    for (TimedMessageListener listener : consumers.values()) {
        long time = listener.getFirstReceipt();
        timeToFirstAccumulator += time;
        LOG.info("Time to first " + time);
    }
    LOG.info("Ave time to first message =" + timeToFirstAccumulator / consumers.size());

    for (TimedMessageListener listener : consumers.values()) {
        LOG.info("Ave batch receipt time: " + listener.waitForReceivedLimit(10000) + " max receipt: "
                + listener.maxReceiptTime);
    }

    // assertTrue("max (" + statsWithActive[0] + ") within reasonable multiplier of ave (" + statsWithActive[1] + ")",
    //         statsWithActive[0] < 5 * statsWithActive[1]);

    // compare no active to active
    LOG.info("Ave send time with active: " + statsWithActive[1] + " as multiplier of ave with none active: "
            + inactiveConsumerStats[1] + ", multiplier=" + (statsWithActive[1] / inactiveConsumerStats[1]));

    assertTrue(
            "Ave send time with active: " + statsWithActive[1]
                    + " within reasonable multpler of ave with none active: " + inactiveConsumerStats[1]
                    + ", multiplier " + (statsWithActive[1] / inactiveConsumerStats[1]),
            statsWithActive[1] < 15 * inactiveConsumerStats[1]);
}
From source file:edu.umass.cs.nio.JSONMessenger.java
/**
 * @param niot
 * @param numWorkers
 */
@SuppressWarnings("unchecked")
public JSONMessenger(final InterfaceNIOTransport<NodeIDType, JSONObject> niot, int numWorkers) {
    // to not create thread pools unnecessarily
    if (niot instanceof JSONMessenger)
        this.execpool = ((JSONMessenger<NodeIDType>) niot).execpool;
    else
        this.execpool = Executors.newScheduledThreadPool(5, new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread thread = Executors.defaultThreadFactory().newThread(r);
                thread.setName(JSONMessenger.class.getSimpleName() + niot.getMyID() + thread.getName());
                return thread;
            }
        });

    nioTransport = (InterfaceNIOTransport<NodeIDType, JSONObject>) niot;
    this.workers = new MessageNIOTransport[numWorkers];
    for (int i = 0; i < workers.length; i++) {
        try {
            log.info((this + " starting worker with ssl mode " + this.nioTransport.getSSLMode()));
            this.workers[i] = new MessageNIOTransport<NodeIDType, JSONObject>(null, this.getNodeConfig(),
                    this.nioTransport.getSSLMode());
            this.workers[i].setName(JSONMessenger.class.getSimpleName() + niot.getMyID() + "_send_worker" + i);
        } catch (IOException e) {
            this.workers[i] = null;
            e.printStackTrace();
        }
    }
}
From source file:org.madsonic.service.PodcastService.java
public PodcastService() {
    ThreadFactory threadFactory = new ThreadFactory() {
        public Thread newThread(Runnable r) {
            Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            return t;
        }
    };
    refreshExecutor = Executors.newFixedThreadPool(5, threadFactory);
    downloadExecutor = Executors.newFixedThreadPool(4, threadFactory); // settingsService.getPodcastEpisodeDownloadLimit()
    scheduledExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory);
}
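This example decorates Executors.defaultThreadFactory(), which keeps the default "pool-N-thread-M" names, and shares one daemon factory across three executors. If readable names are wanted as well, something like Guava's ThreadFactoryBuilder combines naming and the daemon flag in one place; the sketch below is only an illustration (it assumes Guava on the classpath and is not part of the Madsonic code above), and the name format is made up.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

// Sketch: same daemon behaviour as above, plus readable thread names via Guava.
class PodcastExecutorsSketch {
    static ExecutorService newRefreshExecutor() {
        ThreadFactory threadFactory = new ThreadFactoryBuilder()
                .setNameFormat("podcast-refresh-%d") // name format is illustrative
                .setDaemon(true)
                .build();
        return Executors.newFixedThreadPool(5, threadFactory);
    }
}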
From source file:org.energy_home.jemma.javagal.layers.object.WrapperWSNNode.java
public WrapperWSNNode(GalController _gal, final String networkAdd) {
    gal = _gal;
    this._numberOfAttempt = 0;
    this.dead = false;
    freshnessTPool = new ScheduledThreadPoolExecutor(1, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(r, "THPool-Freshness[" + networkAdd + "]");
        }
    });
    discoveryTPool = new ScheduledThreadPoolExecutor(1, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(r, "THPool-Discovery[" + networkAdd + "]");
        }
    });
    forcePingTPool = new ScheduledThreadPoolExecutor(1, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(r, "THPool-ForcePing[" + networkAdd + "]");
        }
    });
}
From source file:com.jgoetsch.eventtrader.source.SocketIOWebSocketMsgSource.java
@Override
protected void receiveMsgs() {
    final String baseThreadName = Thread.currentThread().getName();
    ThreadRenamingRunnable.setThreadNameDeterminer(ThreadNameDeterminer.CURRENT);
    ThreadFactory threadFactory = new ThreadFactory() {
        private AtomicInteger n = new AtomicInteger();

        public Thread newThread(Runnable r) {
            return new Thread(r, baseThreadName + "-w-" + n.incrementAndGet());
        }
    };
    bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(
            Executors.newCachedThreadPool(threadFactory), Executors.newCachedThreadPool(threadFactory)));
    try {
        URI url = new URI(getTokenUrl());
        final WebSocketClientHandshaker handshaker = new WebSocketClientHandshakerFactory().newHandshaker(url,
                WebSocketVersion.V13, null, false, null);
        bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
            public ChannelPipeline getPipeline() throws Exception {
                ChannelPipeline pipeline = Channels.pipeline();
                pipeline.addLast("decoder", new HttpResponseDecoder());
                pipeline.addLast("encoder", new HttpRequestEncoder());
                pipeline.addLast("ws-handler", new WebSocketClientHandler(handshaker));
                pipeline.addLast("sio-handler", new SocketIOClientHandler());
                return pipeline;
            }
        });
        ChannelFuture future = bootstrap
                .connect(new InetSocketAddress(url.getHost(), url.getPort() == -1 ? 80 : url.getPort()));
        future.syncUninterruptibly();
        ch = future.getChannel();
        handshaker.handshake(ch).syncUninterruptibly();
        ch.getCloseFuture().awaitUninterruptibly();
    } catch (URISyntaxException use) {
        log.error("Invalid URL: {}", getUrl(), use);
    } catch (Exception e) {
        log.error("Error getting token", e);
    } finally {
        if (ch != null)
            ch.close();
        bootstrap.releaseExternalResources();
    }
}
From source file:org.pentaho.reporting.platform.plugin.async.PentahoAsyncExecutor.java
/**
 * @param capacity               thread pool capacity
 * @param autoSchedulerThreshold quantity of rows after which reports are automatically scheduled
 */
public PentahoAsyncExecutor(final int capacity, final int autoSchedulerThreshold) {
    this.autoSchedulerThreshold = autoSchedulerThreshold;
    log.info("Initialized reporting async execution fixed thread pool with capacity: " + capacity);
    executorService = new DelegatedListenableExecutor(new ThreadPoolExecutor(capacity, capacity, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(), new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread thread = Executors.defaultThreadFactory().newThread(r);
                    thread.setDaemon(true);
                    thread.setName("PentahoAsyncExecutor Thread Pool");
                    return thread;
                }
            }));
    PentahoSystem.addLogoutListener(this);
    this.writeToJcrListeners = new ConcurrentHashMap<>();
    this.schedulingLocationListener = new MemorizeSchedulingLocationListener();
}
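Here every pool thread receives the same name, "PentahoAsyncExecutor Thread Pool", so individual workers cannot be told apart in a thread dump. A variant with a per-thread suffix is sketched below; it is only an illustration of the same daemon/naming idea, not the Pentaho plugin's actual code.

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: same daemon setup as above, but each thread gets a unique suffix
// so individual workers can be distinguished in a thread dump.
class PentahoAsyncThreadFactory implements ThreadFactory {
    private final AtomicInteger counter = new AtomicInteger();

    @Override
    public Thread newThread(Runnable r) {
        Thread thread = Executors.defaultThreadFactory().newThread(r);
        thread.setDaemon(true);
        thread.setName("PentahoAsyncExecutor-" + counter.incrementAndGet());
        return thread;
    }
}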