List of usage examples for java.util.concurrent.LinkedBlockingQueue: the LinkedBlockingQueue() constructor
public LinkedBlockingQueue()
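The no-argument constructor creates an unbounded queue (capacity Integer.MAX_VALUE), so producers never block on capacity while consumers block or time out waiting for elements. Before the project examples below, here is a minimal self-contained sketch of the producer/consumer handshake this constructor is typically used for; the class name and string values are illustrative only and are not taken from any of the projects listed here.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class LinkedBlockingQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        // Unbounded queue: put()/add() never block on capacity,
        // take()/poll() block or time out waiting for elements.
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();

        // Producer thread hands a result to the main thread through the queue.
        Thread producer = new Thread(() -> {
            try {
                queue.put("result");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // Consumer side: wait up to 5 seconds for an element, the same
        // poll(timeout) pattern most of the tests below use.
        String value = queue.poll(5, TimeUnit.SECONDS);
        System.out.println("received: " + value);
        producer.join();
    }
}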
From source file:io.nats.client.ITClusterTest.java
// Two unbounded LinkedBlockingQueues act as channels between the client threads and the
// test thread: dch carries disconnect signals, rch carries the URLs reported on reconnect.
@Test
public void testHotSpotReconnect() throws InterruptedException {
    int numClients = 100;
    ExecutorService executor = Executors.newFixedThreadPool(numClients,
            new NatsThreadFactory("testhotspotreconnect"));
    final BlockingQueue<String> rch = new LinkedBlockingQueue<String>();
    final BlockingQueue<Integer> dch = new LinkedBlockingQueue<Integer>();
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {
            try (NatsServer s3 = runServerOnPort(1226)) {
                final class NATSClient implements Runnable {
                    Connection nc = null;
                    final AtomicInteger numReconnects = new AtomicInteger(0);
                    final AtomicInteger numDisconnects = new AtomicInteger(0);
                    String currentUrl = null;
                    final AtomicInteger instance = new AtomicInteger(-1);
                    final Options opts;

                    NATSClient(int inst) {
                        this.instance.set(inst);
                        opts = defaultOptions();
                        opts.servers = Nats.processUrlArray(testServers);
                        opts.disconnectedCb = new DisconnectedCallback() {
                            public void onDisconnect(ConnectionEvent event) {
                                numDisconnects.incrementAndGet();
                                try {
                                    dch.put(instance.get());
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                                nc.setDisconnectedCallback(null);
                            }
                        };
                        opts.reconnectedCb = new ReconnectedCallback() {
                            public void onReconnect(ConnectionEvent event) {
                                numReconnects.incrementAndGet();
                                currentUrl = nc.getConnectedUrl();
                                try {
                                    rch.put(currentUrl);
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                            }
                        };
                    }

                    @Override
                    public void run() {
                        try {
                            nc = opts.connect();
                            assertTrue(!nc.isClosed());
                            assertNotNull(nc.getConnectedUrl());
                            currentUrl = nc.getConnectedUrl();
                            // System.err.println("Instance " + instance + " connected to " + currentUrl);
                            while (!shutdown.get()) {
                                sleep(10);
                            }
                            nc.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }

                    public synchronized boolean isConnected() {
                        return (nc != null && !nc.isClosed());
                    }

                    public void shutdown() {
                        shutdown.set(true);
                    }
                }

                List<NATSClient> tasks = new ArrayList<NATSClient>(numClients);
                for (int i = 0; i < numClients; i++) {
                    NATSClient task = new NATSClient(i);
                    tasks.add(task);
                    executor.submit(task);
                }

                Map<String, Integer> cs = new HashMap<String, Integer>();
                int numReady = 0;
                while (numReady < numClients) {
                    numReady = 0;
                    for (NATSClient cli : tasks) {
                        if (cli.isConnected()) {
                            numReady++;
                        }
                    }
                    sleep(100);
                }

                s1.shutdown();
                sleep(1000);

                int disconnected = 0;
                // wait for disconnects
                while (dch.size() > 0 && disconnected < numClients) {
                    Integer instance = -1;
                    instance = dch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for disconnect signal", instance);
                    disconnected++;
                }
                assertTrue(disconnected > 0);

                int reconnected = 0;
                // wait for reconnects
                for (int i = 0; i < disconnected; i++) {
                    String url = null;
                    while (rch.size() == 0) {
                        sleep(50);
                    }
                    url = rch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for reconnect signal", url);
                    reconnected++;
                    Integer count = cs.get(url);
                    if (count != null) {
                        cs.put(url, ++count);
                    } else {
                        cs.put(url, 1);
                    }
                }

                for (NATSClient client : tasks) {
                    client.shutdown();
                }
                executor.shutdownNow();
                assertTrue(executor.awaitTermination(2, TimeUnit.SECONDS));

                assertEquals(disconnected, reconnected);
                int numServers = 2;
                assertEquals(numServers, cs.size());
                int expected = numClients / numServers;
                // We expect a 40 percent variance
                int var = (int) ((float) expected * 0.40);
                int delta = Math.abs(cs.get(testServers[2]) - cs.get(testServers[4]));
                // System.err.printf("var = %d, delta = %d\n", var, delta);
                if (delta > var) {
                    String str = String.format("Connected clients to servers out of range: %d/%d", delta, var);
                    fail(str);
                }
            }
        }
    }
}
From source file:net.tomp2p.simgrid.SimGridTomP2P.java
// Lazily registers an unbounded LinkedBlockingQueue per sender and returns the next
// pending message, or null if nothing is queued (poll() does not block).
public static SendingMessage getPendingMessag(Number160 senderID) throws InterruptedException {
    BlockingQueue<SendingMessage> queue = pendingMessages.get(senderID);
    if (queue == null) {
        queue = new LinkedBlockingQueue<SendingMessage>();
        pendingMessages.put(senderID, queue);
    }
    return queue.poll();
}
From source file:com.facebook.LinkBench.LinkBenchDriverInj.java
// chunk_q is an unbounded LinkedBlockingQueue of LoadChunk work items shared between
// enqueueLoadWork() and the LinkBenchLoad loader threads.
void load() throws IOException, InterruptedException, Throwable {
    if (!doLoad) {
        logger.info("Skipping load data per the cmdline arg");
        return;
    }

    // load data
    int nLinkLoaders = ConfigUtil.getInt(props, Config.NUM_LOADERS);
    boolean bulkLoad = true;
    BlockingQueue<LoadChunk> chunk_q = new LinkedBlockingQueue<LoadChunk>();

    // max id1 to generate
    long maxid1 = ConfigUtil.getLong(props, Config.MAX_ID);
    // id1 at which to start
    long startid1 = ConfigUtil.getLong(props, Config.MIN_ID);

    // Create loaders
    logger.info("Starting loaders " + nLinkLoaders);
    logger.debug("Bulk Load setting: " + bulkLoad);

    Random masterRandom = createMasterRNG(props, Config.LOAD_RANDOM_SEED);
    boolean genNodes = ConfigUtil.getBool(props, Config.GENERATE_NODES);
    int nTotalLoaders = genNodes ? nLinkLoaders + 1 : nLinkLoaders;

    LatencyStats latencyStats = new LatencyStats(nTotalLoaders);
    List<Runnable> loaders = new ArrayList<Runnable>(nTotalLoaders);

    LoadProgress loadTracker = LoadProgress.create(logger, props);
    for (int i = 0; i < nLinkLoaders; i++) {
        LinkStore linkStore = createLinkStore();
        bulkLoad = bulkLoad && linkStore.bulkLoadBatchSize() > 0;
        LinkBenchLoad l = new LinkBenchLoad(linkStore, props, latencyStats, csvStreamFile, i,
                maxid1 == startid1 + 1, chunk_q, loadTracker);
        loaders.add(l);
    }

    if (genNodes) {
        logger.info("Will generate graph nodes during loading");
        int loaderId = nTotalLoaders - 1;
        NodeStore nodeStore = createNodeStore(null);
        Random rng = new Random(masterRandom.nextLong());
        loaders.add(new NodeLoader(props, logger, nodeStore, rng, latencyStats, csvStreamFile, loaderId));
    }

    enqueueLoadWork(chunk_q, startid1, maxid1, nLinkLoaders, new Random(masterRandom.nextLong()));

    // run loaders
    loadTracker.startTimer();
    long loadTime = concurrentExec(loaders, false, new Random(masterRandom.nextLong()));

    long expectedNodes = maxid1 - startid1;
    long actualLinks = 0;
    long actualNodes = 0;
    for (final Runnable l : loaders) {
        if (l instanceof LinkBenchLoad) {
            actualLinks += ((LinkBenchLoad) l).getLinksLoaded();
        } else {
            assert (l instanceof NodeLoader);
            actualNodes += ((NodeLoader) l).getNodesLoaded();
        }
    }

    latencyStats.displayLatencyStats();
    if (csvStatsFile != null) {
        latencyStats.printCSVStats(csvStatsFile, true);
    }

    double loadTime_s = (loadTime / 1000.0);
    logger.info(String.format(
            "LOAD PHASE COMPLETED. " + " Loaded %d nodes (Expected %d)."
                    + " Loaded %d links (%.2f links per node). " + " Took %.1f seconds. Links/second = %d",
            actualNodes, expectedNodes, actualLinks, actualLinks / (double) actualNodes, loadTime_s,
            (long) Math.round(actualLinks / loadTime_s)));
}
From source file:com.barchart.netty.server.http.TestHttpServer.java
@Test
public void testTooManyConnections() throws Exception {
    final Queue<Integer> status = new LinkedBlockingQueue<Integer>();

    final Runnable r = new Runnable() {
        @Override
        public void run() {
            try {
                final HttpResponse response = client
                        .execute(new HttpGet("http://localhost:" + port + "/client-disconnect"));
                status.add(response.getStatusLine().getStatusCode());
                EntityUtils.consume(response.getEntity());
            } catch (final Exception e) {
                e.printStackTrace();
            }
        }
    };

    final Thread t1 = new Thread(r);
    t1.start();
    final Thread t2 = new Thread(r);
    t2.start();

    t1.join();
    t2.join();

    assertEquals(2, status.size());
    assertTrue(status.contains(200));
    assertTrue(status.contains(503));
}
From source file:org.apache.accumulo.core.file.rfile.MultiThreadedRFileTest.java
// An unbounded LinkedBlockingQueue serves as the work queue of the ThreadPoolExecutor
// that runs the concurrent RFile reader tasks.
@SuppressFBWarnings(value = "INFORMATION_EXPOSURE_THROUGH_AN_ERROR_MESSAGE",
        justification = "information put into error message is safe and used for testing")
@Test
public void testMultipleReaders() throws IOException {
    final List<Throwable> threadExceptions = Collections.synchronizedList(new ArrayList<Throwable>());
    Map<String, MutableInt> messages = new HashMap<>();
    Map<String, String> stackTrace = new HashMap<>();

    final TestRFile trfBase = new TestRFile(conf);

    writeData(trfBase);

    trfBase.openReader();
    try {
        validate(trfBase);

        final TestRFile trfBaseCopy = trfBase.deepCopy();
        validate(trfBaseCopy);

        // now start up multiple RFile deepcopies
        int maxThreads = 10;
        String name = "MultiThreadedRFileTestThread";
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads + 1, maxThreads + 1, 5 * 60,
                TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamingThreadFactory(name));
        pool.allowCoreThreadTimeOut(true);
        try {
            Runnable runnable = () -> {
                try {
                    TestRFile trf = trfBase;
                    synchronized (trfBaseCopy) {
                        trf = trfBaseCopy.deepCopy();
                    }
                    validate(trf);
                } catch (Throwable t) {
                    threadExceptions.add(t);
                }
            };
            for (int i = 0; i < maxThreads; i++) {
                pool.submit(runnable);
            }
        } finally {
            pool.shutdown();
            try {
                pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        for (Throwable t : threadExceptions) {
            String msg = t.getClass() + " : " + t.getMessage();
            if (!messages.containsKey(msg)) {
                messages.put(msg, new MutableInt(1));
            } else {
                messages.get(msg).increment();
            }
            StringWriter string = new StringWriter();
            PrintWriter writer = new PrintWriter(string);
            t.printStackTrace(writer);
            writer.flush();
            stackTrace.put(msg, string.getBuffer().toString());
        }
    } finally {
        trfBase.closeReader();
        trfBase.close();
    }

    for (String message : messages.keySet()) {
        LOG.error(messages.get(message) + ": " + message);
        LOG.error(stackTrace.get(message));
    }

    assertTrue(threadExceptions.isEmpty());
}
From source file:com.netflix.curator.framework.recipes.leader.TestLeaderSelector.java
// leaderList is an unbounded LinkedBlockingQueue through which each takeLeadership()
// callback hands the current leader's index back to the test thread.
@SuppressWarnings({ "ForLoopReplaceableByForEach" })
@Test
public void testRotatingLeadership() throws Exception {
    final int LEADER_QTY = 5;
    final int REPEAT_QTY = 3;

    final Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final BlockingQueue<Integer> leaderList = new LinkedBlockingQueue<Integer>();
        List<LeaderSelector> selectors = Lists.newArrayList();
        for (int i = 0; i < LEADER_QTY; ++i) {
            final int ourIndex = i;
            LeaderSelector leaderSelector = new LeaderSelector(client, PATH_NAME,
                    new LeaderSelectorListener() {
                        @Override
                        public void takeLeadership(CuratorFramework client) throws Exception {
                            timing.sleepABit();
                            leaderList.add(ourIndex);
                        }

                        @Override
                        public void stateChanged(CuratorFramework client, ConnectionState newState) {
                        }
                    });
            selectors.add(leaderSelector);
        }

        List<Integer> localLeaderList = Lists.newArrayList();
        for (int i = 1; i <= REPEAT_QTY; ++i) {
            for (LeaderSelector leaderSelector : selectors) {
                if (i > 1) {
                    leaderSelector.requeue();
                } else {
                    leaderSelector.start();
                }
            }

            while (localLeaderList.size() != (i * selectors.size())) {
                Integer polledIndex = leaderList.poll(10, TimeUnit.SECONDS);
                Assert.assertNotNull(polledIndex);
                localLeaderList.add(polledIndex);
            }
            timing.sleepABit();
        }

        for (LeaderSelector leaderSelector : selectors) {
            leaderSelector.close();
        }
        System.out.println(localLeaderList);

        for (int i = 0; i < REPEAT_QTY; ++i) {
            Set<Integer> uniques = Sets.newHashSet();
            for (int j = 0; j < selectors.size(); ++j) {
                Assert.assertTrue(localLeaderList.size() > 0);
                int thisIndex = localLeaderList.remove(0);
                Assert.assertFalse(uniques.contains(thisIndex));
                uniques.add(thisIndex);
            }
        }
    } finally {
        client.close();
    }
}
From source file:de.tu_dortmund.ub.data.dswarm.TaskProcessingUnit.java
// The fixed-size ThreadPoolExecutor is backed by an unbounded LinkedBlockingQueue,
// so every TPUTask submitted via invokeAll() can be queued without rejection.
private static String executeTPUTask(final String[] watchFolderFiles, final String resourceWatchFolder,
        final Optional<String> optionalOutputDataModelID, final Optional<String> optionalExportMimeType,
        final Optional<String> optionalExportFileExtension, final Integer engineThreads,
        final String serviceName, final Properties config) throws Exception {

    // create job list
    final LinkedList<Callable<String>> transforms = new LinkedList<>();

    int cnt = 1;
    for (final String watchFolderFile : watchFolderFiles) {
        LOG.info("[{}][{}] do TPU task execution '{}' for file '{}'", serviceName, cnt, cnt, watchFolderFile);

        transforms.add(new TPUTask(config, watchFolderFile, resourceWatchFolder, optionalOutputDataModelID,
                optionalExportMimeType, optionalExportFileExtension, serviceName, cnt));

        cnt++;
    }

    // work on jobs
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(engineThreads, engineThreads, 0L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());

    try {
        final List<Future<String>> futureList = pool.invokeAll(transforms);

        final StringBuilder resultSB = new StringBuilder();
        for (final Future<String> f : futureList) {
            final String message = f.get();
            LOG.info(message);
            resultSB.append(message).append("\n");
        }

        return resultSB.toString();
    } catch (final Exception e) {
        LOG.error("something went wrong", e);
        throw e;
    } finally {
        pool.shutdown();
    }
}
From source file:test.com.azaptree.services.executor.ThreadPoolExecutorTest.java
@Test
public void testThreadPoolConfigEquals() {
    Assert.assertEquals(new ThreadPoolConfig("azap", true), new ThreadPoolConfig("azap", true));
    Assert.assertEquals(new ThreadPoolConfig("azap"), new ThreadPoolConfig("azap"));
    Assert.assertEquals(new ThreadPoolConfig(), new ThreadPoolConfig());
    Assert.assertNotEquals(new ThreadPoolConfig(), new ThreadPoolConfig("azap"));

    final ThreadPoolConfig[] configs = new ThreadPoolConfig[2];
    for (int i = 0; i < 2; i++) {
        configs[i] = new ThreadPoolConfig("azap", 10, 50, true);
        configs[i].setAllowCoreThreadTimeOut(true);
        configs[i].setWorkQueue(new LinkedBlockingQueue<Runnable>());
    }
    Assert.assertEquals(configs[0], configs[1]);
}
From source file:com.test.HibernateDerbyLockingTest.java
// The LinkedBlockingQueue records the order in which the two worker transactions obtain
// the pessimistic (LockMode.UPGRADE) lock; the test then asserts "one" arrived before "two".
public void runTest(final SessionFactory sessionFactory) throws Exception {
    Person person = new Person();
    Session session = sessionFactory.openSession();
    session.save(person);
    session.flush();
    session.close();

    final String id = person.getId();
    final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();
    ExecutorService executorService = Executors.newCachedThreadPool();
    Future<?> submit = executorService.submit(new Runnable() {
        public void run() {
            Session session = sessionFactory.openSession();
            Transaction transaction = session.beginTransaction();
            session.load(Person.class, id, LockMode.UPGRADE);
            try {
                Thread.sleep(2000);
            } catch (Throwable t) {
            }
            System.out.println("one");
            queue.add("one");
            try {
                Thread.sleep(500);
            } catch (Throwable t) {
            }
            transaction.commit();
            session.flush();
            session.close();
        }
    });
    Thread.sleep(500);
    Future<?> submit2 = executorService.submit(new Runnable() {
        public void run() {
            Session session = sessionFactory.openSession();
            Transaction transaction = session.beginTransaction();
            session.load(Person.class, id, LockMode.UPGRADE);
            queue.add("two");
            System.out.println("two");
            transaction.commit();
            session.flush();
            session.close();
        }
    });

    submit.get();
    submit2.get();

    assertEquals("one", queue.poll(3, TimeUnit.SECONDS));
    assertEquals("two", queue.poll(3, TimeUnit.SECONDS));
}
From source file:com.curecomp.primefaces.migrator.PrimefacesMigration.java
// pipe is an unbounded LinkedBlockingQueue that buffers WidgetVarLocation matches between
// the file-scanning tasks on the thread pool and the stream consuming them via PipeSpliterator.
private static Stream<WidgetVarLocation> findWidgetVars(Path sourceDirectory, String sourcePattern,
        ThreadPoolExecutor threadPool) throws IOException {
    BlockingQueue<WidgetVarLocation> pipe = new LinkedBlockingQueue<>();
    List<Future<?>> futures = new ArrayList<>();

    Files.walkFileTree(sourceDirectory, new FileActionVisitor(sourceDirectory, sourcePattern,
            sourceFile -> futures.add(threadPool.submit(() -> {
                try (BufferedReader br = Files.newBufferedReader(sourceFile, StandardCharsets.UTF_8)) {
                    int lineNr = 0;
                    String line;

                    while ((line = br.readLine()) != null) {
                        lineNr++;
                        if (line.contains("widgetVar=\"")) {
                            int startIndex = line.indexOf("widgetVar=\"") + "widgetVar=\"".length();
                            int endIndex = line.indexOf('"', startIndex);
                            String var = line.substring(startIndex, endIndex);
                            WidgetVarLocation widgetVar = new WidgetVarLocation(var, sourceFile, lineNr,
                                    startIndex, line);
                            pipe.add(widgetVar);
                        }
                    }
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
            }))));

    return StreamSupport.stream(new PipeSpliterator(pipe, futures), true);
}