List of usage examples for java.lang.Thread#join()
public final void join() throws InterruptedException
From source file:com.linkedin.pinot.core.data.manager.BaseTableDataManagerTest.java
private void runStorageServer(int numQueryThreads, int runTimeSec, TableDataManager tableDataManager) throws Exception { // Start 1 helix worker thread and as many query threads as configured. List<Thread> queryThreads = new ArrayList<>(numQueryThreads); for (int i = 0; i < numQueryThreads; i++) { BaseTableDataManagerTest.TestSegmentUser segUser = new BaseTableDataManagerTest.TestSegmentUser( tableDataManager);/* ww w . ja v a 2s .c om*/ Thread segUserThread = new Thread(segUser); queryThreads.add(segUserThread); segUserThread.start(); } BaseTableDataManagerTest.TestHelixWorker helixWorker = new BaseTableDataManagerTest.TestHelixWorker( tableDataManager); Thread helixWorkerThread = new Thread(helixWorker); helixWorkerThread.start(); _masterThread = Thread.currentThread(); try { Thread.sleep(runTimeSec * 1000); } catch (InterruptedException e) { } _closing = true; helixWorkerThread.join(); for (Thread t : queryThreads) { t.join(); } if (_exception != null) { Assert.fail("One of the threads failed", _exception); } // tableDataManager should be quiescent now. // All segments we ever created must have a corresponding segment manager. Assert.assertEquals(_allSegManagers.size(), _allSegments.size()); final int nSegsAcccessed = _accessedSegManagers.size(); for (SegmentDataManager segmentDataManager : _internalSegMap.values()) { Assert.assertEquals(segmentDataManager.getReferenceCount(), 1); // We should never have called destroy on these segments. Remove it from the list of accessed segments. verify(segmentDataManager.getSegment(), never()).destroy(); _allSegManagers.remove(segmentDataManager); _accessedSegManagers.remove(segmentDataManager); } // For the remaining segments in accessed list, destroy must have been called exactly once. 
for (SegmentDataManager segmentDataManager : _allSegManagers) { verify(segmentDataManager.getSegment(), times(1)).destroy(); // Also their count should be 0 Assert.assertEquals(segmentDataManager.getReferenceCount(), 0); } // The number of segments we accessed must be <= total segments created. Assert.assertTrue(nSegsAcccessed <= _allSegments.size(), "Accessed=" + nSegsAcccessed + ",created=" + _allSegments.size()); // The number of segments we have seen and that are not there anymore, must be <= number destroyed. Assert.assertTrue(_accessedSegManagers.size() <= _nDestroys, "SeenButUnavailableNow=" + _accessedSegManagers.size() + ",Destroys=" + _nDestroys); // The current number of segments must be the as expected (hi-lo+1) Assert.assertEquals(_internalSegMap.size(), _hi - _lo + 1); }
From source file:de.undercouch.bson4jackson.BsonParserTest.java
/**
 * Tests reading a very large string using multiple threads. Refers issue #19.
 * Does not fail reproducibly, but with very high probability; you may have to run
 * the unit tests several times to really rule out multi-threading issues.
 * @throws Exception if something went wrong
 * @author endasb
 */
@Test
public void parseBigStringInThreads() throws Exception {
    final BSONObject o = new BasicBSONObject();
    final AtomicInteger fails = new AtomicInteger(0);

    // Build a large payload string ("abc" repeated 80000 times).
    StringBuilder bigStr = new StringBuilder();
    for (int i = 0; i < 80000; i++) {
        bigStr.append("abc");
    }
    o.put("String", bigStr.toString());

    // Fifty workers, each parsing the same document twice.
    ArrayList<Thread> threads = new ArrayList<Thread>();
    for (int i = 0; i < 50; i++) {
        Runnable worker = new Runnable() {
            @Override
            public void run() {
                try {
                    Map<?, ?> data = parseBsonObject(o);
                    data = parseBsonObject(o);
                    assertNotNull(data);
                } catch (Exception e) {
                    // The failure counter is what the main thread actually checks;
                    // an AssertionError thrown here dies with the worker thread.
                    fail("Threading issue " + fails.incrementAndGet());
                }
            }
        };
        threads.add(new Thread(worker));
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        t.join();
    }
    assertEquals(0, fails.get());
}
From source file:ExcelFx.FXMLDocumentController.java
private void mainProcessing(Task task) { final Thread thread = new Thread(null, task, "Background"); thread.setDaemon(true);// ww w.ja v a 2 s .co m thread.start(); new Thread() { @Override public void run() { try { thread.join(); } catch (InterruptedException e) { } Platform.runLater(() -> { setProgressBar(0); Alert alert = new Alert(Alert.AlertType.INFORMATION); alert.setTitle(""); alert.setHeaderText(""); alert.setContentText(" "); alert.showAndWait(); }); } }.start(); this.Print.setDisable(false); }
From source file:com.opengamma.bbg.replay.BloombergTickWriterTest.java
@Test(invocationCount = 5, successPercentage = 19) public void performance() throws Exception { ExecutorService writerExecutor = Executors.newSingleThreadExecutor(); Future<?> writerFuture = writerExecutor.submit(_writer); double nStartTime = System.currentTimeMillis(); //create ticks generators List<RandomTicksGeneratorJob> ticksGeneratorsList = new ArrayList<RandomTicksGeneratorJob>(); List<Thread> ticksGeneratorThreads = new ArrayList<Thread>(); for (int i = 0; i < TICKS_GENERATOR_THREAD_SIZE; i++) { RandomTicksGeneratorJob ticksGeneratorJob = new RandomTicksGeneratorJob( new ArrayList<String>(_ticker2buid.keySet()), _allTicksQueue); ticksGeneratorsList.add(ticksGeneratorJob); Thread thread = new Thread(ticksGeneratorJob, "TicksGenerator" + i); thread.start();/*from ww w . j a v a 2 s .co m*/ ticksGeneratorThreads.add(thread); } s_logger.info("Test running for 1min to gather stats"); Thread.sleep(RUN_DURATION); for (RandomTicksGeneratorJob ticksGeneratorJob : ticksGeneratorsList) { ticksGeneratorJob.terminate(); } //wait for all ticksGenerator threads to finish for (Thread thread : ticksGeneratorThreads) { thread.join(); } //send terminate message for tickWriter to terminate sendTerminateMessage(); //test should fail if writer throws an exception writerFuture.get(); writerExecutor.shutdown(); writerExecutor.awaitTermination(1, TimeUnit.SECONDS); double nRunDuration = System.currentTimeMillis() - nStartTime; double nTicks = ((double) _writer.getNTicks() / nRunDuration) * 1000; s_logger.info("ticks {}/s", nTicks); double nWrites = ((double) _writer.getNWrites() / nRunDuration) * 1000; s_logger.info("fileOperations {}/s", nWrites); double nBlocks = (double) _writer.getNBlocks() / (double) _writer.getNWrites(); s_logger.info("average blocks {}bytes", nBlocks); assertTrue("reportInterval > testRunTime", REPORT_INTERVAL > nRunDuration); if ((nWrites * nBlocks) < WRITER_SPEED_THRESHOLD) { s_logger.warn("BloombergTickWriter looks like running really slower than {}b/s", 
WRITER_SPEED_THRESHOLD); } }
From source file:idgs.client.TcpClientPoolIT.java
/**
 * Integration test for TcpClientPool. Verifies, in order: the singleton contract
 * (two getInstance() calls return the same pool); config-driven pool sizing after
 * loadClientConfig(); poll/return accounting around a send/receive round trip (pool
 * size drops by one on getClient() and recovers by one on client.close()); the timer
 * task that replenishes an emptied pool (NOTE(review): the t1 loop assumes the pool
 * refills to poolSize * loadFactor -- confirm against TcpClientPool's timer behaviour);
 * and finally that close() releases every client, leaving the pool empty.
 * NOTE(review): original line breaks were lost in extraction; the embedded "//"
 * comments mean this text cannot compile as single physical lines -- kept verbatim.
 */
public synchronized void testTcpClientPool() { log.info("test create singleton pool instance"); final TcpClientPool pool = TcpClientPool.getInstance(); final TcpClientPool pool1 = TcpClientPool.getInstance(); assertEquals(pool, pool1);/* w w w. j a v a 2 s . c om*/ log.info("test load client config to create all clients"); // String cfgFile = getClass().getResource("/test-client.conf").getPath(); String cfgFile = "conf/client.conf"; try { pool.loadClientConfig(cfgFile); } catch (IOException e) { log.error(e.getMessage(), e); } int actualPoolSize = pool.size(); int expectPoolSize = pool.getClientConfig().getPoolSize(); log.info("config pool size: " + expectPoolSize); log.info("actual pool size: " + actualPoolSize); assertTrue(expectPoolSize >= pool.size()); log.info("test poll a client from the pool, and insert a customer into store, then push it back the pool"); ClientActorMessage requestActorMsg = createActorMessage(); final int poolSize = pool.size(); if (poolSize <= 0) { log.warn("no avaiable client to be used"); return; } final int loopCount = poolSize * 2; // let all client to be used at least once for (int i = 0; i < loopCount; i++) { int prevPoolSize = pool.size(); TcpClientInterface client = pool.getClient(); int currPoolSize = pool.size(); log.info( "after poll out, current pool size: " + currPoolSize + ", previous pool size: " + prevPoolSize); assertEquals(currPoolSize, prevPoolSize - 1); if (client != null) { try { log.debug("-------------------request msg----------------------"); log.debug(requestActorMsg.toString()); ClientActorMessage responseActorMsg = client.sendRecv(requestActorMsg); InsertResponse.Builder builder = InsertResponse.newBuilder(); responseActorMsg.parsePayload(builder); log.debug("-------------------response msg----------------------"); log.debug(responseActorMsg.toString()); InsertResponse response = builder.build(); log.debug(response.toString()); assertEquals(response.getResultCode(), StoreResultCode.SRC_SUCCESS); } catch 
(Exception e) { log.error(e.getMessage(), e); } finally { prevPoolSize = pool.size(); client.close(); currPoolSize = pool.size(); log.info("after push back, current pool size: " + currPoolSize + ", previous pool size: " + prevPoolSize); assertEquals(currPoolSize, prevPoolSize + 1); } } } log.info("test client pool timer task whether auto create client connection when empty"); Thread t1 = new Thread() { @Override public void run() { while (pool.size() != pool.getClientConfig().getPoolSize() * pool.getLoadFactor()) { // poll out client, never push back, test pool.getClient(); // client.close(); try { Thread.sleep(1000); } catch (InterruptedException e) { // do nothing } } } }; t1.start(); try { t1.join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } assertEquals(pool.getClientConfig().getPoolSize() * pool.getLoadFactor(), pool.size()); log.info("test close pool to release all clients "); assertTrue(pool.size() >= 0); pool.close(); assertEquals(0, pool.size()); }
From source file:de.innovationgate.wgpublisher.design.sync.DesignSyncManager.java
/**
 * Runs one design-sync polling pass on a dedicated worker thread and blocks the caller
 * until that pass completes.
 */
public void scanForUpdates() {
    Thread thread = new Thread(new PollingTask());
    thread.start();
    try {
        thread.join();
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status so callers can observe the interruption,
        // instead of dumping a stack trace and swallowing it.
        Thread.currentThread().interrupt();
    }
}
From source file:com.ibm.bi.dml.runtime.controlprogram.parfor.opt.PerfTestTool.java
/** * //from w w w . j av a2 s .c o m * @param measure * @param pb * @return * @throws DMLRuntimeException * @throws DMLUnsupportedOperationException */ public static double executeGenericProgramBlock(TestMeasure measure, ProgramBlock pb, ExecutionContext ec) throws DMLRuntimeException, DMLUnsupportedOperationException { double value = 0; try { switch (measure) { case EXEC_TIME: Timing time = new Timing(); time.start(); pb.execute(ec); value = time.stop(); break; case MEMORY_USAGE: PerfTestMemoryObserver mo = new PerfTestMemoryObserver(); mo.measureStartMem(); Thread t = new Thread(mo); t.start(); pb.execute(ec); mo.setStopped(); value = mo.getMaxMemConsumption(); t.join(); break; } } catch (Exception ex) { throw new DMLRuntimeException(ex); } //clear matrixes from cache for (String str : ec.getVariables().keySet()) { Data dat = ec.getVariable(str); if (dat instanceof MatrixObject) ((MatrixObject) dat).clearData(); } return value; }
From source file:gobblin.tunnel.TestTunnelWithArbitraryTCPTraffic.java
/**
 * Exchanges large messages between nclients concurrent clients and a "talk past" server,
 * optionally routing the traffic through a Tunnel backed by a mock CONNECT proxy, then
 * compares MD5 digests on both sides to verify every byte arrived intact in both
 * directions. Per client: one thread writes nMsgs large lines ending with "Goodbye"
 * while a nested reader thread digests the server's replies; client->server hashes must
 * match the server's per-client received hashes, and every client must have received
 * exactly the server's transmitted stream. When tunneling, also asserts the tunnel
 * thread died and the proxy saw exactly nclients CONNECTs.
 * NOTE(review): original line breaks were lost in extraction; the embedded "//"
 * comments mean this text cannot compile as single physical lines -- kept verbatim.
 */
private void runSimultaneousDataExchange(boolean useTunnel, int nclients) throws IOException, InterruptedException, NoSuchAlgorithmException { long t0 = System.currentTimeMillis(); final int nMsgs = 50; final Map<String, MessageDigest> digestMsgsRecvdAtServer = new HashMap<String, MessageDigest>(); final Map<String, MessageDigest> digestMsgsSentByClients = new HashMap<String, MessageDigest>(); final Map<String, MessageDigest> digestMsgsRecvdAtClients = new HashMap<String, MessageDigest>(); for (int c = 0; c < nclients; c++) { digestMsgsRecvdAtServer.put(Integer.toString(c), MessageDigest.getInstance("MD5")); digestMsgsSentByClients.put(Integer.toString(c), MessageDigest.getInstance("MD5")); digestMsgsRecvdAtClients.put(Integer.toString(c), MessageDigest.getInstance("MD5")); }/*from ww w . jav a 2 s. c o m*/ final MessageDigest digestMsgsSentByServer = MessageDigest.getInstance("MD5"); for (int i = 0; i < nMsgs; i++) { digestMsgsSentByServer.update(TalkPastServer.generateMsgFromServer(i).getBytes()); } String hashOfMsgsSentByServer = Hex.encodeHexString(digestMsgsSentByServer.digest()); MockServer talkPastServer = startTalkPastServer(nMsgs, digestMsgsRecvdAtServer); int targetPort = talkPastServer.getServerSocketPort(); Tunnel tunnel = null; MockServer proxyServer = null; if (useTunnel) { proxyServer = startConnectProxyServer(); tunnel = Tunnel.build("localhost", talkPastServer.getServerSocketPort(), "localhost", proxyServer.getServerSocketPort()); targetPort = tunnel.getPort(); } try { List<EasyThread> clientThreads = new ArrayList<EasyThread>(); final int portToUse = targetPort; for (int c = 0; c < nclients; c++) { final int clientId = c; clientThreads.add(new EasyThread() { @Override void runQuietly() throws Exception { long t = System.currentTimeMillis(); LOG.info("\t" + clientId + ": Client starting"); final MessageDigest digestMsgsRecvdAtClient = digestMsgsRecvdAtClients .get(Integer.toString(clientId)); //final SocketChannel client = SocketChannel.open(); // 
tunnel test hangs for some reason with SocketChannel final Socket client = new Socket(); client.connect(new InetSocketAddress("localhost", portToUse)); EasyThread serverReaderThread = new EasyThread() { @Override public void runQuietly() { try { BufferedReader clientIn = new BufferedReader( new InputStreamReader(client.getInputStream())); String line = clientIn.readLine(); while (line != null && !line.equals("Goodbye")) { //LOG.info("\t" + clientId + ": Server said [" + line.substring(0, 32) + "... ]"); digestMsgsRecvdAtClient.update(line.getBytes()); digestMsgsRecvdAtClient.update("\n".getBytes()); line = clientIn.readLine(); } } catch (IOException e) { e.printStackTrace(); } LOG.info("\t" + clientId + ": Client done reading"); } }.startThread(); MessageDigest hashMsgsFromClient = digestMsgsSentByClients.get(Integer.toString(clientId)); BufferedOutputStream clientOut = new BufferedOutputStream(client.getOutputStream()); for (int i = 0; i < nMsgs; i++) { String msg = clientId + ":" + i + " " + StringUtils.repeat("Blahhh Blahhh ", 10000) + "\n"; //LOG.info(clientId + " sending " + msg.length() + " bytes"); byte[] bytes = msg.getBytes(); hashMsgsFromClient.update(bytes); clientOut.write(bytes); MockServer.sleepQuietly(2); } clientOut.write(("Goodbye\n".getBytes())); clientOut.flush(); LOG.info("\t" + clientId + ": Client done writing in " + (System.currentTimeMillis() - t) + " ms"); serverReaderThread.join(); LOG.info("\t" + clientId + ": Client done in " + (System.currentTimeMillis() - t) + " ms"); client.close(); } }.startThread()); } for (Thread clientThread : clientThreads) { clientThread.join(); } LOG.info("All data transfer done in " + (System.currentTimeMillis() - t0) + " ms"); } finally { talkPastServer.stopServer(); if (tunnel != null) { proxyServer.stopServer(); tunnel.close(); assertFalse(tunnel.isTunnelThreadAlive()); assertEquals(proxyServer.getNumConnects(), nclients); } Map<String, String> hashOfMsgsRecvdAtServer = new HashMap<String, String>(); 
Map<String, String> hashOfMsgsSentByClients = new HashMap<String, String>(); Map<String, String> hashOfMsgsRecvdAtClients = new HashMap<String, String>(); for (int c = 0; c < nclients; c++) { String client = Integer.toString(c); hashOfMsgsRecvdAtServer.put(client, Hex.encodeHexString(digestMsgsRecvdAtServer.get(client).digest())); hashOfMsgsSentByClients.put(client, Hex.encodeHexString(digestMsgsSentByClients.get(client).digest())); hashOfMsgsRecvdAtClients.put(client, Hex.encodeHexString(digestMsgsRecvdAtClients.get(client).digest())); } LOG.info("\tComparing client sent to server received"); assertEquals(hashOfMsgsSentByClients, hashOfMsgsRecvdAtServer); LOG.info("\tComparing server sent to client received"); for (String hashOfMsgsRecvdAtClient : hashOfMsgsRecvdAtClients.values()) { assertEquals(hashOfMsgsSentByServer, hashOfMsgsRecvdAtClient); } LOG.info("\tDone"); } }
From source file:bq.jpa.demo.lock.LockTester.java
/**
 * Runs a reader and a writer concurrently against the lock service: the read thread
 * starts first, the write thread starts one second later, and the test waits for both
 * to finish.
 */
@Test
public void test4() {
    test();
    System.out.println("---- do statistic during writing ----");

    Thread thread1 = new Thread(new Runnable() {
        @Override
        public void run() {
            service.doReadWhileModify();
        }
    });
    Thread thread2 = new Thread(new Runnable() {
        @Override
        public void run() {
            service.doModify();
        }
    });
    thread1.setName("readthread");
    thread2.setName("writethread");

    thread1.start();
    try {
        // Give the reader a head start so it is already reading when the write begins.
        Thread.sleep(1000);
    } catch (InterruptedException e1) {
        // Fix: restore the interrupt flag instead of swallowing it silently.
        Thread.currentThread().interrupt();
    }
    thread2.start();

    try {
        thread1.join();
        thread2.join();
    } catch (InterruptedException e) {
        // Fix: re-assert the interrupt before reporting it.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
}
From source file:com.linkedin.pinot.core.offline.OfflineTableDataManagerTest.java
private void runStorageServer(int numQueryThreads, int runTimeSec, TableDataManager tableDataManager, boolean replaceSegments) throws Exception { // Start 1 helix worker thread and as many query threads as configured. List<Thread> queryThreads = new ArrayList<>(numQueryThreads); for (int i = 0; i < numQueryThreads; i++) { TestSegmentUser segUser = new TestSegmentUser(tableDataManager); Thread segUserThread = new Thread(segUser); queryThreads.add(segUserThread); segUserThread.start();/* w w w .j a va 2s .c om*/ } TestHelixWorker helixWorker = new TestHelixWorker(tableDataManager, replaceSegments); Thread helixWorkerThread = new Thread(helixWorker); helixWorkerThread.start(); _masterThread = Thread.currentThread(); try { Thread.sleep(runTimeSec * 1000); } catch (InterruptedException e) { } _closing = true; helixWorkerThread.join(); for (Thread t : queryThreads) { t.join(); } if (_exception != null) { Assert.fail("One of the threads failed", _exception); } // tableDataManager should be quiescent now. // All segments we ever created must have a corresponding segment manager. Assert.assertEquals(_allSegManagers.size(), _allSegments.size()); final int nSegsAcccessed = _accessedSegManagers.size(); for (SegmentDataManager segmentDataManager : _internalSegMap.values()) { verifyCount(segmentDataManager, 1); // We should never have called destroy on these segments. Remove it from the list of accessed segments. verify(segmentDataManager.getSegment(), never()).destroy(); _allSegManagers.remove(segmentDataManager); _accessedSegManagers.remove(segmentDataManager); } // For the remaining segments in accessed list, destroy must have been called exactly once. for (SegmentDataManager segmentDataManager : _allSegManagers) { verify(segmentDataManager.getSegment(), times(1)).destroy(); // Also their count should be 0 verifyCount(segmentDataManager, 0); } // The number of segments we accessed must be <= total segments created. 
Assert.assertTrue(nSegsAcccessed <= _allSegments.size(), "Accessed=" + nSegsAcccessed + ",created=" + _allSegments.size()); // The number of segments we have seen and that are not there anymore, must be <= number destroyed. Assert.assertTrue(_accessedSegManagers.size() <= _nDestroys, "SeenButUnavailableNow=" + _accessedSegManagers.size() + ",Destroys=" + _nDestroys); // The current number of segments must be the as expected (hi-lo+1) Assert.assertEquals(_internalSegMap.size(), _hi - _lo + 1); }