Usage examples for java.util.concurrent.CompletionService.take()
Future<V> take() throws InterruptedException;
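A CompletionService decouples submitting tasks from consuming their results: take() blocks until some submitted task has completed and returns its Future, in completion order rather than submission order. Below is a minimal sketch of the common pattern; the trivial Callable and class name are purely illustrative.

import java.util.concurrent.*;

public class CompletionServiceTakeExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        CompletionService<String> completionService = new ExecutorCompletionService<>(executor);
        int taskCount = 3;
        for (int i = 0; i < taskCount; i++) {
            final int id = i;
            // Hypothetical workload: any Callable<String> would do here
            completionService.submit(() -> "result-" + id);
        }
        // Call take() exactly once per submitted task; each call blocks until
        // the next task finishes, yielding results in completion order.
        for (int i = 0; i < taskCount; i++) {
            Future<String> done = completionService.take();
            System.out.println(done.get());
        }
        executor.shutdown();
    }
}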
From source file:WordLengthCallable.java
public static void main(String[] args) throws Exception {
    int THREAD_COUNT = 4;
    ExecutorService execService = Executors.newFixedThreadPool(THREAD_COUNT);
    CompletionService<Integer> completionService = new ExecutorCompletionService<>(execService);
    for (int i = 0; i < THREAD_COUNT; i++) {
        completionService.submit(new WordLengthCallable());
    }
    execService.shutdown();
    // Take exactly one result per submitted task. Polling isTerminated() here
    // would race with task completion and could drop results or block forever.
    for (int i = 0; i < THREAD_COUNT; i++) {
        int result = completionService.take().get();
        System.out.println("Result is: " + result);
    }
    Thread.sleep(1000);
    System.out.println("done!");
}
From source file:com.siva.javamultithreading.MultiThreadExecutor.java
public static void main(String[] args) throws ExecutionException, IOException {
    // Populate the data
    List<DomainObject> list = new ArrayList<>();
    DomainObject object = null;
    for (int i = 0; i < 230000; i++) {
        object = new DomainObject();
        object.setId("ID" + i);
        object.setName("NAME" + i);
        object.setComment("COMMENT" + i);
        list.add(object);
    }

    int maxNoOfRows = 40000;
    int noOfThreads = 1;
    int remaining = 0;
    if (list.size() > maxNoOfRows) {
        noOfThreads = list.size() / maxNoOfRows;
        remaining = list.size() % maxNoOfRows;
        if (remaining > 0) {
            noOfThreads++;
        }
    }
    List<List<DomainObject>> dos = ListUtils.partition(list, maxNoOfRows);

    ExecutorService threadPool = Executors.newFixedThreadPool(noOfThreads);
    CompletionService<HSSFWorkbook> pool = new ExecutorCompletionService<>(threadPool);

    // Excel creation through multiple threads
    long startTime = System.currentTimeMillis();
    for (List<DomainObject> listObj : dos) {
        pool.submit(new ExcelChunkSheetWriter(listObj));
    }

    HSSFWorkbook hSSFWorkbook = null;
    HSSFWorkbook book = new HSSFWorkbook();
    HSSFSheet sheet = book.createSheet("Report");
    try {
        // Take one completed workbook per submitted chunk: dos.size() tasks,
        // not a hard-coded 5, which would drop the remainder chunk.
        for (int i = 0; i < dos.size(); i++) {
            hSSFWorkbook = pool.take().get();
            System.out.println("sheet row count : sheet.getPhysicalNumberOfRows() = "
                    + sheet.getPhysicalNumberOfRows());
            int currentCount = sheet.getPhysicalNumberOfRows();
            int incomingCount = hSSFWorkbook.getSheetAt(0).getPhysicalNumberOfRows();
            if ((currentCount + incomingCount) > 60000) {
                sheet = book.createSheet("Report" + i);
            }
            ExcelUtil.copySheets(book, sheet, hSSFWorkbook.getSheetAt(0));
        }
    } catch (InterruptedException | ExecutionException ex) {
        Logger.getLogger(MultiThreadExecutor.class.getName()).log(Level.SEVERE, null, ex);
    }

    try {
        writeFile(book, new FileOutputStream("Report.xls"));
    } catch (Exception e) {
        e.printStackTrace();
    }

    //System.out.println("No of Threads : " + noOfThreads + " Size : " + list.size() + " remaining : " + remaining);
    long endTime = System.currentTimeMillis();
    System.out.println("Time taken: " + (endTime - startTime) + " ms");
    threadPool.shutdown();
    //startProcess();
}
From source file:Main.java
public static Set<String> findMatches(List<String> searchList, Set<String> targetSet)
        throws InterruptedException, ExecutionException {
    Set<String> locatedMatchSet = new HashSet<String>();

    int threadCount = Runtime.getRuntime().availableProcessors();
    List<List<String>> partitionList = getChunkList(searchList, threadCount);

    if (partitionList.size() == 1) {
        // if we only have one "chunk" then don't bother with a thread-pool
        locatedMatchSet = new ListSearcher(searchList, targetSet).call();
    } else {
        ExecutorService executor = Executors.newFixedThreadPool(threadCount);
        CompletionService<Set<String>> completionService = new ExecutorCompletionService<Set<String>>(executor);
        for (List<String> chunkList : partitionList) {
            completionService.submit(new ListSearcher(chunkList, targetSet));
        }
        for (int x = 0; x < partitionList.size(); x++) {
            Set<String> threadMatchSet = completionService.take().get();
            locatedMatchSet.addAll(threadMatchSet);
        }
        executor.shutdown();
    }
    return locatedMatchSet;
}
From source file:com.siva.javamultithreading.MultiThreadExecutor.java
/**
 * This is a sample.
 */
private static void startProcess() {
    ExecutorService threadPool = Executors.newFixedThreadPool(4);
    CompletionService<HSSFWorkbook> pool = new ExecutorCompletionService<>(threadPool);

    // Excel creation through multiple threads
    long startTime = System.currentTimeMillis();
    pool.submit(new ExcelChunkSheetWriter(0, 1000));
    pool.submit(new ExcelChunkSheetWriter(1001, 20000));
    pool.submit(new ExcelChunkSheetWriter(2, 3000));
    pool.submit(new ExcelChunkSheetWriter(3, 40000));
    pool.submit(new ExcelChunkSheetWriter(4, 50000));

    HSSFWorkbook hSSFWorkbook = null;
    HSSFWorkbook book = new HSSFWorkbook();
    HSSFSheet sheet = book.createSheet("Report");
    try {
        // Five tasks were submitted above, so take() exactly five results.
        for (int i = 0; i < 5; i++) {
            hSSFWorkbook = pool.take().get();
            System.out.println("sheet row count : sheet.getPhysicalNumberOfRows() = "
                    + sheet.getPhysicalNumberOfRows());
            int currentCount = sheet.getPhysicalNumberOfRows();
            int incomingCount = hSSFWorkbook.getSheetAt(0).getPhysicalNumberOfRows();
            if ((currentCount + incomingCount) > 60000) {
                sheet = book.createSheet("Report" + i);
            }
            ExcelUtil.copySheets(book, sheet, hSSFWorkbook.getSheetAt(0));
        }
    } catch (InterruptedException | ExecutionException ex) {
        Logger.getLogger(MultiThreadExecutor.class.getName()).log(Level.SEVERE, null, ex);
    }

    try {
        writeFile(book, new FileOutputStream("Report.xls"));
    } catch (Exception e) {
        e.printStackTrace();
    }

    /*
    FileOutputStream fos = new FileOutputStream("all.zip");
    ZipOutputStream zos = new ZipOutputStream(fos);
    for (int i = 0; i < 5; i++) {
        try {
            hSSFWorkbook = pool.take().get();
            ZipEntry ze = new ZipEntry("Excel" + i + ".xls");
            zos.putNextEntry(ze);
            hSSFWorkbook.write(zos);
            zos.closeEntry();
        } catch (InterruptedException ex) {
            Logger.getLogger(MultiThreadExecutor.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    zos.close();
    */

    long endTime = System.currentTimeMillis();
    System.out.println("Time taken: " + (endTime - startTime) + " ms");
    threadPool.shutdown();
}
From source file:org.apache.hadoop.hbase.util.ModifyRegionUtils.java
/**
 * Create new set of regions on the specified file-system.
 * NOTE: you should add the regions to hbase:meta after this operation.
 *
 * @param exec Thread Pool Executor
 * @param conf {@link Configuration}
 * @param rootDir Root directory for HBase instance
 * @param tableDir table directory
 * @param hTableDescriptor description of the table
 * @param newRegions {@link HRegionInfo} that describes the regions to create
 * @param task {@link RegionFillTask} custom code to populate region after creation
 * @throws IOException
 */
public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec, final Configuration conf,
        final Path rootDir, final Path tableDir, final HTableDescriptor hTableDescriptor,
        final HRegionInfo[] newRegions, final RegionFillTask task) throws IOException {
    if (newRegions == null) {
        return null;
    }
    int regionNumber = newRegions.length;
    CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<HRegionInfo>(exec);
    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
    for (final HRegionInfo newRegion : newRegions) {
        completionService.submit(new Callable<HRegionInfo>() {
            @Override
            public HRegionInfo call() throws IOException {
                return createRegion(conf, rootDir, tableDir, hTableDescriptor, newRegion, task);
            }
        });
    }
    try {
        // wait for all regions to finish creation
        for (int i = 0; i < regionNumber; i++) {
            Future<HRegionInfo> future = completionService.take();
            HRegionInfo regionInfo = future.get();
            regionInfos.add(regionInfo);
        }
    } catch (InterruptedException e) {
        LOG.error("Caught " + e + " during region creation");
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        throw new IOException(e);
    }
    return regionInfos;
}
From source file:org.mule.module.db.performance.LoadGenerator.java
public void generateLoad(final LoadTask loadTask) throws InterruptedException, ExecutionException {
    Collection<Callable<Integer>> solvers = new ArrayList<Callable<Integer>>(getThreadCount());
    for (int i = 1; i <= getThreadCount(); i++) {
        solvers.add(new Callable<Integer>() {
            public Integer call() throws Exception {
                for (int message = 1; message <= getMessagesPerThread(); message++) {
                    try {
                        loadTask.execute(message);
                    } catch (Exception e) {
                        // Ignore and continue
                        logger.error("Error sending message: " + e.getMessage());
                    }
                    Thread.sleep(getMessageDelay());
                }
                return getMessagesPerThread();
            }
        });
    }

    ExecutorService exec = Executors.newFixedThreadPool(getThreadCount());
    CompletionService<Integer> executorCompletionService = new ExecutorCompletionService<Integer>(exec);
    for (Callable<Integer> s : solvers) {
        executorCompletionService.submit(s);
    }

    Integer count = 0;
    for (int i = 0; i < getThreadCount(); ++i) {
        count = count + executorCompletionService.take().get();
        logger.info("Current row processed count: " + count);
    }
    logger.info("Load generation completed");
}
From source file:com.linkedin.pinot.controller.api.restlet.resources.ServerTableSizeReader.java
public Map<String, List<SegmentSizeInfo>> getSizeDetailsFromServers(BiMap<String, String> serverEndPoints,
        String table, int timeoutMsec) {
    List<String> serverUrls = new ArrayList<>(serverEndPoints.size());
    BiMap<String, String> endpointsToServers = serverEndPoints.inverse();
    for (String endpoint : endpointsToServers.keySet()) {
        String tableSizeUri = "http://" + endpoint + "/table/" + table + "/size";
        serverUrls.add(tableSizeUri);
    }

    MultiGetRequest mget = new MultiGetRequest(executor, connectionManager);
    LOGGER.info("Reading segment sizes from servers for table: {}, timeoutMsec: {}", table, timeoutMsec);
    CompletionService<GetMethod> completionService = mget.execute(serverUrls, timeoutMsec);
    Map<String, List<SegmentSizeInfo>> serverSegmentSizes = new HashMap<>(serverEndPoints.size());

    for (int i = 0; i < serverUrls.size(); i++) {
        try {
            GetMethod getMethod = completionService.take().get();
            URI uri = getMethod.getURI();
            String instance = endpointsToServers.get(uri.getHost() + ":" + uri.getPort());
            if (getMethod.getStatusCode() >= 300) {
                LOGGER.error("Server: {} returned error: {}", instance, getMethod.getStatusCode());
                continue;
            }
            TableSizeInfo tableSizeInfo =
                    new ObjectMapper().readValue(getMethod.getResponseBodyAsString(), TableSizeInfo.class);
            serverSegmentSizes.put(instance, tableSizeInfo.segments);
        } catch (InterruptedException e) {
            LOGGER.warn("Interrupted exception while reading segment size for table: {}", table, e);
        } catch (ExecutionException e) {
            if (Throwables.getRootCause(e) instanceof SocketTimeoutException) {
                LOGGER.warn("Server request to read table size was timed out for table: {}", table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectTimeoutException) {
                LOGGER.warn("Server request to read table size timed out waiting for connection. table: {}",
                        table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectionPoolTimeoutException) {
                LOGGER.warn("Server request to read table size timed out on getting a connection from pool, table: {}",
                        table, e);
            } else {
                LOGGER.warn("Execution exception while reading segment sizes for table: {}", table, e);
            }
        } catch (Exception e) {
            // pass the exception so the stack trace is not silently dropped
            LOGGER.warn("Error while reading segment sizes for table: {}", table, e);
        }
    }
    LOGGER.info("Finished reading segment sizes for table: {}", table);
    return serverSegmentSizes;
}
From source file:com.linkedin.pinot.common.http.MultiGetRequestTest.java
@Test
public void testMultiGet() {
    MultiGetRequest mget =
            new MultiGetRequest(Executors.newCachedThreadPool(), new MultiThreadedHttpConnectionManager());
    List<String> urls = Arrays.asList(
            "http://localhost:" + String.valueOf(portStart) + URI_PATH,
            "http://localhost:" + String.valueOf(portStart + 1) + URI_PATH,
            "http://localhost:" + String.valueOf(portStart + 2) + URI_PATH,
            // 2nd request to the same server
            "http://localhost:" + String.valueOf(portStart) + URI_PATH);

    // timeout value needs to be less than 5000ms set above for third server
    final int requestTimeoutMs = 1000;
    CompletionService<GetMethod> completionService = mget.execute(urls, requestTimeoutMs);
    int success = 0;
    int errors = 0;
    int timeouts = 0;
    for (int i = 0; i < urls.size(); i++) {
        GetMethod getMethod = null;
        try {
            getMethod = completionService.take().get();
            if (getMethod.getStatusCode() >= 300) {
                ++errors;
                Assert.assertEquals(getMethod.getResponseBodyAsString(), ERROR_MSG);
            } else {
                ++success;
                Assert.assertEquals(getMethod.getResponseBodyAsString(), SUCCESS_MSG);
            }
        } catch (InterruptedException e) {
            LOGGER.error("Interrupted", e);
            ++errors;
        } catch (ExecutionException e) {
            if (Throwables.getRootCause(e) instanceof SocketTimeoutException) {
                LOGGER.debug("Timeout");
                ++timeouts;
            } else {
                LOGGER.error("Error", e);
                ++errors;
            }
        } catch (IOException e) {
            ++errors;
        }
    }
    Assert.assertEquals(2, success);
    Assert.assertEquals(1, errors);
    Assert.assertEquals(1, timeouts);
}
From source file:com.sitewhere.test.MultithreadedRestTest.java
@Test
public void doRestTest() throws Exception {
    java.util.logging.Logger.getLogger("org.apache.http.wire").setLevel(java.util.logging.Level.FINEST);
    java.util.logging.Logger.getLogger("org.apache.http.headers").setLevel(java.util.logging.Level.FINEST);
    System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.SimpleLog");
    System.setProperty("org.apache.commons.logging.simplelog.showdatetime", "true");
    System.setProperty("org.apache.commons.logging.simplelog.log.httpclient.wire", "ERROR");
    System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.http", "ERROR");
    System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.http.headers", "ERROR");

    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    CompletionService<SiteWhereClientTester.TestResults> completionService =
            new ExecutorCompletionService<SiteWhereClientTester.TestResults>(executor);
    for (int i = 0; i < numThreads; i++) {
        completionService
                .submit(new SiteWhereClientTester("90389b40-7c25-401b-bf72-98673913d59e", 100, updateState));
    }
    for (int i = 0; i < numThreads; ++i) {
        completionService.take().get();
    }
}
From source file:com.linkedin.pinot.controller.api.resources.ServerTableSizeReader.java
public Map<String, List<SegmentSizeInfo>> getSizeDetailsFromServers(BiMap<String, String> serverEndPoints,
        String table, int timeoutMsec) {
    List<String> serverUrls = new ArrayList<>(serverEndPoints.size());
    BiMap<String, String> endpointsToServers = serverEndPoints.inverse();
    for (String endpoint : endpointsToServers.keySet()) {
        String tableSizeUri = "http://" + endpoint + "/table/" + table + "/size";
        serverUrls.add(tableSizeUri);
    }

    MultiGetRequest mget = new MultiGetRequest(executor, connectionManager);
    LOGGER.info("Reading segment sizes from servers for table: {}, timeoutMsec: {}", table, timeoutMsec);
    CompletionService<GetMethod> completionService = mget.execute(serverUrls, timeoutMsec);
    Map<String, List<SegmentSizeInfo>> serverSegmentSizes = new HashMap<>(serverEndPoints.size());

    for (int i = 0; i < serverUrls.size(); i++) {
        GetMethod getMethod = null;
        try {
            getMethod = completionService.take().get();
            URI uri = getMethod.getURI();
            String instance = endpointsToServers.get(uri.getHost() + ":" + uri.getPort());
            if (getMethod.getStatusCode() >= 300) {
                LOGGER.error("Server: {} returned error: {}", instance, getMethod.getStatusCode());
                continue;
            }
            TableSizeInfo tableSizeInfo =
                    new ObjectMapper().readValue(getMethod.getResponseBodyAsString(), TableSizeInfo.class);
            serverSegmentSizes.put(instance, tableSizeInfo.segments);
        } catch (InterruptedException e) {
            LOGGER.warn("Interrupted exception while reading segment size for table: {}", table, e);
        } catch (ExecutionException e) {
            if (Throwables.getRootCause(e) instanceof SocketTimeoutException) {
                LOGGER.warn("Server request to read table size was timed out for table: {}", table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectTimeoutException) {
                LOGGER.warn("Server request to read table size timed out waiting for connection. table: {}",
                        table, e);
            } else if (Throwables.getRootCause(e) instanceof ConnectionPoolTimeoutException) {
                LOGGER.warn("Server request to read table size timed out on getting a connection from pool, table: {}",
                        table, e);
            } else {
                LOGGER.warn("Execution exception while reading segment sizes for table: {}", table, e);
            }
        } catch (Exception e) {
            // pass the exception so the stack trace is not silently dropped
            LOGGER.warn("Error while reading segment sizes for table: {}", table, e);
        } finally {
            // unlike the variant above, this version returns the connection to the pool
            if (getMethod != null) {
                getMethod.releaseConnection();
            }
        }
    }
    LOGGER.info("Finished reading segment sizes for table: {}", table);
    return serverSegmentSizes;
}