List of usage examples for java.util.concurrent ExecutorService submit
Future<?> submit(Runnable task);
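Before the per-project examples, here is a minimal, self-contained sketch of the two common submit overloads; the pool size and task bodies are illustrative only:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitDemo {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            // submit(Runnable) returns a Future<?> whose get() yields null on success.
            Future<?> done = executor.submit(() -> System.out.println("side effect only"));
            done.get(); // blocks until the Runnable completes; failures arrive as ExecutionException

            // submit(Callable<T>) returns a Future<T> carrying the computed value.
            Future<Integer> sum = executor.submit(() -> 1 + 2);
            System.out.println("sum = " + sum.get());
        } finally {
            executor.shutdown(); // stop accepting new tasks; already-submitted tasks still run
        }
    }
}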
From source file:com.twitter.distributedlog.auditor.DLAuditor.java
static <T> void executeAction(final LinkedBlockingQueue<T> queue, final int numThreads,
        final Action<T> action) throws IOException {
    final CountDownLatch failureLatch = new CountDownLatch(1);
    final CountDownLatch doneLatch = new CountDownLatch(queue.size());
    final AtomicInteger numFailures = new AtomicInteger(0);
    final AtomicInteger completedThreads = new AtomicInteger(0);
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    try {
        for (int i = 0; i < numThreads; i++) {
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    while (true) {
                        T item = queue.poll();
                        if (null == item) {
                            break;
                        }
                        try {
                            action.execute(item);
                        } catch (IOException ioe) {
                            logger.error("Failed to execute action on item '{}'", item, ioe);
                            numFailures.incrementAndGet();
                            failureLatch.countDown();
                            break;
                        }
                        doneLatch.countDown();
                    }
                    if (numFailures.get() == 0 && completedThreads.incrementAndGet() == numThreads) {
                        failureLatch.countDown();
                    }
                }
            });
        }
        try {
            failureLatch.await();
            if (numFailures.get() > 0) {
                throw new IOException("Encountered " + numFailures.get()
                        + " failures on executing action.");
            }
            doneLatch.await();
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            logger.warn("Interrupted on executing action", ie);
            throw new DLInterruptedException("Interrupted on executing action", ie);
        }
    } finally {
        executorService.shutdown();
    }
}
From source file:it.unibo.alchemist.language.EnvironmentBuilder.java
private static <T> Future<Result<T>> build(final EnvironmentBuilder<T> builder) {
    final ExecutorService ex = Executors.newSingleThreadExecutor();
    final Future<Result<T>> result = ex.submit(() -> {
        builder.buildEnvironment();
        return Result.build(builder.result, builder.random);
    });
    ex.shutdown();
    return result;
}
From source file:no.ntnu.idi.socialhitchhiking.map.GeoHelper.java
/**
 * Retrieves a {@link List} of addresses that match the given {@link GeoPoint}.
 * The first element in the list has the best match (but is not guaranteed to be correct). <br><br>
 *
 * This method tries to use the {@link Geocoder} to transform a (latitude, longitude)
 * coordinate into addresses, and if this fails (which it most likely will under emulation), it
 * falls back to a method from the {@link GeoHelper} class.
 *
 * @param location The location that is transformed into a list of addresses
 * @param maxResults The maximum number of addresses to retrieve (should be small).
 * @param maxAddressLines The maximum number of lines in each address. This should be high if you
 *            want a complete address; if it is smaller than the total number of lines in the
 *            address, the last part is cut off.
 * @return Returns the {@link List} of addresses (as {@link String}s).
 */
public static List<String> getAddressesAtPoint(final GeoPoint location, final int maxResults,
        int maxAddressLines) {
    List<String> addressList = new ArrayList<String>();
    List<Address> possibleAddresses = new ArrayList<Address>();
    Address address = new Address(Locale.getDefault());
    String addressString = "Could not find the address...";
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Callable<List<Address>> callable = new Callable<List<Address>>() {
        @Override
        public List<Address> call() throws IOException {
            return fancyGeocoder.getFromLocation(location.getLatitudeE6() / 1E6,
                    location.getLongitudeE6() / 1E6, maxResults);
        }
    };
    Future<List<Address>> future = executor.submit(callable);
    try {
        possibleAddresses = future.get();
    } catch (InterruptedException e1) {
        possibleAddresses = GeoHelper.getAddressesFromLocation(location.getLatitudeE6() / 1E6,
                location.getLongitudeE6() / 1E6, maxResults);
    } catch (ExecutionException e1) {
        possibleAddresses = GeoHelper.getAddressesFromLocation(location.getLatitudeE6() / 1E6,
                location.getLongitudeE6() / 1E6, maxResults);
    }
    executor.shutdown();
    if (possibleAddresses.size() > 0) {
        for (int i = 0; i < possibleAddresses.size(); i++) {
            addressString = "";
            address = possibleAddresses.get(i);
            for (int j = 0; j <= address.getMaxAddressLineIndex() && j <= maxAddressLines; j++) {
                addressString += address.getAddressLine(j);
                addressString += "\n";
            }
            addressList.add(addressString.trim());
        }
    }
    return addressList;
}
From source file:no.ntnu.idi.socialhitchhiking.map.RouteProvider.java
/**
 * Returns a {@link MapRoute} containing data retrieved from Google Maps.
 *
 * @param fromLat The latitude where the route starts.
 * @param fromLon The longitude where the route starts.
 * @param toLat The latitude where the route ends.
 * @param toLon The longitude where the route ends.
 * @return Returns a {@link MapRoute} containing all the map data needed for showing a route in a map view.
 * @throws MalformedURLException
 * @throws ParserConfigurationException
 * @throws SAXException
 * @throws IOException
 * @throws XmlPullParserException
 */
public static MapRoute getRoute(double fromLat, double fromLon, double toLat, double toLon,
        final boolean drawable) throws MalformedURLException, IOException, XmlPullParserException {
    final String url = RouteProvider.getUrl(fromLat, fromLon, toLat, toLon);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Callable<MapRoute> callable = new Callable<MapRoute>() {
        @Override
        public MapRoute call() throws ClientProtocolException, IOException, XmlPullParserException {
            InputStream is = RouteProvider.getConnectionInputStream(url);
            return RouteProvider.getRoute(is, drawable);
        }
    };
    Future<MapRoute> future = executor.submit(callable);
    MapRoute ret;
    try {
        ret = future.get();
    } catch (InterruptedException e) {
        ret = null;
    } catch (ExecutionException e) {
        ret = null;
    }
    executor.shutdown();
    return ret;
}
From source file:gridool.db.helpers.GridDbUtils.java
public static void sendfile(final ExecutorService sencExecs, final GridXferClient dfsClient,
        final String fileName, final FastByteArrayOutputStream data, final GridNode dstNode,
        final int dstPort) {
    InetAddress dstAddr = dstNode.getPhysicalAdress();
    Runnable run = new RunnableFileSender(data, fileName, null, dstAddr, dstPort, true, true);
    sencExecs.submit(run);
    String alteredFileName = GridUtils.alterFileName(fileName, dstNode);
    List<GridNode> replicas = dstNode.getReplicas();
    for (GridNode replicaNode : replicas) {
        dstAddr = replicaNode.getPhysicalAdress();
        run = new RunnableFileSender(data, alteredFileName, null, dstAddr, dstPort, true, true);
        sencExecs.submit(run);
    }
}
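The sendfile method above discards the Future returned by submit, so send failures surface only inside RunnableFileSender itself. A hypothetical helper (the name submitAndAwait is invented for illustration) sketches how retaining those futures lets a caller block on completion and rethrow the first failure:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public final class SubmitAndAwait {
    // Submits every task, then blocks until all complete, rethrowing the first failure.
    public static void submitAndAwait(ExecutorService executor, List<Runnable> tasks)
            throws InterruptedException, ExecutionException {
        List<Future<?>> futures = new ArrayList<>();
        for (Runnable task : tasks) {
            // submit() never throws the task's exception here; it is captured in the Future.
            futures.add(executor.submit(task));
        }
        for (Future<?> future : futures) {
            future.get(); // returns null for a Runnable; task exceptions arrive wrapped in ExecutionException
        }
    }
}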
From source file:com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *          them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile, int numTimesToRunQueries,
        int numThreads, double startQPS, int reportIntervalMs, int numIntervalsToReportAndClearStatistics)
        throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info("Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                        + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.", startQPS,
                        timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info("Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
            + "Average Broker Time: {}ms, Average Client Time: {}ms.", startQPS, timePassed,
            numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
From source file:com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *          them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info("Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                            + "Average Client Time: {}ms.", timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info("Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
            + "Average Client Time: {}ms.", timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
From source file:com.taobao.android.tpatch.utils.SmaliUtils.java
/**
 * Disassembles a dex file into smali files.
 *
 * @param dex the dex file to disassemble
 * @param outputDir the output directory for the generated smali files
 * @param includeClasses the classes to disassemble; null means all classes
 * @return true if disassembly succeeded without errors
 */
public static boolean disassembleDexFile(File dex, File outputDir, final Set<String> includeClasses)
        throws IOException {
    final baksmaliOptions options = createBaksmaliOptions();
    if (!outputDir.exists()) {
        outputDir.mkdirs();
    }
    DexFile dexFile = DexFileFactory.loadDexFile(dex, DEFAULT_API_LEVEL, true);
    options.outputDirectory = outputDir.getAbsolutePath();
    options.jobs = 3;
    if (options.registerInfo != 0 || options.deodex) {
        try {
            Iterable<String> extraClassPathEntries;
            if (options.extraClassPathEntries != null) {
                extraClassPathEntries = options.extraClassPathEntries;
            } else {
                extraClassPathEntries = ImmutableList.of();
            }
            options.classPath = ClassPath.fromClassPath(options.bootClassPathDirs,
                    Iterables.concat(options.bootClassPathEntries, extraClassPathEntries), dexFile,
                    options.apiLevel, options.checkPackagePrivateAccess, options.experimental);
            if (options.customInlineDefinitions != null) {
                options.inlineResolver = new CustomInlineMethodResolver(options.classPath,
                        options.customInlineDefinitions);
            }
        } catch (Exception ex) {
            System.err.println("\n\nError occurred while loading boot class path files. Aborting.");
            ex.printStackTrace(System.err);
            return false;
        }
    }
    if (options.resourceIdFileEntries != null) {
        class PublicHandler extends DefaultHandler {
            String prefix = null;

            public PublicHandler(String prefix) {
                super();
                this.prefix = prefix;
            }

            public void startElement(String uri, String localName, String qName, Attributes attr)
                    throws SAXException {
                if (qName.equals("public")) {
                    String type = attr.getValue("type");
                    String name = attr.getValue("name").replace('.', '_');
                    Integer public_key = Integer.decode(attr.getValue("id"));
                    String public_val = new StringBuffer().append(prefix).append(".").append(type)
                            .append(".").append(name).toString();
                    options.resourceIds.put(public_key, public_val);
                }
            }
        }

        for (Map.Entry<String, String> entry : options.resourceIdFileEntries.entrySet()) {
            try {
                SAXParser saxp = SAXParserFactory.newInstance().newSAXParser();
                String prefix = entry.getValue();
                saxp.parse(entry.getKey(), new PublicHandler(prefix));
            } catch (ParserConfigurationException e) {
                continue;
            } catch (SAXException e) {
                continue;
            } catch (IOException e) {
                continue;
            }
        }
    }
    File outputDirectoryFile = new File(options.outputDirectory);
    if (!outputDirectoryFile.exists()) {
        if (!outputDirectoryFile.mkdirs()) {
            System.err.println("Can't create the output directory " + options.outputDirectory);
            return false;
        }
    }

    // Sort the classes, so that if we're on a case-insensitive file system and need to handle classes with file
    // name collisions, then we'll use the same name for each class, if the dex file goes through multiple
    // baksmali/smali cycles for some reason. If a class with a colliding name is added or removed, the filenames
    // may still change of course.
    List<? extends ClassDef> classDefs = Ordering.natural().sortedCopy(dexFile.getClasses());
    if (!options.noAccessorComments) {
        options.syntheticAccessorResolver = new SyntheticAccessorResolver(classDefs);
    }

    final ClassFileNameHandler fileNameHandler = new ClassFileNameHandler(outputDirectoryFile, ".smali");
    ExecutorService executor = Executors.newFixedThreadPool(options.jobs);
    List<Future<Boolean>> tasks = Lists.newArrayList();
    for (final ClassDef classDef : classDefs) {
        tasks.add(executor.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                String className = getDalvikClassName(classDef.getType());
                if (null != includeClasses) {
                    if (includeClasses.contains(className)) {
                        BakSmali.disassembleClass(classDef, fileNameHandler, options);
                    }
                    return true;
                } else {
                    return BakSmali.disassembleClass(classDef, fileNameHandler, options);
                }
            }
        }));
    }

    boolean errorOccurred = false;
    try {
        for (Future<Boolean> task : tasks) {
            while (true) {
                try {
                    if (!task.get()) {
                        errorOccurred = true;
                    }
                } catch (InterruptedException ex) {
                    continue;
                } catch (ExecutionException ex) {
                    throw new RuntimeException(ex);
                }
                break;
            }
        }
    } finally {
        executor.shutdown();
    }
    return !errorOccurred;
}
From source file:com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *          them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */
public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    double currentQPS = startQPS;
    int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                reportStartTime = currentTime;
                numReportIntervals++;

                if (numReportIntervals == numIntervalsToIncreaseQPS) {
                    // Try to find the next interval.
                    double newQPS = currentQPS + deltaQPS;
                    int newQueryIntervalMs;
                    // Skip the target QPS with the same interval as the previous one.
                    while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
                        newQPS += deltaQPS;
                    }
                    if (newQueryIntervalMs == 0) {
                        LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
                    } else {
                        // Find the next interval.
                        LOGGER.info("--------------------------------------------------------------------------------");
                        LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
                        int numQueriesExecutedInt = numQueriesExecuted.get();
                        LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                                currentQPS, timePassed, numQueriesExecutedInt,
                                numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                                totalBrokerTime.get() / (double) numQueriesExecutedInt,
                                totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                        currentQPS = newQPS;
                        queryIntervalMs = newQueryIntervalMs;
                        LOGGER.info("Increase target QPS to: {}, the following statistics are for the new target QPS.",
                                currentQPS);
                    }
                } else {
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                            + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                            currentQPS, timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
            + "Average Broker Time: {}ms, Average Client Time: {}ms.", currentQPS, timePassed,
            numQueriesExecutedInt, numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
From source file:no.ntnu.idi.socialhitchhiking.client.RequestTask.java
/**
 * Static method which adds elements and data to an xml file and sends it as a string to the server.
 *
 * @param req - {@link Request}
 * @return returns a subclass of {@link Response} to the input {@link Request}
 * @throws ClientProtocolException
 * @throws MalformedURLException
 * @throws FileNotFoundException
 * @throws IOException
 * @throws ExecutionException
 * @throws InterruptedException
 */
public static Response sendRequest(final Request req, final Context c)
        throws ClientProtocolException, IOException, InterruptedException, ExecutionException {
    // All network communication must run on a separate thread, as required by higher Android API levels.
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Callable<Response> callable = new Callable<Response>() {
        /**
         * Contains the actual code for initiating the communication.
         */
        @Override
        public Response call() throws ClientProtocolException, IOException {
            String xml = RequestSerializer.serialize(req);
            con = c;
            String url = con.getResources().getString(R.string.server_url);
            RequestTask requestTask = new RequestTask(url, xml);
            return ResponseParser.parse(requestTask.getResponse());
        }
    };
    // Execute and retrieve the result of the network operation.
    Future<Response> future = executor.submit(callable);
    Response ret = future.get();
    executor.shutdown();
    return ret;
}
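A recurring pattern in the examples above is calling shutdown() immediately after the last submit and relying on Future.get() to wait for results. Where a bounded wait is wanted instead, the shutdown idiom below (adapted from the ExecutorService javadoc; the 60-second timeout is an arbitrary choice) is a common alternative:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public final class ExecutorShutdown {
    // Orderly shutdown: stop accepting tasks, wait, then cancel stragglers.
    public static void shutdownAndAwait(ExecutorService executor) {
        executor.shutdown(); // previously submitted tasks still run; new submissions are rejected
        try {
            if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
                executor.shutdownNow(); // interrupt tasks that are still running
            }
        } catch (InterruptedException e) {
            executor.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the interrupt status for callers
        }
    }
}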