List of usage examples for java.util.concurrent.ExecutorService.awaitTermination
boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;
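awaitTermination blocks until all tasks have completed after a shutdown request, the timeout elapses, or the calling thread is interrupted, whichever happens first; it returns true if the executor terminated and false if the timeout elapsed. As a minimal, self-contained sketch (not taken from any of the files below), the usual idiom pairs it with shutdown():

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 8; i++) {
            final int id = i;
            pool.submit(() -> System.out.println("task " + id));
        }
        pool.shutdown(); // stop accepting new tasks; already-queued tasks still run
        // Wait up to 10 seconds for the queued tasks to finish.
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // timed out: interrupt anything still running
        }
    }
}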
From source file:com.mgmtp.perfload.core.daemon.LtDaemon.java
public static void main(final String... args) {
    JCommander jCmd = null;
    try {
        LtDaemonArgs cliArgs = new LtDaemonArgs();
        jCmd = new JCommander(cliArgs);
        jCmd.parse(args);

        log().info("Starting perfLoad Daemon...");
        log().info(cliArgs.toString());

        if (cliArgs.shutdown) {
            shutdownDaemon(cliArgs.port);
            System.exit(0);
        }

        // Client is expected next to the daemon
        File clientDir = new File(new File(System.getProperty("user.dir")).getParentFile(), "client");

        ExecutorService execService = Executors.newCachedThreadPool();
        StreamGobbler gobbler = new StreamGobbler(execService);
        Server server = new DefaultServer(cliArgs.port);
        LtDaemon daemon = new LtDaemon(clientDir, new ForkedProcessClientRunner(execService, gobbler), server);
        daemon.execute();

        execService.shutdown();
        execService.awaitTermination(10L, TimeUnit.SECONDS);
        System.exit(0);
    } catch (ParameterException ex) {
        jCmd.usage();
        System.exit(1);
    } catch (Exception ex) {
        log().error(ex.getMessage(), ex);
        System.exit(-1);
    }
}
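The example above gives in-flight work a 10-second grace period but never forces stragglers down. When that matters, the ExecutorService Javadoc suggests a two-phase shutdown: await, then shutdownNow() and await again. A hedged sketch of that pattern, not from LtDaemon itself:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

// Two-phase shutdown, close to the pattern shown in the ExecutorService Javadoc.
static void shutdownAndAwait(ExecutorService pool) {
    pool.shutdown(); // reject new tasks
    try {
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt currently executing tasks
            if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow(); // re-cancel if the current thread was interrupted
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}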
From source file:async.nio2.Main.java
public static void main(String[] args) throws IOException, InterruptedException, ExecutionException {
    if (args.length == 3) {
        PORT = Integer.valueOf(args[0]);
        NO_CLIENTS = Integer.valueOf(args[1]);
        NO_SAMPLES = Integer.valueOf(args[2]);
    }
    if (PORT < 0) {
        System.err.println("Error: port < 0");
        System.exit(1);
    }
    if (NO_CLIENTS < 1) {
        System.err.println("Error: #clients < 1");
        System.exit(1);
    }
    if (NO_SAMPLES < 1) {
        System.err.println("Error: #samples < 1");
        System.exit(1);
    }

    AsynchronousChannelGroup groupServer = AsynchronousChannelGroup
            .withThreadPool(Executors.newFixedThreadPool(1));
    AsynchronousChannelGroup groupClient = AsynchronousChannelGroup
            .withThreadPool(Executors.newFixedThreadPool(1));

    Server server = Server.newInstance(new InetSocketAddress("localhost", PORT), groupServer);
    InetSocketAddress localAddress = server.getLocalAddress();
    String hostname = localAddress.getHostName();
    int port = localAddress.getPort();

    ExecutorService es = Executors.newFixedThreadPool(2);
    System.out.printf("%03d clients on %s:%d, %03d runs each. All times in s.%n", NO_CLIENTS, hostname, port,
            NO_SAMPLES);

    range(0, NO_CLIENTS).unordered().parallel()
            .mapToObj(i -> CompletableFuture.supplyAsync(newClient(localAddress, groupClient), es).join())
            .map(array -> Arrays.stream(array).reduce(new DescriptiveStatistics(), Main::accumulate,
                    Main::combine))
            .map(Main::toEvaluationString).forEach(System.out::println);

    es.shutdown();
    es.awaitTermination(5, TimeUnit.SECONDS);

    groupClient.shutdown();
    groupClient.awaitTermination(5, TimeUnit.SECONDS);

    server.close();
    groupServer.shutdown();
    groupServer.awaitTermination(5, TimeUnit.SECONDS);
}
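AsynchronousChannelGroup exposes the same shutdown()/awaitTermination(timeout, unit) contract as ExecutorService, which is why the example above drains the client pool and both channel groups with the same three-line idiom. A small hedged helper (the name drainAll is illustrative) that applies the idiom to several executors:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

// Shut down several pools, then wait briefly for each in turn.
static void drainAll(List<ExecutorService> pools) throws InterruptedException {
    for (ExecutorService pool : pools) {
        pool.shutdown(); // let all pools start winding down concurrently
    }
    for (ExecutorService pool : pools) {
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // give up on whatever is still running
        }
    }
}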
From source file:fr.tpt.s3.mcdag.bench.MainBench.java
public static void main(String[] args) throws IOException, InterruptedException {
    // Command line options
    Options options = new Options();

    Option input = new Option("i", "input", true, "MC-DAG XML models");
    input.setRequired(true);
    input.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(input);

    Option output = new Option("o", "output", true, "Folder where results have to be written.");
    output.setRequired(true);
    options.addOption(output);

    Option uUti = new Option("u", "utilization", true, "Utilization.");
    uUti.setRequired(true);
    options.addOption(uUti);

    Option output2 = new Option("ot", "output-total", true, "File where total results are being written");
    output2.setRequired(true);
    options.addOption(output2);

    Option oCores = new Option("c", "cores", true, "Cores given to the test");
    oCores.setRequired(true);
    options.addOption(oCores);

    Option oLvls = new Option("l", "levels", true, "Levels tested for the system");
    oLvls.setRequired(true);
    options.addOption(oLvls);

    Option jobs = new Option("j", "jobs", true, "Number of threads to be launched.");
    jobs.setRequired(false);
    options.addOption(jobs);

    Option debug = new Option("d", "debug", false, "Debug logs.");
    debug.setRequired(false);
    options.addOption(debug);

    /*
     * Parsing of the command line
     */
    CommandLineParser parser = new DefaultParser();
    HelpFormatter formatter = new HelpFormatter();
    CommandLine cmd;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println(e.getMessage());
        formatter.printHelp("Benchmarks MultiDAG", options);
        System.exit(1);
        return;
    }

    String inputFilePath[] = cmd.getOptionValues("input");
    String outputFilePath = cmd.getOptionValue("output");
    String outputFilePathTotal = cmd.getOptionValue("output-total");
    double utilization = Double.parseDouble(cmd.getOptionValue("utilization"));
    boolean boolDebug = cmd.hasOption("debug");
    int nbLvls = Integer.parseInt(cmd.getOptionValue("levels"));
    int nbJobs = 1;
    int nbFiles = inputFilePath.length;

    if (cmd.hasOption("jobs"))
        nbJobs = Integer.parseInt(cmd.getOptionValue("jobs"));

    int nbCores = Integer.parseInt(cmd.getOptionValue("cores"));

    /*
     * While files need to be allocated
     * run the tests in the pool of threads
     */

    // For dual-criticality systems we call a specific thread
    if (nbLvls == 2) {
        System.out.println(">>>>>>>>>>>>>>>>>>>>> NB levels " + nbLvls);
        int i_files2 = 0;
        String outFile = outputFilePath.substring(0, outputFilePath.lastIndexOf('.'))
                .concat("-schedulability.csv");
        PrintWriter writer = new PrintWriter(outFile, "UTF-8");
        writer.println(
                "Thread; File; FSched (%); FPreempts; FAct; LSched (%); LPreempts; LAct; ESched (%); EPreempts; EAct; HSched(%); HPreempts; HAct; Utilization");
        writer.close();

        ExecutorService executor2 = Executors.newFixedThreadPool(nbJobs);

        while (i_files2 != nbFiles) {
            BenchThreadDualCriticality bt2 = new BenchThreadDualCriticality(inputFilePath[i_files2], outFile,
                    nbCores, boolDebug);
            executor2.execute(bt2);
            i_files2++;
        }

        executor2.shutdown();
        executor2.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);

        int fedTotal = 0;
        int laxTotal = 0;
        int edfTotal = 0;
        int hybridTotal = 0;
        int fedPreempts = 0;
        int laxPreempts = 0;
        int edfPreempts = 0;
        int hybridPreempts = 0;
        int fedActiv = 0;
        int laxActiv = 0;
        int edfActiv = 0;
        int hybridActiv = 0;

        // Read lines in file and do average
        int i = 0;
        File f = new File(outFile);
        @SuppressWarnings("resource")
        Scanner line = new Scanner(f);

        while (line.hasNextLine()) {
            String s = line.nextLine();
            if (i > 0) { // To skip the first line
                try (Scanner inLine = new Scanner(s).useDelimiter("; ")) {
                    int j = 0;
                    while (inLine.hasNext()) {
                        String val = inLine.next();
                        if (j == 2) {
                            fedTotal += Integer.parseInt(val);
                        } else if (j == 3) {
                            fedPreempts += Integer.parseInt(val);
                        } else if (j == 4) {
                            fedActiv += Integer.parseInt(val);
                        } else if (j == 5) {
                            laxTotal += Integer.parseInt(val);
                        } else if (j == 6) {
                            laxPreempts += Integer.parseInt(val);
                        } else if (j == 7) {
                            laxActiv += Integer.parseInt(val);
                        } else if (j == 8) {
                            edfTotal += Integer.parseInt(val);
                        } else if (j == 9) {
                            edfPreempts += Integer.parseInt(val);
                        } else if (j == 10) {
                            edfActiv += Integer.parseInt(val);
                        } else if (j == 11) {
                            hybridTotal += Integer.parseInt(val);
                        } else if (j == 12) {
                            hybridPreempts += Integer.parseInt(val);
                        } else if (j == 13) {
                            hybridActiv += Integer.parseInt(val);
                        }
                        j++;
                    }
                }
            }
            i++;
        }

        // Write percentage
        double fedPerc = (double) fedTotal / nbFiles;
        double laxPerc = (double) laxTotal / nbFiles;
        double edfPerc = (double) edfTotal / nbFiles;
        double hybridPerc = (double) hybridTotal / nbFiles;

        double fedPercPreempts = (double) fedPreempts / fedActiv;
        double laxPercPreempts = (double) laxPreempts / laxActiv;
        double edfPercPreempts = (double) edfPreempts / edfActiv;
        double hybridPercPreempts = (double) hybridPreempts / hybridActiv;

        Writer wOutput = new BufferedWriter(new FileWriter(outputFilePathTotal, true));
        wOutput.write(Thread.currentThread().getName() + "; " + utilization + "; " + fedPerc + "; "
                + fedPreempts + "; " + fedActiv + "; " + fedPercPreempts + "; " + laxPerc + "; " + laxPreempts
                + "; " + laxActiv + "; " + laxPercPreempts + "; " + edfPerc + "; " + edfPreempts + "; "
                + edfActiv + "; " + edfPercPreempts + "; " + hybridPerc + "; " + hybridPreempts + "; "
                + hybridActiv + "; " + hybridPercPreempts + "\n");
        wOutput.close();

    } else if (nbLvls > 2) {
        int i_files2 = 0;
        String outFile = outputFilePath.substring(0, outputFilePath.lastIndexOf('.'))
                .concat("-schedulability.csv");
        PrintWriter writer = new PrintWriter(outFile, "UTF-8");
        writer.println(
                "Thread; File; LSched (%); LPreempts; LAct; ESched (%); EPreempts; EAct; HSched(%); HPreempts; HAct; Utilization");
        writer.close();

        ExecutorService executor2 = Executors.newFixedThreadPool(nbJobs);

        while (i_files2 != nbFiles) {
            BenchThreadNLevels bt2 = new BenchThreadNLevels(inputFilePath[i_files2], outFile, nbCores,
                    boolDebug);
            executor2.execute(bt2);
            i_files2++;
        }

        executor2.shutdown();
        executor2.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);

        int laxTotal = 0;
        int edfTotal = 0;
        int hybridTotal = 0;
        int laxPreempts = 0;
        int edfPreempts = 0;
        int hybridPreempts = 0;
        int laxActiv = 0;
        int edfActiv = 0;
        int hybridActiv = 0;

        // Read lines in file and do average
        int i = 0;
        File f = new File(outFile);
        @SuppressWarnings("resource")
        Scanner line = new Scanner(f);

        while (line.hasNextLine()) {
            String s = line.nextLine();
            if (i > 0) { // To skip the first line
                try (Scanner inLine = new Scanner(s).useDelimiter("; ")) {
                    int j = 0;
                    while (inLine.hasNext()) {
                        String val = inLine.next();
                        if (j == 2) {
                            laxTotal += Integer.parseInt(val);
                        } else if (j == 3) {
                            laxPreempts += Integer.parseInt(val);
                        } else if (j == 4) {
                            laxActiv += Integer.parseInt(val);
                        } else if (j == 5) {
                            edfTotal += Integer.parseInt(val);
                        } else if (j == 6) {
                            edfPreempts += Integer.parseInt(val);
                        } else if (j == 7) {
                            edfActiv += Integer.parseInt(val);
                        } else if (j == 8) {
                            hybridTotal += Integer.parseInt(val);
                        } else if (j == 9) {
                            hybridPreempts += Integer.parseInt(val);
                        } else if (j == 10) {
                            hybridActiv += Integer.parseInt(val);
                        }
                        j++;
                    }
                }
            }
            i++;
        }

        // Write percentage
        double laxPerc = (double) laxTotal / nbFiles;
        double edfPerc = (double) edfTotal / nbFiles;
        double hybridPerc = (double) hybridTotal / nbFiles;

        double laxPercPreempts = (double) laxPreempts / laxActiv;
        double edfPercPreempts = (double) edfPreempts / edfActiv;
        double hybridPercPreempts = (double) hybridPreempts / hybridActiv;

        Writer wOutput = new BufferedWriter(new FileWriter(outputFilePathTotal, true));
        wOutput.write(Thread.currentThread().getName() + "; " + utilization + "; " + laxPerc + "; "
                + laxPreempts + "; " + laxActiv + "; " + laxPercPreempts + "; " + edfPerc + "; " + edfPreempts
                + "; " + edfActiv + "; " + edfPercPreempts + "; " + hybridPerc + "; " + hybridPreempts + "; "
                + hybridActiv + "; " + hybridPercPreempts + "\n");
        wOutput.close();
    } else {
        System.err.println("Wrong number of levels");
        System.exit(-1);
    }

    System.out.println("[BENCH Main] Done benchmarking U = " + utilization + " Levels " + nbLvls);
}
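Both branches above call executor2.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS) as an effectively unbounded wait, a common idiom when every submitted job must finish before the results file is read back. A hedged variant (the log message is illustrative) that surfaces progress while waiting:

executor2.shutdown();
// Effectively unbounded wait, but wake up periodically to report progress.
while (!executor2.awaitTermination(30, TimeUnit.SECONDS)) {
    System.out.println("[BENCH Main] still waiting for benchmark threads...");
}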
From source file:Test.java
public static void main(String args[]) throws Exception {
    ExecutorService pool = new ScheduledThreadPoolExecutor(3);
    AsynchronousFileChannel fileChannel = AsynchronousFileChannel.open(Paths.get("data.txt"),
            EnumSet.of(StandardOpenOption.READ), pool);

    CompletionHandler<Integer, ByteBuffer> handler = new CompletionHandler<Integer, ByteBuffer>() {
        public synchronized void completed(Integer result, ByteBuffer attachment) {
            for (int i = 0; i < attachment.limit(); i++) {
                System.out.print((char) attachment.get(i));
            }
        }

        public void failed(Throwable e, ByteBuffer attachment) {
        }
    };

    final int bufferCount = 5;
    ByteBuffer buffers[] = new ByteBuffer[bufferCount];
    for (int i = 0; i < bufferCount; i++) {
        buffers[i] = ByteBuffer.allocate(10);
        fileChannel.read(buffers[i], i * 10, buffers[i], handler);
    }

    // Note: the pool is never shut down, so awaitTermination cannot return true here;
    // it simply blocks for up to one second, giving the asynchronous reads time to complete.
    pool.awaitTermination(1, TimeUnit.SECONDS);

    for (ByteBuffer byteBuffer : buffers) {
        for (int i = 0; i < byteBuffer.limit(); i++) {
            System.out.println((char) byteBuffer.get(i));
        }
    }
}
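Since awaitTermination is being used above only as a crude fixed delay, a sketch that waits for exactly the five outstanding reads could use a CountDownLatch instead. This is a hedged alternative, not the original example's approach; the class name LatchedReads is illustrative:

import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.channels.CompletionHandler;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchedReads {
    public static void main(String[] args) throws Exception {
        final int bufferCount = 5;
        // One latch count per pending read; completed/failed both release one.
        final CountDownLatch pending = new CountDownLatch(bufferCount);
        AsynchronousFileChannel fileChannel = AsynchronousFileChannel.open(Paths.get("data.txt"),
                StandardOpenOption.READ);
        CompletionHandler<Integer, ByteBuffer> handler = new CompletionHandler<Integer, ByteBuffer>() {
            public void completed(Integer result, ByteBuffer attachment) {
                pending.countDown();
            }

            public void failed(Throwable e, ByteBuffer attachment) {
                pending.countDown(); // count failures too, or the wait never ends
            }
        };
        ByteBuffer[] buffers = new ByteBuffer[bufferCount];
        for (int i = 0; i < bufferCount; i++) {
            buffers[i] = ByteBuffer.allocate(10);
            fileChannel.read(buffers[i], i * 10, buffers[i], handler);
        }
        // Returns as soon as all five reads finish instead of always sleeping a full second.
        if (!pending.await(5, TimeUnit.SECONDS)) {
            System.err.println("reads did not finish in time");
        }
        for (ByteBuffer b : buffers) {
            for (int i = 0; i < b.limit(); i++) {
                System.out.print((char) b.get(i));
            }
        }
        fileChannel.close();
    }
}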
From source file:org.accelio.jxio.tests.benchmarks.jxioConnection.StreamClient.java
/**
 * @param args
 */
public static void main(String[] args) {
    String uriString;
    URI uri;
    try {
        LOG.info("arguments = " + args[0] + " " + args[1] + " " + args[2] + " " + args[3] + " " + args[4]);
        ExecutorService es = Executors
                .newFixedThreadPool(Integer.parseInt(args[3]) + Integer.parseInt(args[4]));
        // input
        for (int i = 0; i < Integer.parseInt(args[3]); i++) {
            uriString = String.format("rdma://%s:%s/data?size=%s", args[0], args[1], args[2]);
            uri = new URI(uriString);
            es.submit(new StreamClient(uri, "input", i, Integer.parseInt(args[4])));
        }
        // output
        for (int i = 0; i < Integer.parseInt(args[4]); i++) {
            uriString = String.format("rdma://%s:%s/data?size=%s", args[0], args[1], args[2]);
            uri = new URI(uriString);
            es.submit(new StreamClient(uri, "output", i, Integer.parseInt(args[4])));
        }
        es.shutdown();
        es.awaitTermination(5, TimeUnit.MINUTES);
    } catch (URISyntaxException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
From source file:com.amazonaws.services.kinesis.samples.datavis.HttpReferrerStreamWriter.java
/**
 * Start a number of threads and send randomly generated {@link HttpReferrerPair}s to a Kinesis Stream until the
 * program is terminated.
 *
 * @param args Expecting 3 arguments: A numeric value indicating the number of threads to use to send
 *        data to Kinesis and the name of the stream to send records to, and the AWS region in which these resources
 *        exist or should be created.
 * @throws InterruptedException If this application is interrupted while sending records to Kinesis.
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length != 3) {
        System.err.println("Usage: " + HttpReferrerStreamWriter.class.getSimpleName()
                + " <number of threads> <stream name> <region>");
        System.exit(1);
    }
    int numberOfThreads = Integer.parseInt(args[0]);
    String streamName = args[1];
    Region region = SampleUtils.parseRegion(args[2]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    // The more resources we declare the higher write IOPS we need on our DynamoDB table.
    // We write a record for each resource every interval.
    // If interval = 500ms, resource count = 7 we need: (1000/500 * 7) = 14 write IOPS minimum.
    List<String> resources = new ArrayList<>();
    resources.add("/index.html");

    // These are the possible referrers to use when generating pairs
    List<String> referrers = new ArrayList<>();
    referrers.add("http://www.amazon.com");
    referrers.add("http://www.google.com");
    referrers.add("http://www.yahoo.com");
    referrers.add("http://www.bing.com");
    referrers.add("http://www.stackoverflow.com");
    referrers.add("http://www.reddit.com");

    HttpReferrerPairFactory pairFactory = new HttpReferrerPairFactory(resources, referrers);

    // Creates a stream to write to with 2 shards if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    final HttpReferrerKinesisPutter putter = new HttpReferrerKinesisPutter(pairFactory, kinesis, streamName);

    ExecutorService es = Executors.newCachedThreadPool();

    Runnable pairSender = new Runnable() {
        @Override
        public void run() {
            try {
                putter.sendPairsIndefinitely(DELAY_BETWEEN_RECORDS_IN_MILLIS, TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                LOG.warn("Thread encountered an error while sending records. Records will no longer be put by this thread.",
                        ex);
            }
        }
    };

    for (int i = 0; i < numberOfThreads; i++) {
        es.submit(pairSender);
    }

    LOG.info(String.format("Sending pairs with a %dms delay between records with %d thread(s).",
            DELAY_BETWEEN_RECORDS_IN_MILLIS, numberOfThreads));

    es.shutdown();
    es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
}
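Here awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS) simply parks the main thread indefinitely, since the workers send records until the process is killed. A hedged fragment (reusing es and LOG from the example above; the message text is illustrative) that stays responsive while waiting:

es.shutdown();
// Long.MAX_VALUE days is effectively "wait forever"; polling in a loop
// gives the main thread a chance to report that the senders are alive.
while (!es.awaitTermination(1, TimeUnit.MINUTES)) {
    LOG.info("Sender threads still running...");
}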
From source file:com.alertlogic.aws.kinesis.test1.StreamWriter.java
/**
 * Start a number of threads and send randomly generated {@link HttpReferrerPair}s to a Kinesis Stream until the
 * program is terminated.
 *
 * @param args Expecting 3 arguments: A numeric value indicating the number of threads to use to send
 *        data to Kinesis and the name of the stream to send records to, and the AWS region in which these resources
 *        exist or should be created.
 * @throws InterruptedException If this application is interrupted while sending records to Kinesis.
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length != 3) {
        System.err.println(
                "Usage: " + StreamWriter.class.getSimpleName() + " <number of threads> <stream name> <region>");
        System.exit(1);
    }
    int numberOfThreads = Integer.parseInt(args[0]);
    String streamName = args[1];
    Region region = SampleUtils.parseRegion(args[2]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    // The more resources we declare the higher write IOPS we need on our DynamoDB table.
    // We write a record for each resource every interval.
    // If interval = 500ms, resource count = 7 we need: (1000/500 * 7) = 14 write IOPS minimum.
    List<String> resources = new ArrayList<>();
    resources.add("/index.html");

    // These are the possible referrers to use when generating pairs
    List<String> referrers = new ArrayList<>();
    referrers.add("http://www.amazon.com");
    referrers.add("http://www.google.com");
    referrers.add("http://www.yahoo.com");
    referrers.add("http://www.bing.com");
    referrers.add("http://www.stackoverflow.com");
    referrers.add("http://www.reddit.com");

    HttpReferrerPairFactory pairFactory = new HttpReferrerPairFactory(resources, referrers);

    // Creates a stream to write to with 2 shards if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    final HttpReferrerKinesisPutter putter = new HttpReferrerKinesisPutter(pairFactory, kinesis, streamName);

    ExecutorService es = Executors.newCachedThreadPool();

    Runnable pairSender = new Runnable() {
        @Override
        public void run() {
            try {
                putter.sendPairsIndefinitely(DELAY_BETWEEN_RECORDS_IN_MILLIS, TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                LOG.warn("Thread encountered an error while sending records. Records will no longer be put by this thread.",
                        ex);
            }
        }
    };

    for (int i = 0; i < numberOfThreads; i++) {
        es.submit(pairSender);
    }

    LOG.info(String.format("Sending pairs with a %dms delay between records with %d thread(s).",
            DELAY_BETWEEN_RECORDS_IN_MILLIS, numberOfThreads));

    es.shutdown();
    es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
}
From source file:Test.java
public static void main(String args[]) throws Exception {
    ExecutorService pool = new ScheduledThreadPoolExecutor(3);
    AsynchronousFileChannel fileChannel = AsynchronousFileChannel.open(Paths.get("data.txt"),
            EnumSet.of(StandardOpenOption.READ), pool);

    CompletionHandler<Integer, ByteBuffer> handler = new CompletionHandler<Integer, ByteBuffer>() {
        @Override
        public synchronized void completed(Integer result, ByteBuffer attachment) {
            for (int i = 0; i < attachment.limit(); i++) {
                System.out.println((char) attachment.get(i));
            }
        }

        @Override
        public void failed(Throwable e, ByteBuffer attachment) {
        }
    };

    final int bufferCount = 5;
    ByteBuffer buffers[] = new ByteBuffer[bufferCount];
    for (int i = 0; i < bufferCount; i++) {
        buffers[i] = ByteBuffer.allocate(10);
        fileChannel.read(buffers[i], i * 10, buffers[i], handler);
    }

    // As in the earlier Test.java example, the pool is never shut down, so this call
    // always blocks for the full second; it serves as a fixed delay for the reads to finish.
    pool.awaitTermination(1, TimeUnit.SECONDS);

    for (ByteBuffer byteBuffer : buffers) {
        for (int i = 0; i < byteBuffer.limit(); i++) {
            System.out.print((char) byteBuffer.get(i));
        }
    }
}
From source file:locking.LockingExample.java
public static void main(String[] args) throws Exception {
    // all of the useful sample code is in ExampleClientThatLocks.java
    // FakeLimitedResource simulates some external resource that can only be accessed by one process at a time
    final FakeLimitedResource resource = new FakeLimitedResource();
    ExecutorService service = Executors.newFixedThreadPool(QTY);
    final TestingServer server = new TestingServer();
    try {
        for (int i = 0; i < QTY; ++i) {
            final int index = i;
            Callable<Void> task = new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                            new ExponentialBackoffRetry(1000, 3));
                    try {
                        client.start();
                        ExampleClientThatLocks example = new ExampleClientThatLocks(client, PATH, resource,
                                "Client " + index);
                        for (int j = 0; j < REPETITIONS; ++j) {
                            example.doWork(10, TimeUnit.SECONDS);
                        }
                    } catch (Throwable e) {
                        e.printStackTrace();
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            };
            service.submit(task);
        }
        service.shutdown();
        service.awaitTermination(10, TimeUnit.MINUTES);
    } finally {
        IOUtils.closeQuietly(server);
    }
}
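The example above discards awaitTermination's boolean result. A hedged sketch of checking it, so a client hung on a lock does not fail silently (the message text is illustrative):

service.shutdown();
if (!service.awaitTermination(10, TimeUnit.MINUTES)) {
    // The clients are stuck (e.g. blocked on a lost lock); force them down.
    System.err.println("Clients did not finish in 10 minutes, forcing shutdown");
    service.shutdownNow();
}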
From source file:co.mitro.core.util.RPCLogReplayer.java
public static void main(String[] args) throws IOException, InterruptedException, KeyManagementException,
        UnrecoverableKeyException, NoSuchAlgorithmException, KeyStoreException {
    List<Request> requests = new ArrayList<>();
    ExecutorService executor = Executors.newFixedThreadPool(5);
    for (int i = 0; i < args.length; ++i) {
        String filename = args[i];
        System.err.println("Reading file: " + filename);
        JsonRecordReader rr = JsonRecordReader.MakeFromFilename(filename);
        JsonRecordReader.JsonLog log;
        try {
            while (null != (log = rr.readJson())) {
                if (Strings.isNullOrEmpty(log.payload.transactionId) && !log.payload.implicitBeginTransaction) {
                    // read only transaction
                    requests.add(new Request(log.metadata.endpoint, log.payload));
                }
            }
        } catch (EOFException e) {
            System.err.println("unexpected end of file; skipping");
        }
    }

    // run the simulation for a while.
    long scaling = 1000;
    double requestsPerMs = 353. / 9805199;
    long START_MS = 0;
    // run for 20 min
    long END_MS = 20 * 60 * 1000;
    long now = START_MS;
    int count = 0;
    while (now < END_MS) {
        double toSleep = nextExp(requestsPerMs * scaling);
        now += toSleep;
        ++count;
        Thread.sleep((long) toSleep);
        executor.execute(new SendQuery(requests.get(RNG.nextInt(requests.size()))));
        System.out.println("count: " + count + "\t time:" + now + "\t rate:" + (double) count / now);
    }
    // Note: shutdown() is never called, so this cannot return true; it just waits
    // up to one minute for in-flight queries before the JVM exits.
    executor.awaitTermination(1, TimeUnit.MINUTES);
}
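If the intent is to let every queued SendQuery drain before exiting, a hedged correction would shut the pool down first (the error message is illustrative):

executor.shutdown(); // no new queries will be accepted
if (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
    System.err.println("Some queries were still running after 1 minute");
    executor.shutdownNow();
}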