List of usage examples for java.lang.String.valueOf
public static String valueOf(double d)
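The examples collected below come from real projects and exercise several valueOf overloads (int, long, Object) alongside the double form. As a minimal, self-contained illustration of the double overload itself (not taken from any of the projects below; the class name and values are invented for the demo), the following sketch shows the strings String.valueOf(double) produces:

public class StringValueOfDoubleDemo {
    public static void main(String[] args) {
        // String.valueOf(double) returns the same result as Double.toString(d)
        System.out.println(String.valueOf(3.14));        // "3.14"
        System.out.println(String.valueOf(1.0));         // "1.0" (a trailing ".0" is always kept)
        System.out.println(String.valueOf(0.1 + 0.2));   // "0.30000000000000004" (binary floating point)
        System.out.println(String.valueOf(1.0e7));       // "1.0E7" (scientific notation for magnitudes >= 10^7)
        System.out.println(String.valueOf(Double.NaN));  // "NaN"
        System.out.println(String.valueOf(1.0 / 0.0));   // "Infinity"

        // Unlike calling toString() on a boxed value, the Object overload is null-safe:
        Double boxed = null;
        System.out.println(String.valueOf((Object) boxed)); // prints "null", no NullPointerException
    }
}

Since String.valueOf(d) and Double.toString(d) are equivalent, the choice between them is stylistic; the Object overload used in several examples below has the added benefit of returning "null" for null references instead of throwing.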
From source file:com.ciphertool.zodiacengine.CipherSolutionEngine.java
/**
 * @param args
 * @throws InterruptedException
 */
public static void main(String[] args) throws InterruptedException {
    // Spin up the Spring application context
    setUp();

    CipherDto cipherDto = null;
    Runnable cipherTask = null;
    Thread cipherWorker = null;
    long threadIterations = 0;

    Cipher cipher = cipherDao.findByCipherName(cipherName);

    long start = System.currentTimeMillis();

    List<Thread> threads = new ArrayList<Thread>();
    List<CipherDto> cipherDtos = new ArrayList<CipherDto>();

    if (maxThreads > numIterations) {
        log.warn("The number of threads is greater than the number of tasks. Reducing thread count to "
                + numIterations + ".");
        maxThreads = (int) numIterations;
    }

    log.info("Beginning solution generation. Generating " + numIterations + " solutions using " + maxThreads
            + " threads.");

    for (int i = 1; i <= maxThreads; i++) {
        threadIterations = (numIterations / maxThreads);
        if (i == 1) {
            /*
             * If the number of iterations doesn't divide evenly among the
             * threads, add the remainder to the first thread.
             */
            threadIterations += (numIterations % maxThreads);
        }

        cipherDto = new CipherDto(String.valueOf(i), cipher);
        cipherDtos.add(cipherDto);

        cipherTask = new CipherSolutionRunnable(threadIterations, solutionGenerator, solutionEvaluator,
                cipherDto);

        cipherWorker = new Thread(cipherTask, String.valueOf(i));
        cipherWorker.start();
        threads.add(cipherWorker);
    }

    /*
     * Keep checking threads until no more are left running.
     */
    int running = 0;
    do {
        running = 0;
        for (Thread thread : threads) {
            if (thread.isAlive()) {
                running++;
            }
        }

        /*
         * There's no need to loop through this as fast as possible. Sleep
         * for a short period so that there isn't so much overhead from
         * monitoring the threads' state.
         */
        Thread.sleep(monitorSleepMillis);
    } while (running > 0);

    long totalSolutions = 0;
    long totalMatchSum = 0;
    long uniqueMatchSum = 0;
    long adjacentMatchSum = 0;

    BigInteger cipherId = cipher.getId();
    int rows = cipher.getRows();
    int columns = cipher.getColumns();
    SolutionChromosome solutionMostMatches = new SolutionChromosome(cipherId, 0, 0, 0, rows, columns);
    SolutionChromosome solutionMostUnique = new SolutionChromosome(cipherId, 0, 0, 0, rows, columns);
    SolutionChromosome solutionMostAdjacent = new SolutionChromosome(cipherId, 0, 0, 0, rows, columns);

    /*
     * Sum up all data from all CipherDtos passed to the threads.
     */
    for (CipherDto nextCipherDto : cipherDtos) {
        log.debug("Best solution from thread " + nextCipherDto.getThreadName() + ": "
                + nextCipherDto.getSolutionMostMatches());
        log.debug("Most unique solution from thread " + nextCipherDto.getThreadName() + ": "
                + nextCipherDto.getSolutionMostUnique());
        log.debug("Solution with most adjacent matches from thread " + nextCipherDto.getThreadName() + ": "
                + nextCipherDto.getSolutionMostAdjacent());

        totalSolutions += nextCipherDto.getNumSolutions();
        totalMatchSum += nextCipherDto.getTotalMatchSum();
        uniqueMatchSum += nextCipherDto.getUniqueMatchSum();
        adjacentMatchSum += nextCipherDto.getAdjacentMatchSum();

        /*
         * Find the solution with the highest number of total matches.
         */
        if (nextCipherDto.getSolutionMostMatches().getTotalMatches() > solutionMostMatches.getTotalMatches()) {
            solutionMostMatches = nextCipherDto.getSolutionMostMatches();
        }

        /*
         * Find the solution with the highest number of unique matches in plaintext.
         */
        if (nextCipherDto.getSolutionMostUnique().getUniqueMatches() > solutionMostUnique.getUniqueMatches()) {
            solutionMostUnique = nextCipherDto.getSolutionMostUnique();
        }

        /*
         * Find the solution with the highest number of adjacent matches in plaintext.
         */
        if (nextCipherDto.getSolutionMostAdjacent().getAdjacentMatchCount() > solutionMostAdjacent
                .getAdjacentMatchCount()) {
            solutionMostAdjacent = nextCipherDto.getSolutionMostAdjacent();
        }
    }

    /*
     * Print out summary information.
     */
    log.info("Took " + (System.currentTimeMillis() - start) + "ms to generate and validate " + totalSolutions
            + " solutions.");
    log.info("Most total matches achieved: " + solutionMostMatches.getTotalMatches());
    log.info("Average total matches: " + (totalMatchSum / totalSolutions));
    log.info("Best solution found: " + solutionMostMatches);
    log.info("Most unique matches achieved: " + solutionMostUnique.getUniqueMatches());
    log.info("Average unique matches: " + (uniqueMatchSum / totalSolutions));
    log.info("Solution with most unique matches found: " + solutionMostUnique);
    log.info("Most adjacent matches achieved: " + solutionMostAdjacent.getAdjacentMatchCount());
    log.info("Average adjacent matches: " + (adjacentMatchSum / totalSolutions));
    log.info("Solution with most adjacent matches found: " + solutionMostAdjacent);
}
From source file:hydrograph.server.execution.tracking.client.main.HydrographMain.java
/**
 * The main method.
 *
 * @param args
 *            the arguments
 * @throws Exception
 *             the exception
 */
public static void main(String[] args) throws Exception {
    HydrographMain hydrographMain = new HydrographMain();
    final Timer timer = new Timer();
    final CountDownLatch latch = new CountDownLatch(1);
    try {
        Session session = null;
        boolean isExecutionTracking = false;
        String[] argsList = args;
        List<String> argumentList = new ArrayList<String>(Arrays.asList(args));
        final String jobId = hydrographMain.getJobId(argumentList);

        getLogLevel(argumentList).ifPresent(x -> {
            if (!x.equalsIgnoreCase(String.valueOf(logger.getLevel()))) {
                setLoglevel(x);
            } else {
                Optional.empty();
            }
        });

        logger.info("Argument List: " + argumentList.toString());

        String trackingClientSocketPort = hydrographMain.getTrackingClientSocketPort(argumentList);

        if (argumentList.contains(Constants.IS_TRACKING_ENABLE)) {
            int index = argumentList.indexOf(Constants.IS_TRACKING_ENABLE);
            isExecutionTracking = Boolean.valueOf(argsList[index + 1]);
            argumentList = removeItemFromIndex(index, argumentList);
        }

        if (argumentList.contains(Constants.TRACKING_CLIENT_SOCKET_PORT)) {
            int index = argumentList.indexOf(Constants.TRACKING_CLIENT_SOCKET_PORT);
            argumentList = removeItemFromIndex(index, argumentList);
        }

        argsList = argumentList.toArray(new String[argumentList.size()]);

        logger.debug("Execution tracking enabled - " + isExecutionTracking);
        logger.info("Tracking Client Port: " + trackingClientSocketPort);

        /*
         * Start new thread to run job.
         */
        final HydrographService execution = new HydrographService();
        FutureTask task = hydrographMain.executeGraph(latch, jobId, argsList, execution, isExecutionTracking);

        hydrographMain.executorService = Executors.newSingleThreadExecutor();
        hydrographMain.executorService.submit(task);

        if (isExecutionTracking) {
            // If tracking is enabled, start to post execution tracking status.
            final HydrographEngineCommunicatorSocket socket = new HydrographEngineCommunicatorSocket(execution);
            session = hydrographMain.connectToServer(socket, jobId, trackingClientSocketPort);
            hydrographMain.sendExecutionTrackingStatus(latch, session, jobId, timer, execution, socket);
        }

        // Wait for the execute-graph thread.
        task.get();
    } catch (Exception exp) {
        logger.info("Getting exception from HydrographMain");
        throw new RuntimeException(exp);
    } finally {
        // Clean up threads: the executor thread and the timer thread.
        logger.info("HydrographMain releasing resources");
        if (!hydrographMain.executorService.isShutdown() && !hydrographMain.executorService.isTerminated()) {
            hydrographMain.executorService.shutdown();
        }
        timer.cancel();
    }
}
From source file:com.sdw.dream.spark.examples.ml.JavaOneVsRestExample.java
public static void main(String[] args) {
    // parse the arguments
    Params params = parse(args);
    SparkConf conf = new SparkConf().setAppName("JavaOneVsRestExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    SQLContext jsql = new SQLContext(jsc);

    // $example on$
    // configure the base classifier
    LogisticRegression classifier = new LogisticRegression().setMaxIter(params.maxIter).setTol(params.tol)
            .setFitIntercept(params.fitIntercept);

    if (params.regParam != null) {
        classifier.setRegParam(params.regParam);
    }
    if (params.elasticNetParam != null) {
        classifier.setElasticNetParam(params.elasticNetParam);
    }

    // instantiate the One Vs Rest Classifier
    OneVsRest ovr = new OneVsRest().setClassifier(classifier);

    String input = params.input;
    DataFrame inputData = jsql.read().format("libsvm").load(input);
    DataFrame train;
    DataFrame test;

    // compute the train/test split: if testInput is not provided use part of input
    String testInput = params.testInput;
    if (testInput != null) {
        train = inputData;
        // compute the number of features in the training set
        int numFeatures = inputData.first().<Vector>getAs(1).size();
        test = jsql.read().format("libsvm").option("numFeatures", String.valueOf(numFeatures)).load(testInput);
    } else {
        double f = params.fracTest;
        DataFrame[] tmp = inputData.randomSplit(new double[] { 1 - f, f }, 12345);
        train = tmp[0];
        test = tmp[1];
    }

    // train the multiclass model
    OneVsRestModel ovrModel = ovr.fit(train.cache());

    // score the model on test data
    DataFrame predictions = ovrModel.transform(test.cache()).select("prediction", "label");

    // obtain metrics
    MulticlassMetrics metrics = new MulticlassMetrics(predictions);
    StructField predictionColSchema = predictions.schema().apply("prediction");
    Integer numClasses = (Integer) MetadataUtils.getNumClasses(predictionColSchema).get();

    // compute the false positive rate per label
    StringBuilder results = new StringBuilder();
    results.append("label\tfpr\n");
    for (int label = 0; label < numClasses; label++) {
        results.append(label);
        results.append("\t");
        results.append(metrics.falsePositiveRate((double) label));
        results.append("\n");
    }

    Matrix confusionMatrix = metrics.confusionMatrix();
    // output the Confusion Matrix
    System.out.println("Confusion Matrix");
    System.out.println(confusionMatrix);
    System.out.println();
    System.out.println(results);
    // $example off$

    jsc.stop();
}
From source file:edu.usc.goffish.gopher.impl.Main.java
public static void main(String[] args) {
    Properties properties = new Properties();
    try {
        properties.load(new FileInputStream(CONFIG_FILE));
    } catch (IOException e) {
        String message = "Error while loading Container Configuration from " + CONFIG_FILE + " Cause -"
                + e.getCause();
        log.warning(message);
    }

    if (args.length == 4) {
        PropertiesConfiguration propertiesConfiguration;
        String url = null;

        URI uri = URI.create(args[3]);
        String dataDir = uri.getPath();
        String currentHost = uri.getHost();

        try {
            propertiesConfiguration = new PropertiesConfiguration(dataDir + "/gofs.config");
            propertiesConfiguration.load();
            url = (String) propertiesConfiguration.getString(DataNode.DATANODE_NAMENODE_LOCATION_KEY);
        } catch (ConfigurationException e) {
            String message = " Error while reading gofs-config cause -" + e.getCause();
            handleException(message);
        }

        URI nameNodeUri = URI.create(url);
        INameNode nameNode = new RemoteNameNode(nameNodeUri);

        int partition = -1;
        try {
            for (URI u : nameNode.getDataNodes()) {
                if (URIHelper.isLocalURI(u)) {
                    IDataNode dataNode = DataNode.create(u);
                    IntCollection partitions = dataNode.getLocalPartitions(args[2]);
                    partition = partitions.iterator().nextInt();
                    break;
                }
            }

            if (partition == -1) {
                String message = "Partition not loaded from uri : " + nameNodeUri;
                handleException(message);
            }

            properties.setProperty(GopherInfraHandler.PARTITION, String.valueOf(partition));
        } catch (Exception e) {
            String message = "Error while loading Partitions from " + nameNodeUri + " Cause -" + e.getMessage();
            e.printStackTrace();
            handleException(message);
        }

        properties.setProperty(Constants.STATIC_PELLET_COUNT, String.valueOf(1));

        FloeRuntimeEnvironment environment = FloeRuntimeEnvironment.getEnvironment();
        environment.setSystemConfig(properties);
        properties.setProperty(Constants.CURRET_HOST, currentHost);

        String managerHost = args[0];
        int managerPort = Integer.parseInt(args[1]);

        Container container = environment.getContainer();
        container.setManager(managerHost, managerPort);

        DefaultClientConfig config = new DefaultClientConfig();
        config.getProperties().put(ClientConfig.FEATURE_DISABLE_XML_SECURITY, true);
        config.getFeatures().put(ClientConfig.FEATURE_DISABLE_XML_SECURITY, true);
        Client c = Client.create(config);

        if (managerHost == null || managerPort == 0) {
            handleException("Manager Host / Port have to be configured in " + args[0]);
        }

        WebResource r = c.resource("http://" + managerHost + ":" + managerPort
                + "/Manager/addContainerInfo/Container=" + container.getContainerInfo().getContainerId()
                + "/Host=" + container.getContainerInfo().getContainerHost());
        c.getProperties().put(ClientConfig.PROPERTY_FOLLOW_REDIRECTS, true);
        r.post();

        log.log(Level.INFO, "Container started ");
    } else {
        String message = "Invalid arguments: arg[0]=Manager host, "
                + "arg[1]=manager port, arg[2]=graph id, arg[3]=partition uri";
        message += "\n Current Arguments...." + args.length + "\n";
        for (int i = 0; i < args.length; i++) {
            message += "arg " + i + " : " + args[i] + "\n";
        }
        handleException(message);
    }
}
From source file:com.calamp.services.kinesis.events.processor.CalAmpEventProcessor.java
public static void main(String[] args) throws Exception {
    checkUsage(args);
    //String applicationName = args[0];
    //String streamName = args[1];
    //Region region = RegionUtils.getRegion(args[2]);
    boolean isUnordered = Boolean.valueOf(args[3]);
    String applicationName = isUnordered ? CalAmpParameters.sortAppName : CalAmpParameters.consumeAppName;
    String streamName = isUnordered ? CalAmpParameters.unorderdStreamName : CalAmpParameters.orderedStreamName;
    Region region = RegionUtils.getRegion(CalAmpParameters.regionName);

    // The region now comes from CalAmpParameters.regionName (not args[2]), so report that value on failure.
    if (region == null) {
        System.err.println(CalAmpParameters.regionName + " is not a valid AWS region.");
        System.exit(1);
    }

    setLogLevels();

    AWSCredentialsProvider credentialsProvider = CredentialUtils.getCredentialsProvider();
    ClientConfiguration cc = ConfigurationUtils.getClientConfigWithUserAgent(true);
    AmazonKinesis kinesisClient = new AmazonKinesisClient(credentialsProvider, cc);
    kinesisClient.setRegion(region);
    //Utils.kinesisClient = kinesisClient;

    String workerId = String.valueOf(UUID.randomUUID());
    KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
            credentialsProvider, workerId).withRegionName(region.getName()).withCommonClientConfig(cc)
                    .withMaxRecords(com.calamp.services.kinesis.events.utils.CalAmpParameters.maxRecPerPoll)
                    .withIdleTimeBetweenReadsInMillis(
                            com.calamp.services.kinesis.events.utils.CalAmpParameters.pollDelayMillis)
                    .withCallProcessRecordsEvenForEmptyRecordList(CalAmpParameters.alwaysPoll)
                    .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    IRecordProcessorFactory processorFactory = new RecordProcessorFactory(isUnordered);

    // Create the KCL worker with the record processor factory.
    Worker worker = new Worker(processorFactory, kclConfig);

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}
From source file:eionet.rod.countrysrv.Extractor.java
/**
 * Called when the script is run from the command line. Takes one optional argument: the mode, which can be 0-3.
 * Assumes 0 if not provided.
 *
 * @param args
 *            command-line arguments
 */
public static void main(String[] args) {
    try {
        String mode = null;
        String userName = SYSTEM_USER;
        if (args.length == 1) {
            mode = args[0];
        } else if (args.length > 1) {
            // This is feedback to the user, not debugging, as this class is executed on the command line;
            // also log to file for other applications.
            System.out.println("Usage: Extractor [mode]");
            LOGGER.error("Usage: Extractor [mode]");
            return;
        } else {
            mode = String.valueOf(ALL_DATA);
        }
        /*
        if (extractor == null) {
            extractor = new Extractor();
        }
        extractor.harvest(Integer.parseInt(mode), userName);
        */
        execute(Integer.parseInt(mode), userName);
    } catch (Exception e) {
        LOGGER.error(e.getMessage(), e);
    }
}
From source file:com.maxpowered.amazon.advertising.api.app.App.java
public static void main(final String... args) throws FileNotFoundException, IOException, JAXBException,
        XMLStreamException, InterruptedException {
    try (ClassPathXmlApplicationContext ctx = new ClassPathXmlApplicationContext("application-context.xml")) {
        /*
         * Get default options based on Spring configs.
         */
        final String inputDefault = getOptionDefaultBasedOnSpringProperty(ctx, PROPERTY_APP_INPUT, STD_IN_STR);
        final String processedDefault = inputDefault.equals(STD_IN_STR) ? DEFAULT_PROCESSED_FILE_BASE
                : inputDefault + PROCESSED_EXT;
        final String outputDefault = getOptionDefaultBasedOnSpringProperty(ctx, PROPERTY_APP_OUTPUT,
                STD_OUT_STR);
        int throttleDefault = Integer.valueOf(getOptionDefaultBasedOnSpringProperty(ctx, PROPERTY_APP_THROTTLE,
                String.valueOf(DEFAULT_APP_THROTTLE)));
        // Maximum of 25000 requests per hour.
        throttleDefault = Math.min(throttleDefault, MAX_APP_THROTTLE);

        /*
         * Get options from the CLI args.
         */
        final Options options = new Options();
        options.addOption("h", false, "Display this help.");
        options.addOption("i", true, "Set the file to read ASINs from. " + DEFAULT_STR + inputDefault);
        options.addOption("p", true, "Set the file to store processed ASINs in. " + DEFAULT_STR
                + processedDefault + " or '" + PROCESSED_EXT + "' appended to the input file name.");
        // Add a note that the output depends on the configured processors. If none are configured, it
        // defaults to a std.out processor.
        options.addOption("o", true,
                "Set the file to write fetched info xml to via FileProcessor. " + DEFAULT_STR + outputDefault);
        options.addOption("1", false, "Override output file and always output fetched info xml to std.out.");
        options.addOption("t", true, "Set the requests per hour throttle (max of " + MAX_APP_THROTTLE + "). "
                + DEFAULT_STR + throttleDefault);

        final CommandLineParser parser = new DefaultParser();
        CommandLine cmd = null;
        boolean needsHelp = false;
        try {
            cmd = parser.parse(options, args);
        } catch (final ParseException e) {
            needsHelp = true;
        }

        // Check needsHelp first: if parsing failed, cmd is null and calling cmd.hasOption would NPE.
        if (needsHelp || cmd.hasOption("h")) {
            final HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp("App", options);
            return;
        }

        // Get throttle rate.
        final int throttle = Math.min(
                cmd.hasOption("t") ? Integer.valueOf(cmd.getOptionValue("t")) : throttleDefault,
                MAX_APP_THROTTLE);
        LOG.debug("Throttle (default {}) is {} requests per hour", throttleDefault, throttle);
        // We don't want to hit our limit, so spread requests over just under an hour's worth of milliseconds.
        final int requestWait = 3540000 / throttle;

        // Get input stream.
        String input;
        if (cmd.hasOption("i")) {
            input = cmd.getOptionValue("i");
        } else {
            input = inputDefault;
        }
        LOG.debug("Input name (default {}) is {}", inputDefault, input);

        // Get processed file.
        String processed;
        if (cmd.hasOption("p")) {
            processed = cmd.getOptionValue("p");
        } else {
            processed = input + PROCESSED_EXT;
        }
        LOG.debug("Processed file name (default {}) is {}", processedDefault, processed);
        final File processedFile = new File(processed);
        processedFile.createNewFile();

        try (final InputStream inputStream = getInputStream(input)) {
            // Get output stream.
            String output;
            if (cmd.hasOption("o")) {
                output = cmd.getOptionValue("o");
            } else {
                output = outputDefault;
            }
            if (cmd.hasOption("1")) {
                output = STD_OUT_STR;
            }
            LOG.debug("Output (default {}) name is {}", outputDefault, output);

            // Special logic to set the FileProcessor output.
            if (output.equals(STD_OUT_STR)) {
                final FileProcessor fileProcessor = ctx.getBeanFactory().getBean(FileProcessor.class);
                fileProcessor.setOutputStream(System.out);
            } else if (!output.equals(outputDefault)) {
                final FileProcessor fileProcessor = ctx.getBeanFactory().getBean(FileProcessor.class);
                fileProcessor.setOutputFile(output);
            }

            // This could be easily configured through CLI or properties.
            final List<String> responseGroups = Lists.newArrayList();
            for (final ResponseGroup responseGroup : new ResponseGroup[] { ResponseGroup.IMAGES,
                    ResponseGroup.ITEM_ATTRIBUTES }) {
                responseGroups.add(responseGroup.getResponseGroupName());
            }
            final String responseGroupString = Joiner.on(",").join(responseGroups);

            // Search the list of remaining ASINs.
            final ProductFetcher fetcher = ctx.getBeanFactory().getBean(ProductFetcher.class);
            fetcher.setProcessedFile(processedFile);
            fetcher.setRequestWait(requestWait);
            fetcher.setInputStream(inputStream);
            fetcher.setResponseGroups(responseGroupString);

            // This ensures that statistics of processed ASINs almost always get printed at the end.
            Runtime.getRuntime().addShutdownHook(new Thread() {
                @Override
                public void run() {
                    fetcher.logStatistics();
                }
            });

            fetcher.fetchProductInformation();
        }
    }
}
From source file:com.wso2telco.core.mnc.resolver.MNCQueryClient.java
public static void main(String arg[]) {
    //IProviderNetwork networkprovider = new DNSSSLQueryClient();
    try {
        // Note: arg[0] is already a String, so String.valueOf here resolves to the Object overload
        // and simply returns the same reference (or "null" if the element were null).
        String pfapiMnc = new DNSSSLQueryClient().queryNetworkStandalone(String.valueOf(arg[0]),
                arg[1].substring(arg[0].length()));
        System.out.println("Mnc:" + pfapiMnc);
    } catch (Exception ex) {
        Logger.getLogger(MNCQueryClient.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file:com.betfair.cougar.test.socket.app.SocketCompatibilityTestingApp.java
public static void main(String[] args) throws Exception {
    Parser parser = new PosixParser();
    Options options = new Options();
    options.addOption("r", "repo", true, "Repository type to search: local|central");
    options.addOption("c", "client-concurrency", true,
            "Max threads to allow each client tester to run tests, defaults to 10");
    options.addOption("t", "test-concurrency", true, "Max client testers to run concurrently, defaults to 5");
    options.addOption("m", "max-time", true, "Max time (in minutes) to allow tests to complete, defaults to 10");
    options.addOption("v", "version", false, "Print version and exit");
    options.addOption("h", "help", false, "This help text");
    CommandLine commandLine = parser.parse(options, args);
    if (commandLine.hasOption("h")) {
        System.out.println(options);
        System.exit(0);
    }
    if (commandLine.hasOption("v")) {
        System.out.println("How the hell should I know?");
        System.exit(0);
    }

    // 1. Find all testers in given repos
    List<RepoSearcher> repoSearchers = new ArrayList<>();
    for (String repo : commandLine.getOptionValues("r")) {
        if ("local".equals(repo.toLowerCase())) {
            repoSearchers.add(new LocalRepoSearcher());
        } else if ("central".equals(repo.toLowerCase())) {
            repoSearchers.add(new CentralRepoSearcher());
        } else {
            System.err.println("Unrecognized repo: " + repo);
            System.err.println(options);
            System.exit(1);
        }
    }

    int clientConcurrency = 10;
    if (commandLine.hasOption("c")) {
        try {
            clientConcurrency = Integer.parseInt(commandLine.getOptionValue("c"));
        } catch (NumberFormatException nfe) {
            System.err.println(
                    "client-concurrency is not a valid integer: '" + commandLine.getOptionValue("c") + "'");
            System.exit(1);
        }
    }

    int testConcurrency = 5;
    if (commandLine.hasOption("t")) {
        try {
            testConcurrency = Integer.parseInt(commandLine.getOptionValue("t"));
        } catch (NumberFormatException nfe) {
            System.err.println(
                    "test-concurrency is not a valid integer: '" + commandLine.getOptionValue("t") + "'");
            System.exit(1);
        }
    }

    int maxMinutes = 10;
    if (commandLine.hasOption("m")) {
        try {
            maxMinutes = Integer.parseInt(commandLine.getOptionValue("m"));
        } catch (NumberFormatException nfe) {
            System.err.println("max-time is not a valid integer: '" + commandLine.getOptionValue("m") + "'");
            System.exit(1);
        }
    }

    Properties clientProps = new Properties();
    clientProps.setProperty("client.concurrency", String.valueOf(clientConcurrency));

    File baseRunDir = new File(System.getProperty("user.dir") + "/run");
    baseRunDir.mkdirs();

    File tmpDir = new File(baseRunDir, "jars");
    tmpDir.mkdirs();

    List<ServerRunner> serverRunners = new ArrayList<>();
    List<ClientRunner> clientRunners = new ArrayList<>();
    for (RepoSearcher searcher : repoSearchers) {
        List<File> jars = searcher.findAndCache(tmpDir);
        for (File f : jars) {
            ServerRunner serverRunner = new ServerRunner(f, baseRunDir);
            System.out.println("Found tester: " + serverRunner.getVersion());
            serverRunners.add(serverRunner);
            clientRunners.add(new ClientRunner(f, baseRunDir, clientProps));
        }
    }

    // 2. Start servers and collect ports
    System.out.println();
    System.out.println("Starting " + serverRunners.size() + " servers...");
    for (ServerRunner server : serverRunners) {
        server.startServer();
    }
    System.out.println();

    List<TestCombo> tests = new ArrayList<>(serverRunners.size() * clientRunners.size());
    for (ServerRunner server : serverRunners) {
        for (ClientRunner client : clientRunners) {
            tests.add(new TestCombo(server, client));
        }
    }
    System.out.println("Enqueued " + tests.size() + " test combos to run...");

    long startTime = System.currentTimeMillis();

    // 3. Run every client against every server, collecting results
    BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(serverRunners.size() * clientRunners.size());
    ThreadPoolExecutor service = new ThreadPoolExecutor(testConcurrency, testConcurrency, 5000,
            TimeUnit.MILLISECONDS, workQueue);
    service.prestartAllCoreThreads();
    workQueue.addAll(tests);
    while (!workQueue.isEmpty()) {
        Thread.sleep(1000);
    }
    service.shutdown();
    service.awaitTermination(maxMinutes, TimeUnit.MINUTES);
    long endTime = System.currentTimeMillis();
    long totalTimeSecs = Math.round((endTime - startTime) / 1000.0);

    for (ServerRunner server : serverRunners) {
        server.shutdownServer();
    }

    System.out.println();
    System.out.println("=======");
    System.out.println("Results");
    System.out.println("-------");
    // print a summary
    int totalTests = 0;
    int totalSuccess = 0;
    for (TestCombo combo : tests) {
        String clientVer = combo.getClientVersion();
        String serverVer = combo.getServerVersion();
        String results = combo.getClientResults();
        ObjectMapper mapper = new ObjectMapper(new JsonFactory());
        JsonNode node = mapper.reader().readTree(results);
        JsonNode resultsArray = node.get("results");
        int numTests = resultsArray.size();
        int numSuccess = 0;
        for (int i = 0; i < numTests; i++) {
            if ("success".equals(resultsArray.get(i).get("result").asText())) {
                numSuccess++;
            }
        }
        totalSuccess += numSuccess;
        totalTests += numTests;
        // "%.2f" (two decimal places) rather than the original "%2f", which only sets a minimum width
        System.out.println(clientVer + "/" + serverVer + ": " + numSuccess + "/" + numTests
                + " succeeded - took " + String.format("%.2f", combo.getRunningTime()) + " seconds");
    }
    System.out.println("-------");
    System.out.println(
            "Overall: " + totalSuccess + "/" + totalTests + " succeeded - took " + totalTimeSecs + " seconds");

    FileWriter out = new FileWriter("results.json");
    PrintWriter pw = new PrintWriter(out);
    // 4. Output full results
    pw.println("{\n \"results\": [");
    for (TestCombo combo : tests) {
        combo.emitResults(pw, " ");
    }
    pw.println(" ],");
    pw.println(" \"servers\": [");
    for (ServerRunner server : serverRunners) {
        server.emitInfo(pw, " ");
    }
    // Close the "servers" array and the JSON object; the original left the document unterminated.
    pw.println(" ]");
    pw.println("}");
    pw.close();
}
From source file:com.alertlogic.aws.kinesis.test1.StreamProcessor.java
/**
 * Start the Kinesis Client application.
 *
 * @param args Expecting 4 arguments: Application name to use for the Kinesis Client Application, Stream name to
 *             read from, DynamoDB table name to persist counts into, and the AWS region in which these resources
 *             exist or should be created.
 */
public static void main(String[] args) throws UnknownHostException {
    if (args.length != 4) {
        System.err.println("Usage: " + StreamProcessor.class.getSimpleName()
                + " <application name> <stream name> <DynamoDB table name> <region>");
        System.exit(1);
    }

    String applicationName = args[0];
    String streamName = args[1];
    String countsTableName = args[2];
    Region region = SampleUtils.parseRegion(args[3]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);
    AmazonDynamoDB dynamoDB = new AmazonDynamoDBClient(credentialsProvider, clientConfig);
    dynamoDB.setRegion(region);

    // Creates a stream to write to, if it doesn't exist.
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    DynamoDBUtils dynamoDBUtils = new DynamoDBUtils(dynamoDB);
    dynamoDBUtils.createCountTableIfNotExists(countsTableName);
    LOG.info(String.format("%s DynamoDB table is ready for use", countsTableName));

    String workerId = String.valueOf(UUID.randomUUID());
    LOG.info(String.format("Using working id: %s", workerId));

    KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
            credentialsProvider, workerId);
    kclConfig.withCommonClientConfig(clientConfig);
    kclConfig.withRegionName(region.getName());
    kclConfig.withInitialPositionInStream(InitialPositionInStream.LATEST);

    // Persist counts to DynamoDB.
    DynamoDBCountPersister persister = new DynamoDBCountPersister(
            dynamoDBUtils.createMapperForTable(countsTableName));

    IRecordProcessorFactory recordProcessor = new CountingRecordProcessorFactory<HttpReferrerPair>(
            HttpReferrerPair.class, persister, COMPUTE_RANGE_FOR_COUNTS_IN_MILLIS, COMPUTE_INTERVAL_IN_MILLIS);

    Worker worker = new Worker(recordProcessor, kclConfig);

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}