List of usage examples for java.util.List size()
int size();
Returns the number of elements in this list. If the list contains more than Integer.MAX_VALUE elements, returns Integer.MAX_VALUE.
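Before the full applications below, a minimal, self-contained sketch of the call itself (the list name and contents are illustrative):

import java.util.ArrayList;
import java.util.List;

public class ListSizeDemo {
    public static void main(String[] args) {
        List<String> fruits = new ArrayList<>();
        System.out.println(fruits.size()); // 0: a new list is empty

        fruits.add("apple");
        fruits.add("banana");
        System.out.println(fruits.size()); // 2

        fruits.remove("apple");
        System.out.println(fruits.size()); // 1

        // size() is the usual loop bound for index-based iteration
        for (int i = 0; i < fruits.size(); i++) {
            System.out.println(fruits.get(i));
        }
    }
}

Note that size() reflects the current element count and changes as the list is mutated; when only emptiness matters, isEmpty() is clearer than size() == 0.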
From source file:com.ibm.watson.catalyst.corpus.tfidf.ApplyTemplate.java
public static void main(String[] args) { System.out.println("Loading Corpus."); JsonNode root;/*from w w w . ja va2 s . c o m*/ TermCorpus c; JsonNode documents; try (InputStream in = new FileInputStream(new File("tfidf-health-1.json"))) { root = MAPPER.readTree(in); documents = root.get("documents"); TermCorpusBuilder cb = new TermCorpusBuilder(); cb.setDocumentCombiner(0, 0); cb.setJson(new File("health-corpus.json")); c = cb.build(); } catch (FileNotFoundException e) { // TODO Auto-generated catch block e.printStackTrace(); return; } catch (JsonProcessingException e) { // TODO Auto-generated catch block e.printStackTrace(); return; } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); return; } System.out.println("Corpus loaded."); List<TemplateMatch> matches = new ArrayList<TemplateMatch>(); Iterator<TermDocument> documentIterator = c.getDocuments().iterator(); int index = 0; for (JsonNode document : documents) { Pattern p1 = Template.getTemplatePattern(document, "\\b(an? |the )?(\\w+ ){0,4}", "( \\w+)?(?= is (an?|one|the)\\b)"); if (p1.toString().equals("\\b(an? |the )?(\\w+ ){0,4}()( \\w+)?(?= is (an?|one|the)\\b)")) continue; Pattern p2 = Template.getTemplatePattern(document, "^(\\w+ ){0,2}", "( \\w+){0,1}?(?=( can| may)? causes?\\b)"); Pattern p3 = Template.getTemplatePattern(document, "(?<=the use of )(\\w+ ){0,3}", "( \\w+| ){0,2}?(?=( (and|does|in|for|can|is|as|to|of)\\b|\\.))"); Pattern p4 = Template.getTemplatePattern(document, "^(\\w+ ){0,3}", "( \\w+){0,1}(?=( can| may) leads? to\\b)"); Pattern p5 = Template.getTemplatePattern(document, "(?<=\\bthe risk of )(\\w+ ){0,3}", "( (disease|stroke|attack|cancer))?\\b"); Pattern p6 = Template.getTemplatePattern(document, "(\\w{3,} ){0,3}", "( (disease|stroke|attack|cancer))?(?= is caused by\\b)"); Pattern p7 = Template.getTemplatePattern(document, "(?<= is caused by )(\\w+ ){0,10}", ""); Pattern p8 = Template.getTemplatePattern(document, "\\b", "( \\w{4,})(?= can be used)"); Pattern p9 = Template.getTemplatePattern(document, "(?<= can be used )(\\w+ ){0,10}", "\\b"); TermDocument d = documentIterator.next(); DocumentMatcher dm = new DocumentMatcher(d); matches.addAll(dm.getParagraphMatches(p1, "What is ", "?")); matches.addAll(dm.getParagraphMatches(p2, "What does ", " cause?")); matches.addAll(dm.getParagraphMatches(p3, "How is ", " used?")); matches.addAll(dm.getParagraphMatches(p4, "What can ", " lead to?")); matches.addAll(dm.getParagraphMatches(p5, "What impacts the risk of ", "?")); matches.addAll(dm.getParagraphMatches(p6, "What causes ", "?")); matches.addAll(dm.getParagraphMatches(p7, "What is caused by ", "?")); matches.addAll(dm.getParagraphMatches(p8, "How can ", " be used?")); matches.addAll(dm.getParagraphMatches(p9, "What can be used ", "?")); System.out.print("Progress: " + ((100 * ++index) / documents.size()) + "%\r"); } System.out.println(); List<TemplateMatch> condensedMatches = new ArrayList<TemplateMatch>(); for (TemplateMatch match : matches) { for (TemplateMatch baseMatch : condensedMatches) { if (match.sameQuestion(baseMatch)) { baseMatch.addAnswers(match); break; } } condensedMatches.add(match); } try (BufferedWriter bw = new BufferedWriter(new FileWriter("health-questions.txt"))) { for (TemplateMatch match : condensedMatches) { bw.write(match.toString()); } bw.write("\n"); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } System.out.println("Done and generated: " + condensedMatches.size()); }
From source file:edu.monash.merc.system.parser.nextprot.NxXMLParser.java
public static void main(String[] args) throws Exception {
    String filename = "./testData/nextprot_chromosome_7.xml";
    FileInputStream fileInputStream = new FileInputStream(new File(filename));
    NxXMLParser parser = new NxXMLParser();
    List<NXEntryBean> nxEntryBeans = parser.parseNextProtXML(fileInputStream);
    System.out.println("======== total size of nextprot entry: " + nxEntryBeans.size());

    // Alternative path kept from the original source: download and unpack the
    // gzipped chromosome file before parsing.
    // String ftpUrl = "ftp://ftp.nextprot.org/pub/current_release/xml/nextprot_chromosome_7.xml.gz";
    // String filename = "./testData/nextprot_chromosome_7.xml.gz";
    // try {
    //     GZIPInputStream gzipInputStream = new GZIPInputStream(new FileInputStream(new File(filename)));
    //     System.out.println("Opening the output file............:opened");
    //     String outFilename = "chromosome_7.xml";
    //     OutputStream out = new FileOutputStream(outFilename);
    //     System.out.println("Transferring bytes from the compressed file to the output file........:Transfer successful ");
    //     byte[] buf = new byte[1024]; // size can be changed according to the programmer's need
    //     int len;
    //     while ((len = gzipInputStream.read(buf)) > 0) {
    //         out.write(buf, 0, len);
    //     }
    //     System.out.println("The file and stream is ......closing..........:closed");
    //     gzipInputStream.close();
    //     out.close();
    // } catch (IOException e) {
    //     System.out.println("Exception has been thrown" + e);
    // }
}
From source file:com.sccl.attech.common.utils.excel.ExportExcel.java
/**
 * Demo entry point. Note: the original Chinese string literals in this example
 * were lost to a non-UTF-8 export; the "?" runs below are unrecoverable
 * placeholders, kept as-is.
 */
public static void main(String[] args) throws Throwable {
    List<String> headerList = Lists.newArrayList();
    headerList.add("??");
    headerList.add("??");
    headerList.add("");
    headerList.add("?");
    headerList.add("");

    List<String> dataRowList = Lists.newArrayList();
    for (int i = 1; i <= headerList.size(); i++) {
        dataRowList.add("?" + i);
    }

    List<List<String>> dataList = Lists.newArrayList();
    for (int i = 1; i <= 100; i++) {
        dataList.add(dataRowList);
    }

    ExportExcel ee = new ExportExcel(null, headerList);
    for (int i = 0; i < dataList.size(); i++) {
        Row row = ee.addRow();
        for (int j = 0; j < dataList.get(i).size(); j++) {
            ee.addCell(row, j, dataList.get(i).get(j));
        }
    }
    ee.createAddMergedRegion();

    StringBuilder sb = new StringBuilder();
    sb.append("????").append("\r\n");
    sb.append("???64????? ?: ; { } ! @ $ ^ & | , . / ? [ ] ~ * # <+ - = ").append("\r");
    sb.append("??:").append("").append("'").append("%").append("\\").append("? ").append("\r");
    sb.append("?? ?60?????? : ; { } ! @ $ ^ & | , . / ? [ ] ~ * # < +- = ").append("\r");
    sb.append(" ????").append("\r");
    sb.append("??180???1590???15???? :").append("\r");
    sb.append("79.000000000000000?GPS???????6?").append("\r");
    sb.append("* ??2000??2000?????").append("\r");
    sb.append("* EXCEL???.xls97-2003").append("\r");
    sb.append("* ??????").append("\r");
    sb.append("* ??").append("\r");
    sb.append("* ????POI??POI??????").append("\r");
    ee.addCellStyle(ee.getExistRow(1), 6, sb.toString());

    // demo
    ee.initializeDemo(null, headerList);
    ee.addCellStyle(ee.getExistRow(25), 6, "?");
    ee.addCellStyle(ee.getExistRow(25), 7, "");
    ee.addCellStyle(ee.getExistRow(25), 8, "");
    ee.addCellStyle(ee.getExistRow(25), 9, "116.35526644472");
    ee.addCellStyle(ee.getExistRow(25), 10, "40.03711432476");
    // ee.addCell(ee.getExistRow(26), 6, dataList.get(i).get(j));

    ee.writeFile("D:\\export.xlsx");
    ee.dispose();
    log.debug("Export success.");
}
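In the export example, size() bounds both loops of a row/column traversal: dataList.size() for rows and dataList.get(i).size() for the cells in each row. A minimal sketch of that nested pattern, with illustrative data:

import java.util.Arrays;
import java.util.List;

public class NestedSizeDemo {
    public static void main(String[] args) {
        List<List<String>> dataList = Arrays.asList(
                Arrays.asList("a1", "a2", "a3"),
                Arrays.asList("b1", "b2", "b3"));
        for (int i = 0; i < dataList.size(); i++) {            // rows
            for (int j = 0; j < dataList.get(i).size(); j++) { // cells in row i
                System.out.print(dataList.get(i).get(j) + " ");
            }
            System.out.println();
        }
    }
}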
From source file:org.ala.hbase.RepoDataLoader.java
/**
 * This takes a list of infosource ids...
 *
 * Usage: -stats or -reindex or -gList and list of infosourceId
 *
 * @param args
 */
public static void main(String[] args) throws Exception {
    // RepoDataLoader loader = new RepoDataLoader();
    ApplicationContext context = SpringUtils.getContext();
    RepoDataLoader loader = (RepoDataLoader) context.getBean(RepoDataLoader.class);
    long start = System.currentTimeMillis();
    loader.loadInfoSources();
    String filePath = repositoryDir;

    if (args.length > 0) {
        if (args[0].equalsIgnoreCase("-stats")) {
            loader.statsOnly = true;
            args = (String[]) ArrayUtils.subarray(args, 1, args.length);
        }
        if (args[0].equalsIgnoreCase("-reindex")) {
            loader.reindex = true;
            loader.indexer = context.getBean(PartialIndex.class);
            args = (String[]) ArrayUtils.subarray(args, 1, args.length);
            logger.info("**** -reindex: " + loader.reindex);
            logger.debug("reindex url: " + loader.reindexUrl);
        }
        if (args[0].equalsIgnoreCase("-gList")) {
            loader.gList = true;
            args = (String[]) ArrayUtils.subarray(args, 1, args.length);
            logger.info("**** -gList: " + loader.gList);
        }
        if (args[0].equalsIgnoreCase("-biocache")) {
            Hashtable<String, String> hashTable = new Hashtable<String, String>();
            hashTable.put("accept", "application/json");
            ObjectMapper mapper = new ObjectMapper();
            mapper.getDeserializationConfig().set(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
            RestfulClient restfulClient = new RestfulClient(0);
            String fq = "&fq=";
            if (args.length > 1) {
                java.util.Date date = new java.util.Date();
                if (args[1].equals("-lastWeek")) {
                    date = DateUtils.addWeeks(date, -1);
                } else if (args[1].equals("-lastMonth")) {
                    date = DateUtils.addMonths(date, -1);
                } else if (args[1].equals("-lastYear")) {
                    date = DateUtils.addYears(date, -1);
                } else {
                    date = null;
                }
                if (date != null) {
                    SimpleDateFormat sfd = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
                    fq += "last_load_date:%5B" + sfd.format(date) + "%20TO%20*%5D";
                }
            }
            String url = "http://biocache.ala.org.au/ws/occurrences/search?q=multimedia:Image" + fq
                    + "&facets=data_resource_uid&pageSize=0";
            Object[] resp = restfulClient.restGet(url, hashTable);
            logger.info("The URL: " + url);
            if ((Integer) resp[0] == HttpStatus.SC_OK) {
                String content = resp[1].toString();
                logger.debug(resp[1]);
                if (content != null && content.length() > "[]".length()) {
                    Map map = mapper.readValue(content, Map.class);
                    try {
                        List<java.util.LinkedHashMap<String, String>> list =
                                ((List<java.util.LinkedHashMap<String, String>>) ((java.util.LinkedHashMap) ((java.util.ArrayList) map
                                        .get("facetResults")).get(0)).get("fieldResult"));
                        Set<String> arg = new LinkedHashSet<String>();
                        for (int i = 0; i < list.size(); i++) {
                            java.util.LinkedHashMap<String, String> value = list.get(i);
                            String dataResource = getDataResource(value.get("fq"));
                            Object provider = (loader.getUidInfoSourceMap().get(dataResource));
                            if (provider != null) {
                                arg.add(provider.toString());
                            }
                        }
                        logger.info("Set of biocache infosource ids to load: " + arg);
                        args = arg.toArray(new String[] {});
                        // handle the situation where biocache-service reports no data resources
                        if (args.length < 1) {
                            logger.error("No biocache data resources found. Unable to load.");
                            System.exit(0);
                        }
                    } catch (Exception e) {
                        logger.error("ERROR: exit process....." + e);
                        e.printStackTrace();
                        System.exit(0);
                    }
                }
            } else {
                logger.warn("Unable to process url: " + url);
            }
        }
    }

    int filesRead = loader.load(filePath, args); // FIXME - move to config
    long finish = System.currentTimeMillis();
    logger.info(filesRead + " files scanned/loaded in: " + ((finish - start) / 60000) + " minutes "
            + ((finish - start) / 1000) + " seconds.");
    System.exit(1);
}
From source file:imp.lstm.main.Driver.java
public static void main(String[] args)
        throws FileNotFoundException, IOException, ConfigurationException, InvalidParametersException {
    FileBasedConfigurationBuilder<PropertiesConfiguration> builder = new FileBasedConfigurationBuilder<>(
            PropertiesConfiguration.class).configure(
                    new Parameters().properties().setFileName(args[0]).setThrowExceptionOnMissing(true)
                            .setListDelimiterHandler(new DefaultListDelimiterHandler(';'))
                            .setIncludesAllowed(false));
    Configuration config = builder.getConfiguration();
    String inputSongPath = config.getString("input_song");
    String outputFolderPath = config.getString("output_folder");
    String autoEncoderParamsPath = config.getString("auto_encoder_params");
    String nameGeneratorParamsPath = config.getString("name_generator_params");
    String queueFolderPath = config.getString("queue_folder");
    String referenceQueuePath = config.getString("reference_queue", "nil");
    String inputCorpusFolder = config.getString("input_corpus_folder");
    boolean shouldWriteQueue = config.getBoolean("should_write_generated_queue");
    boolean frankensteinTest = config.getBoolean("queue_tests_frankenstein");
    boolean interpolateTest = config.getBoolean("queue_tests_interpolation");
    boolean iterateOverCorpus = config.getBoolean("iterate_over_corpus", false);
    boolean shouldGenerateSongTitle = config.getBoolean("generate_song_title");
    boolean shouldGenerateSong = config.getBoolean("generate_leadsheet");
    // The original referenced advanceDecoding without ever declaring it; reading
    // it from the same config keeps the example compilable (the key name
    // "advance_decoding" is an assumption).
    boolean advanceDecoding = config.getBoolean("advance_decoding", false);

    LogTimer.initStartTime(); // start our logging timer to keep track of our execution time

    LogTimer.log("Creating name generator...");
    // here is just silly code for generating name based on an LSTM lol $wag
    LSTM lstm = new LSTM();
    FullyConnectedLayer fullLayer = new FullyConnectedLayer(Operations.None);
    Loadable titleNetLoader = new Loadable() {
        @Override
        public boolean load(INDArray array, String path) {
            String car = pathCar(path);
            String cdr = pathCdr(path);
            switch (car) {
            case "full":
                return fullLayer.load(array, cdr);
            case "lstm":
                return lstm.load(array, cdr);
            default:
                return false;
            }
        }
    };
    LogTimer.log("Packing name generator from files...");
    (new NetworkConnectomeLoader()).load(nameGeneratorParamsPath, titleNetLoader);
    String characterString = " !\"'[],-.01245679:?ABCDEFGHIJKLMNOPQRSTUVWYZabcdefghijklmnopqrstuvwxyz";

    // Initialization
    LogTimer.log("Creating autoencoder...");
    int inputSize = 34;
    int outputSize = EncodingParameters.noteEncoder.getNoteLength();
    int featureVectorSize = 100;
    ProductCompressingAutoencoder autoencoder = new ProductCompressingAutoencoder(24, 48, 84 + 1, false); // create our network
    int numInterpolationDivisions = 5;

    // "pack" the network from weights and biases file directory
    LogTimer.log("Packing autoencoder from files");
    (new NetworkConnectomeLoader()).load(autoEncoderParamsPath, autoencoder);

    File[] songFiles;
    if (iterateOverCorpus) {
        songFiles = new File(inputCorpusFolder).listFiles();
    } else {
        songFiles = new File[] { new File(inputSongPath) };
    }
    for (File inputFile : songFiles) {
        (new NetworkConnectomeLoader()).refresh(autoEncoderParamsPath, autoencoder, "initialstate");
        String songTitle;
        if (shouldGenerateSong) {
            Random rand = new Random();
            AVector charOut = Vector.createLength(characterString.length());
            GroupedSoftMaxSampler sampler = new GroupedSoftMaxSampler(
                    new Group[] { new Group(0, characterString.length(), true) });
            songTitle = "";
            for (int i = 0; i < 50; i++) {
                charOut = fullLayer.forward(lstm.step(charOut));
                charOut = sampler.filter(charOut);
                int charIndex = 0;
                for (; charIndex < charOut.length(); charIndex++) {
                    if (charOut.get(charIndex) == 1.0) {
                        break;
                    }
                }
                songTitle += characterString.substring(charIndex, charIndex + 1);
            }
            songTitle = songTitle.trim();
            LogTimer.log("Generated song name: " + songTitle);
        } else {
            songTitle = "The Song We Never Name";
        }

        LogTimer.log("Reading file...");
        // read our leadsheet to get a data vessel as retrieved in rbm-provisor
        LeadSheetDataSequence inputSequence = LeadSheetIO.readLeadSheet(inputFile);
        LeadSheetDataSequence outputSequence = inputSequence.copy();
        outputSequence.clearMelody();
        if (interpolateTest) {
            LeadSheetDataSequence additionalOutput = outputSequence.copy();
            for (int i = 0; i < numInterpolationDivisions; i++) {
                outputSequence.concat(additionalOutput.copy());
            }
        }
        LeadSheetDataSequence decoderInputSequence = outputSequence.copy();

        LogTimer.startLog("Encoding data...");
        while (inputSequence.hasNext()) { // iterate through time steps in input data
            autoencoder.encodeStep(inputSequence.retrieve()); // feed the input vector into the network
            if (advanceDecoding) { // advance decoding: start decoding as soon as we can
                if (autoencoder.canDecode()) { // if the queue has enough data to decode from
                    // take sampled data for a timestep from the autoencoder
                    outputSequence.pushStep(null, null, autoencoder.decodeStep(decoderInputSequence.retrieve()));
                }
            }
        }
        LogTimer.endLog();

        if (shouldWriteQueue) {
            String queueFilePath = queueFolderPath + java.io.File.separator
                    + inputFile.getName().replace(".ls", ".q");
            FragmentedNeuralQueue currQueue = autoencoder.getQueue();
            currQueue.writeToFile(queueFilePath);
            LogTimer.log("Wrote queue " + inputFile.getName().replace(".ls", ".q") + " to file...");
        }
        if (shouldGenerateSong) {
            if (interpolateTest) {
                FragmentedNeuralQueue refQueue = new FragmentedNeuralQueue();
                refQueue.initFromFile(referenceQueuePath);
                FragmentedNeuralQueue currQueue = autoencoder.getQueue();
                autoencoder.setQueue(currQueue.copy());
                while (autoencoder.hasDataStepsLeft()) { // done encoding all time steps, so just finish decoding
                    outputSequence.pushStep(null, null, autoencoder.decodeStep(decoderInputSequence.retrieve()));
                }
                for (int i = 1; i <= numInterpolationDivisions; i++) {
                    System.out.println("Starting interpolation " + ((1.0 / numInterpolationDivisions) * (i)));
                    (new NetworkConnectomeLoader()).refresh(autoEncoderParamsPath, autoencoder, "initialstate");
                    FragmentedNeuralQueue currCopy = currQueue.copy();
                    currCopy.basicInterpolate(refQueue, (1.0 / numInterpolationDivisions) * (i));
                    autoencoder.setQueue(currCopy);
                    int timeStep = 0;
                    while (autoencoder.hasDataStepsLeft()) {
                        System.out.println("interpolation " + i + " step " + ++timeStep);
                        outputSequence.pushStep(null, null,
                                autoencoder.decodeStep(decoderInputSequence.retrieve()));
                    }
                }
            }
            if (frankensteinTest) {
                LogTimer.startLog("Loading queues");
                File queueFolder = new File(queueFolderPath);
                int numComponents = config.getInt("frankenstein_num_components", 5);
                int numCombinations = config.getInt("frankenstein_num_combinations", 6);
                double interpolationMagnitude = config.getDouble("frankenstein_magnitude", 2.0);
                if (queueFolder.isDirectory()) {
                    File[] queueFiles = queueFolder.listFiles(new FilenameFilter() {
                        @Override
                        public boolean accept(File dir, String name) {
                            return name.contains(".q");
                        }
                    });
                    List<File> fileList = new ArrayList<>();
                    for (File file : queueFiles) {
                        fileList.add(file);
                    }
                    Collections.shuffle(fileList);
                    int numSelectedFiles = (numComponents > queueFiles.length) ? queueFiles.length : numComponents;
                    for (int i = 0; i < queueFiles.length - numSelectedFiles; i++) {
                        fileList.remove(fileList.size() - 1);
                    }
                    List<FragmentedNeuralQueue> queuePopulation = new ArrayList<>(fileList.size());
                    songTitle += " - a mix of ";
                    for (File file : fileList) {
                        FragmentedNeuralQueue newQueue = new FragmentedNeuralQueue();
                        newQueue.initFromFile(file.getPath());
                        queuePopulation.add(newQueue);
                        // replace(), not replaceAll(): the original's replaceAll(".ls", "")
                        // treated the dot as a regex wildcard
                        songTitle += file.getName().replace(".ls", "") + ", ";
                    }
                    LogTimer.endLog();

                    LeadSheetDataSequence additionalOutput = outputSequence.copy();
                    for (int i = 1; i < numCombinations; i++) {
                        outputSequence.concat(additionalOutput.copy());
                    }
                    decoderInputSequence = outputSequence.copy();

                    FragmentedNeuralQueue origQueue = autoencoder.getQueue();
                    for (int i = 0; i < numCombinations; i++) {
                        LogTimer.startLog("Performing queue interpolation...");
                        AVector combinationStrengths = Vector.createLength(queuePopulation.size());
                        Random vectorRand = new Random(i);
                        for (int j = 0; j < combinationStrengths.length(); j++) {
                            combinationStrengths.set(j, vectorRand.nextDouble());
                        }
                        combinationStrengths.divide(combinationStrengths.elementSum());
                        FragmentedNeuralQueue currQueue = origQueue.copy();
                        for (int k = 0; k < combinationStrengths.length(); k++) {
                            currQueue.basicInterpolate(queuePopulation.get(k),
                                    combinationStrengths.get(k) * interpolationMagnitude);
                        }
                        LogTimer.endLog();
                        autoencoder.setQueue(currQueue);
                        LogTimer.startLog("Refreshing autoencoder state...");
                        (new NetworkConnectomeLoader()).refresh(autoEncoderParamsPath, autoencoder, "initialstate");
                        LogTimer.endLog();
                        LogTimer.startLog("Decoding segment...");
                        while (autoencoder.hasDataStepsLeft()) {
                            outputSequence.pushStep(null, null,
                                    autoencoder.decodeStep(decoderInputSequence.retrieve()));
                        }
                        LogTimer.endLog();
                    }
                }
            }
            while (autoencoder.hasDataStepsLeft()) { // done encoding all time steps, so just finish decoding
                outputSequence.pushStep(null, null, autoencoder.decodeStep(decoderInputSequence.retrieve()));
            }
            LogTimer.log("Writing file...");
            // write our generated file with the same name plus "_Output"
            String outputFilename = outputFolderPath + java.io.File.separator
                    + inputFile.getName().replace(".ls", "_Output");
            LeadSheetIO.writeLeadSheet(outputSequence, outputFilename, songTitle);
            System.out.println(outputFilename);
        } else {
            autoencoder.setQueue(new FragmentedNeuralQueue());
        }
    }
    LogTimer.log("Process finished"); // Done!
}
From source file:com.betfair.cougar.test.socket.app.SocketCompatibilityTestingApp.java
public static void main(String[] args) throws Exception {
    Parser parser = new PosixParser();
    Options options = new Options();
    options.addOption("r", "repo", true, "Repository type to search: local|central");
    options.addOption("c", "client-concurrency", true,
            "Max threads to allow each client tester to run tests, defaults to 10");
    options.addOption("t", "test-concurrency", true, "Max client testers to run concurrently, defaults to 5");
    options.addOption("m", "max-time", true, "Max time (in minutes) to allow tests to complete, defaults to 10");
    options.addOption("v", "version", false, "Print version and exit");
    options.addOption("h", "help", false, "This help text");
    CommandLine commandLine = parser.parse(options, args);
    if (commandLine.hasOption("h")) {
        System.out.println(options);
        System.exit(0);
    }
    if (commandLine.hasOption("v")) {
        System.out.println("How the hell should I know?");
        System.exit(0);
    }

    // 1. Find all testers in given repos
    List<RepoSearcher> repoSearchers = new ArrayList<>();
    for (String repo : commandLine.getOptionValues("r")) {
        if ("local".equals(repo.toLowerCase())) {
            repoSearchers.add(new LocalRepoSearcher());
        } else if ("central".equals(repo.toLowerCase())) {
            repoSearchers.add(new CentralRepoSearcher());
        } else {
            System.err.println("Unrecognized repo: " + repo);
            System.err.println(options);
            System.exit(1);
        }
    }
    int clientConcurrency = 10;
    if (commandLine.hasOption("c")) {
        try {
            clientConcurrency = Integer.parseInt(commandLine.getOptionValue("c"));
        } catch (NumberFormatException nfe) {
            System.err.println("client-concurrency is not a valid integer: '" + commandLine.getOptionValue("c") + "'");
            System.exit(1);
        }
    }
    int testConcurrency = 5;
    if (commandLine.hasOption("t")) {
        try {
            testConcurrency = Integer.parseInt(commandLine.getOptionValue("t"));
        } catch (NumberFormatException nfe) {
            System.err.println("test-concurrency is not a valid integer: '" + commandLine.getOptionValue("t") + "'");
            System.exit(1);
        }
    }
    int maxMinutes = 10;
    if (commandLine.hasOption("m")) {
        try {
            maxMinutes = Integer.parseInt(commandLine.getOptionValue("m"));
        } catch (NumberFormatException nfe) {
            System.err.println("max-time is not a valid integer: '" + commandLine.getOptionValue("m") + "'");
            System.exit(1);
        }
    }

    Properties clientProps = new Properties();
    clientProps.setProperty("client.concurrency", String.valueOf(clientConcurrency));

    File baseRunDir = new File(System.getProperty("user.dir") + "/run");
    baseRunDir.mkdirs();
    File tmpDir = new File(baseRunDir, "jars");
    tmpDir.mkdirs();

    List<ServerRunner> serverRunners = new ArrayList<>();
    List<ClientRunner> clientRunners = new ArrayList<>();
    for (RepoSearcher searcher : repoSearchers) {
        List<File> jars = searcher.findAndCache(tmpDir);
        for (File f : jars) {
            ServerRunner serverRunner = new ServerRunner(f, baseRunDir);
            System.out.println("Found tester: " + serverRunner.getVersion());
            serverRunners.add(serverRunner);
            clientRunners.add(new ClientRunner(f, baseRunDir, clientProps));
        }
    }

    // 2. Start servers and collect ports
    System.out.println();
    System.out.println("Starting " + serverRunners.size() + " servers...");
    for (ServerRunner server : serverRunners) {
        server.startServer();
    }
    System.out.println();

    List<TestCombo> tests = new ArrayList<>(serverRunners.size() * clientRunners.size());
    for (ServerRunner server : serverRunners) {
        for (ClientRunner client : clientRunners) {
            tests.add(new TestCombo(server, client));
        }
    }
    System.out.println("Enqueued " + tests.size() + " test combos to run...");
    long startTime = System.currentTimeMillis();

    // 3. Run every client against every server, collecting results
    BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(serverRunners.size() * clientRunners.size());
    ThreadPoolExecutor service = new ThreadPoolExecutor(testConcurrency, testConcurrency, 5000,
            TimeUnit.MILLISECONDS, workQueue);
    service.prestartAllCoreThreads();
    workQueue.addAll(tests);
    while (!workQueue.isEmpty()) {
        Thread.sleep(1000);
    }
    service.shutdown();
    service.awaitTermination(maxMinutes, TimeUnit.MINUTES);
    long endTime = System.currentTimeMillis();
    long totalTimeSecs = Math.round((endTime - startTime) / 1000.0);
    for (ServerRunner server : serverRunners) {
        server.shutdownServer();
    }

    System.out.println();
    System.out.println("=======");
    System.out.println("Results");
    System.out.println("-------");
    // print a summary
    int totalTests = 0;
    int totalSuccess = 0;
    for (TestCombo combo : tests) {
        String clientVer = combo.getClientVersion();
        String serverVer = combo.getServerVersion();
        String results = combo.getClientResults();
        ObjectMapper mapper = new ObjectMapper(new JsonFactory());
        JsonNode node = mapper.reader().readTree(results);
        JsonNode resultsArray = node.get("results");
        int numTests = resultsArray.size();
        int numSuccess = 0;
        for (int i = 0; i < numTests; i++) {
            if ("success".equals(resultsArray.get(i).get("result").asText())) {
                numSuccess++;
            }
        }
        totalSuccess += numSuccess;
        totalTests += numTests;
        // "%.2f" replaces the original "%2f", which was a width specifier and
        // printed the running time unrounded
        System.out.println(clientVer + "/" + serverVer + ": " + numSuccess + "/" + numTests
                + " succeeded - took " + String.format("%.2f", combo.getRunningTime()) + " seconds");
    }
    System.out.println("-------");
    System.out.println("Overall: " + totalSuccess + "/" + totalTests + " succeeded - took " + totalTimeSecs + " seconds");

    // 4. Output full results
    FileWriter out = new FileWriter("results.json");
    PrintWriter pw = new PrintWriter(out);
    pw.println("{\n \"results\": [");
    for (TestCombo combo : tests) {
        combo.emitResults(pw, " ");
    }
    pw.println(" ],");
    pw.println(" \"servers\": [");
    for (ServerRunner server : serverRunners) {
        server.emitInfo(pw, " ");
    }
    pw.println(" ]");
    // close the JSON object; the original left results.json unterminated
    // (trailing comma after the servers array and no closing brace)
    pw.println("}");
    pw.close();
}
From source file:br.com.autonomiccs.cloudTraces.main.CloudTracesSimulator.java
public static void main(String[] args) {
    validateInputFile(args);
    String cloudTracesFile = args[0];
    Collection<VirtualMachine> virtualMachines = getAllVirtualMachinesFromCloudTraces(cloudTracesFile);
    logger.info(String.format("#VirtualMachines [%d] found on [%s].", virtualMachines.size(), cloudTracesFile));

    Map<Integer, List<VirtualMachine>> mapVirtualMachinesTaskExecutionByTime =
            createMapVirtualMachinesTaskExecutionByTime(virtualMachines);
    logger.info(String.format("#Times [%d] that have tasks being executed by VMs ",
            mapVirtualMachinesTaskExecutionByTime.size()));

    Cloud cloud = createCloudEnvirtonmentToStartsimulation();
    logger.info("Cloud configuration: " + cloud);

    List<Integer> timesToExecuteTasks = new ArrayList<>(mapVirtualMachinesTaskExecutionByTime.keySet());
    Collections.sort(timesToExecuteTasks);
    Integer firstTimeInTimeUnitOfUsedCloudData = timesToExecuteTasks.get(0);
    Integer lastTimeInTimeUnitOfUsedCloudData = timesToExecuteTasks.get(timesToExecuteTasks.size() - 1);
    logger.info("First time: " + firstTimeInTimeUnitOfUsedCloudData);
    logger.info("Last time: " + lastTimeInTimeUnitOfUsedCloudData);
    double timeUnitPerLoopIteration = getTimeUnitPerLoopIteration(firstTimeInTimeUnitOfUsedCloudData,
            lastTimeInTimeUnitOfUsedCloudData);
    logger.info("The time unit converted to trace time: " + timeUnitPerLoopIteration);

    double currentTime = firstTimeInTimeUnitOfUsedCloudData;
    long highestResourceAllocation = Long.MIN_VALUE;
    String cloudStateHighestMemoryAllocation = "";
    while (currentTime < lastTimeInTimeUnitOfUsedCloudData + 2 * timeUnitPerLoopIteration) {
        logger.debug("Current time of iteration: " + currentTime);
        if (cloud.getMemoryAllocatedInBytes() > highestResourceAllocation) {
            highestResourceAllocation = cloud.getMemoryAllocatedInBytes();
            cloudStateHighestMemoryAllocation = cloud.toString();
        }
        applyLoadOnCloudForCurrentTime(mapVirtualMachinesTaskExecutionByTime, cloud, currentTime);
        destroyVirtualMachinesIfNeeded(cloud, currentTime);
        logger.info(String.format("Time [%.3f], cloud state [%s] ", currentTime, cloud));
        executeManagement(cloud, currentTime);
        logClustersConfigurationsAndStdAtTime(cloud.getClusters(), currentTime);
        currentTime += timeUnitPerLoopIteration;
    }
    logger.info("Cloud configuration after simulation: " + cloud);
    logger.info("Cloud highestResourceUsage: " + cloudStateHighestMemoryAllocation);
}
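The simulator reads the first and last trace times by sorting the keys and indexing with size() - 1, the standard way to get a list's extremes. A minimal sketch with illustrative values:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class FirstLastDemo {
    public static void main(String[] args) {
        List<Integer> times = new ArrayList<>(Arrays.asList(42, 7, 19));
        Collections.sort(times);
        Integer first = times.get(0);               // smallest element after sorting
        Integer last = times.get(times.size() - 1); // largest: size() - 1 is the final index
        System.out.println("First: " + first + ", last: " + last);
    }
}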
From source file:com.alexoree.jenkins.Main.java
public static void main(String[] args) throws Exception {
    // create Options object
    Options options = new Options();
    options.addOption("t", false, "throttle the downloads, waits 5 seconds in between each d/l");
    // automatically generate the help statement
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("jenkins-sync", options);
    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = parser.parse(options, args);
    boolean throttle = cmd.hasOption("t");

    String plugins = "https://updates.jenkins-ci.org/latest/";
    List<String> ps = new ArrayList<String>();
    Document doc = Jsoup.connect(plugins).get();
    for (Element file : doc.select("td a")) {
        // System.out.println(file.attr("href"));
        if (file.attr("href").endsWith(".hpi") || file.attr("href").endsWith(".war")) {
            ps.add(file.attr("href"));
        }
    }
    File root = new File(".");
    // e.g. https://updates.jenkins-ci.org/latest/AdaptivePlugin.hpi
    new File("./latest").mkdirs();

    // output zip file
    String zipFile = "jenkinsSync.zip";
    // create byte buffer
    byte[] buffer = new byte[1024];
    FileOutputStream fos = new FileOutputStream(zipFile);
    ZipOutputStream zos = new ZipOutputStream(fos);

    // download the plugins
    for (int i = 0; i < ps.size(); i++) {
        System.out.println("[" + i + "/" + ps.size() + "] downloading " + plugins + ps.get(i));
        String outputFile = download(root.getAbsolutePath() + "/latest/" + ps.get(i), plugins + ps.get(i));
        FileInputStream fis = new FileInputStream(outputFile);
        // begin writing a new ZIP entry, positions the stream to the start of the entry data
        zos.putNextEntry(new ZipEntry(outputFile.replace(root.getAbsolutePath(), "")
                .replace("updates.jenkins-ci.org/", "").replace("https:/", "")));
        int length;
        while ((length = fis.read(buffer)) > 0) {
            zos.write(buffer, 0, length);
        }
        zos.closeEntry();
        fis.close();
        if (throttle)
            Thread.sleep(WAIT);
        new File(root.getAbsolutePath() + "/latest/" + ps.get(i)).deleteOnExit();
    }

    // download the json metadata
    plugins = "https://updates.jenkins-ci.org/";
    ps = new ArrayList<String>();
    doc = Jsoup.connect(plugins).get();
    for (Element file : doc.select("td a")) {
        // System.out.println(file.attr("href"));
        if (file.attr("href").endsWith(".json")) {
            ps.add(file.attr("href"));
        }
    }
    for (int i = 0; i < ps.size(); i++) {
        download(root.getAbsolutePath() + "/" + ps.get(i), plugins + ps.get(i));
        FileInputStream fis = new FileInputStream(root.getAbsolutePath() + "/" + ps.get(i));
        // begin writing a new ZIP entry, positions the stream to the start of the entry data
        zos.putNextEntry(new ZipEntry(plugins + ps.get(i)));
        int length;
        while ((length = fis.read(buffer)) > 0) {
            zos.write(buffer, 0, length);
        }
        zos.closeEntry();
        fis.close();
        new File(root.getAbsolutePath() + "/" + ps.get(i)).deleteOnExit();
        if (throttle)
            Thread.sleep(WAIT);
    }
    // close the ZipOutputStream
    zos.close();
}
From source file:com.cyberway.issue.io.arc.ARCReader.java
/**
 * Command-line interface to ARCReader.
 *
 * Here is the command-line interface:
 * <pre>
 * usage: java com.cyberway.issue.io.arc.ARCReader [--offset=#] ARCFILE
 *  -h,--help      Prints this message and exits.
 *  -o,--offset    Outputs record at this offset into arc file.</pre>
 *
 * <p>See in <code>$HERITRIX_HOME/bin/arcreader</code> for a script that'll
 * take care of classpaths and the calling of ARCReader.
 *
 * <p>Outputs using a pseudo-CDX format as described here:
 * <a href="http://www.archive.org/web/researcher/cdx_legend.php">CDX
 * Legend</a> and here
 * <a href="http://www.archive.org/web/researcher/example_cdx.php">Example</a>.
 * Legend used in below is: 'CDX b e a m s c V (or v if uncompressed) n g'.
 * Hash is hard-coded straight SHA-1 hash of content.
 *
 * @param args Command-line arguments.
 * @throws ParseException Failed parse of the command line.
 * @throws IOException
 * @throws java.text.ParseException
 */
public static void main(String[] args) throws ParseException, IOException, java.text.ParseException {
    Options options = getOptions();
    options.addOption(new Option("p", "parse", false, "Parse headers."));
    PosixParser parser = new PosixParser();
    CommandLine cmdline = parser.parse(options, args, false);
    List cmdlineArgs = cmdline.getArgList();
    Option[] cmdlineOptions = cmdline.getOptions();
    HelpFormatter formatter = new HelpFormatter();

    // If no args, print help.
    if (cmdlineArgs.size() <= 0) {
        usage(formatter, options, 0);
    }

    // Now look at options passed.
    long offset = -1;
    boolean digest = false;
    boolean strict = false;
    boolean parse = false;
    String format = CDX;
    for (int i = 0; i < cmdlineOptions.length; i++) {
        switch (cmdlineOptions[i].getId()) {
        case 'h':
            usage(formatter, options, 0);
            break;
        case 'o':
            offset = Long.parseLong(cmdlineOptions[i].getValue());
            break;
        case 's':
            strict = true;
            break;
        case 'p':
            parse = true;
            break;
        case 'd':
            digest = getTrueOrFalse(cmdlineOptions[i].getValue());
            break;
        case 'f':
            format = cmdlineOptions[i].getValue().toLowerCase();
            boolean match = false;
            // List of supported formats.
            final String[] supportedFormats = { CDX, DUMP, GZIP_DUMP, HEADER, NOHEAD, CDX_FILE };
            for (int ii = 0; ii < supportedFormats.length; ii++) {
                if (supportedFormats[ii].equals(format)) {
                    match = true;
                    break;
                }
            }
            if (!match) {
                usage(formatter, options, 1);
            }
            break;
        default:
            // the original had a stray second "+" here ("+ +getId()")
            throw new RuntimeException("Unexpected option: " + cmdlineOptions[i].getId());
        }
    }

    if (offset >= 0) {
        if (cmdlineArgs.size() != 1) {
            System.out.println("Error: Pass one arcfile only.");
            usage(formatter, options, 1);
        }
        ARCReader arc = ARCReaderFactory.get((String) cmdlineArgs.get(0), offset);
        arc.setStrict(strict);
        // We must parse headers if we need to skip them.
        if (format.equals(NOHEAD) || format.equals(HEADER)) {
            parse = true;
        }
        arc.setParseHttpHeaders(parse);
        outputRecord(arc, format);
    } else {
        for (Iterator i = cmdlineArgs.iterator(); i.hasNext();) {
            String urlOrPath = (String) i.next();
            try {
                ARCReader r = ARCReaderFactory.get(urlOrPath);
                r.setStrict(strict);
                r.setParseHttpHeaders(parse);
                r.setDigest(digest);
                output(r, format);
            } catch (RuntimeException e) {
                // Write out name of file we failed on to help with debugging.
                // Then print stack trace and try to keep going. We do this for
                // the case where we're being fed a bunch of ARCs; just note the
                // bad one and move on to the next.
                System.err.println("Exception processing " + urlOrPath + ": " + e.getMessage());
                e.printStackTrace(System.err);
                System.exit(1);
            }
        }
    }
}
From source file:edu.umd.cloud9.example.bigram.AnalyzeBigramCount.java
@SuppressWarnings({ "static-access" }) public static void main(String[] args) { Options options = new Options(); options.addOption(OptionBuilder.withArgName("path").hasArg().withDescription("input path").create(INPUT)); CommandLine cmdline = null;/* ww w .j a v a2 s. c o m*/ CommandLineParser parser = new GnuParser(); try { cmdline = parser.parse(options, args); } catch (ParseException exp) { System.err.println("Error parsing command line: " + exp.getMessage()); System.exit(-1); } if (!cmdline.hasOption(INPUT)) { System.out.println("args: " + Arrays.toString(args)); HelpFormatter formatter = new HelpFormatter(); formatter.setWidth(120); formatter.printHelp(AnalyzeBigramCount.class.getName(), options); ToolRunner.printGenericCommandUsage(System.out); System.exit(-1); } String inputPath = cmdline.getOptionValue(INPUT); System.out.println("input path: " + inputPath); List<PairOfWritables<Text, IntWritable>> bigrams = SequenceFileUtils.readDirectory(new Path(inputPath)); Collections.sort(bigrams, new Comparator<PairOfWritables<Text, IntWritable>>() { public int compare(PairOfWritables<Text, IntWritable> e1, PairOfWritables<Text, IntWritable> e2) { if (e2.getRightElement().compareTo(e1.getRightElement()) == 0) { return e1.getLeftElement().compareTo(e2.getLeftElement()); } return e2.getRightElement().compareTo(e1.getRightElement()); } }); int singletons = 0; int sum = 0; for (PairOfWritables<Text, IntWritable> bigram : bigrams) { sum += bigram.getRightElement().get(); if (bigram.getRightElement().get() == 1) { singletons++; } } System.out.println("total number of unique bigrams: " + bigrams.size()); System.out.println("total number of bigrams: " + sum); System.out.println("number of bigrams that appear only once: " + singletons); System.out.println("\nten most frequent bigrams: "); Iterator<PairOfWritables<Text, IntWritable>> iter = Iterators.limit(bigrams.iterator(), 10); while (iter.hasNext()) { PairOfWritables<Text, IntWritable> bigram = iter.next(); System.out.println(bigram.getLeftElement() + "\t" + bigram.getRightElement()); } }