List of usage examples for java.lang.Exception: Exception(Throwable cause)
public Exception(Throwable cause)
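Although the constructor shown above takes a Throwable cause, most of the examples that follow call the String-message form, new Exception("..."), to report a missing or invalid argument. For reference, here is a minimal, hypothetical sketch of the cause-taking constructor itself, wrapping a lower-level exception so the original failure stays reachable through getCause():

import java.io.IOException;

public class WrapCauseExample {

    // hypothetical helper: wraps any I/O failure in a plain Exception
    static void loadConfig(String path) throws Exception {
        try {
            throw new IOException("cannot read " + path); // stand-in for a real I/O failure
        } catch (IOException e) {
            // Exception(Throwable cause): the IOException remains available via getCause()
            throw new Exception(e);
        }
    }

    public static void main(String[] args) {
        try {
            loadConfig("settings.properties");
        } catch (Exception e) {
            System.out.println("wrapped: " + e);
            System.out.println("cause: " + e.getCause());
        }
    }
}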
From source file:edu.msu.cme.rdp.classifier.ClassifierCmd.java
/**
 * This is the main method to do classification.
 * <p>Usage: java ClassifierCmd queryFile outputFile [property file].
 * <br>
 * queryFile can be one of the following formats: Fasta, Genbank and EMBL.
 * <br>
 * outputFile will be used to save the classification output.
 * <br>
 * property file contains the mapping of the training files.
 * <br>
 * Note: the training files and the property file should be in the same directory.
 * The default property file is set to data/classifier/16srrna/rRNAClassifier.properties.
 */
public static void main(String[] args) throws Exception {
    String queryFile = null;
    String outputFile = null;
    String propFile = null;
    String gene = null;
    ClassificationResultFormatter.FORMAT format = CmdOptions.DEFAULT_FORMAT;
    int min_bootstrap_words = Classifier.MIN_BOOTSTRSP_WORDS;

    try {
        CommandLine line = new PosixParser().parse(options, args);
        if (line.hasOption(CmdOptions.OUTFILE_SHORT_OPT)) {
            outputFile = line.getOptionValue(CmdOptions.OUTFILE_SHORT_OPT);
        } else {
            throw new Exception("outputFile must be specified");
        }

        if (line.hasOption(CmdOptions.TRAINPROPFILE_SHORT_OPT)) {
            if (gene != null) {
                throw new IllegalArgumentException(
                        "Already specified the gene from the default location. Can not specify train_propfile");
            } else {
                propFile = line.getOptionValue(CmdOptions.TRAINPROPFILE_SHORT_OPT);
            }
        }
        if (line.hasOption(CmdOptions.FORMAT_SHORT_OPT)) {
            String f = line.getOptionValue(CmdOptions.FORMAT_SHORT_OPT);
            if (f.equalsIgnoreCase("allrank")) {
                format = ClassificationResultFormatter.FORMAT.allRank;
            } else if (f.equalsIgnoreCase("fixrank")) {
                format = ClassificationResultFormatter.FORMAT.fixRank;
            } else if (f.equalsIgnoreCase("filterbyconf")) {
                format = ClassificationResultFormatter.FORMAT.filterbyconf;
            } else if (f.equalsIgnoreCase("db")) {
                format = ClassificationResultFormatter.FORMAT.dbformat;
            } else {
                throw new IllegalArgumentException(
                        "Not valid output format, only allrank, fixrank, filterbyconf and db allowed");
            }
        }
        if (line.hasOption(CmdOptions.GENE_SHORT_OPT)) {
            if (propFile != null) {
                throw new IllegalArgumentException(
                        "Already specified train_propfile. Can not specify gene any more");
            }
            gene = line.getOptionValue(CmdOptions.GENE_SHORT_OPT).toLowerCase();
            if (!gene.equals(ClassifierFactory.RRNA_16S_GENE) && !gene.equals(ClassifierFactory.FUNGALLSU_GENE)) {
                throw new IllegalArgumentException(gene + " is NOT valid, only allows "
                        + ClassifierFactory.RRNA_16S_GENE + " and " + ClassifierFactory.FUNGALLSU_GENE);
            }
        }
        if (line.hasOption(CmdOptions.MIN_BOOTSTRAP_WORDS_SHORT_OPT)) {
            min_bootstrap_words = Integer
                    .parseInt(line.getOptionValue(CmdOptions.MIN_BOOTSTRAP_WORDS_SHORT_OPT));
            if (min_bootstrap_words < Classifier.MIN_BOOTSTRSP_WORDS) {
                throw new IllegalArgumentException(CmdOptions.MIN_BOOTSTRAP_WORDS_LONG_OPT
                        + " must be at least " + Classifier.MIN_BOOTSTRSP_WORDS);
            }
        }
        args = line.getArgs();
        if (args.length != 1) {
            throw new Exception("Expect one query file");
        }
        queryFile = args[0];
    } catch (Exception e) {
        System.out.println("Command Error: " + e.getMessage());
        new HelpFormatter().printHelp(120,
                "ClassifierCmd [options] <samplefile>\nNote this is the legacy command for one sample classification ",
                "", options, "");
        return;
    }
    if (propFile == null && gene == null) {
        gene = CmdOptions.DEFAULT_GENE;
    }
    ClassifierCmd classifierCmd = new ClassifierCmd();
    printLicense();
    classifierCmd.doClassify(queryFile, outputFile, propFile, format, gene, min_bootstrap_words);
}
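The ClassifierCmd example above follows a pattern that recurs throughout this page: required arguments are validated inside a try block, a plain new Exception(message) is thrown when one is missing, and the catch block prints the message together with the usage help. A minimal sketch of that pattern, using a hypothetical option name and the same commons-cli classes:

import org.apache.commons.cli.*;

public class RequiredOptionCheck {
    public static void main(String[] args) {
        Options options = new Options();
        options.addOption("o", "outfile", true, "output file (required)"); // hypothetical option

        try {
            CommandLine line = new PosixParser().parse(options, args);
            if (!line.hasOption("o")) {
                // same idiom as above: report the missing argument through an Exception
                throw new Exception("outputFile must be specified");
            }
            System.out.println("writing output to " + line.getOptionValue("o"));
        } catch (Exception e) {
            System.out.println("Command Error: " + e.getMessage());
            new HelpFormatter().printHelp("RequiredOptionCheck [options]", options);
        }
    }
}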
From source file:com.yahoo.storm.yarn.MasterServer.java
@SuppressWarnings("unchecked") public static void main(String[] args) throws Exception { LOG.info("Starting the AM!!!!"); Options opts = new Options(); opts.addOption("app_attempt_id", true, "App Attempt ID. Not to be used " + "unless for testing purposes"); CommandLine cl = new GnuParser().parse(opts, args); ApplicationAttemptId appAttemptID;/*from w w w .j av a 2s.co m*/ Map<String, String> envs = System.getenv(); if (cl.hasOption("app_attempt_id")) { String appIdStr = cl.getOptionValue("app_attempt_id", ""); appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr); } else if (envs.containsKey(ApplicationConstants.Environment.CONTAINER_ID.name())) { ContainerId containerId = ConverterUtils .toContainerId(envs.get(ApplicationConstants.Environment.CONTAINER_ID.name())); appAttemptID = containerId.getApplicationAttemptId(); LOG.info("appAttemptID from env:" + appAttemptID.toString()); } else { LOG.error("appAttemptID is not specified for storm master"); throw new Exception("appAttemptID is not specified for storm master"); } @SuppressWarnings("rawtypes") Map storm_conf = Config.readStormConfig(null); Util.rmNulls(storm_conf); YarnConfiguration hadoopConf = new YarnConfiguration(); final String host = InetAddress.getLocalHost().getHostName(); storm_conf.put("nimbus.host", host); StormAMRMClient rmClient = new StormAMRMClient(appAttemptID, storm_conf, hadoopConf); rmClient.init(hadoopConf); rmClient.start(); BlockingQueue<Container> launcherQueue = new LinkedBlockingQueue<Container>(); MasterServer server = new MasterServer(storm_conf, rmClient); try { final int port = Utils.getInt(storm_conf.get(Config.MASTER_THRIFT_PORT)); final String target = host + ":" + port; InetSocketAddress addr = NetUtils.createSocketAddr(target); RegisterApplicationMasterResponse resp = rmClient.registerApplicationMaster(addr.getHostName(), port, null); LOG.info("Got a registration response " + resp); LOG.info("Max Capability " + resp.getMaximumResourceCapability()); rmClient.setMaxResource(resp.getMaximumResourceCapability()); LOG.info("Starting HB thread"); server.initAndStartHeartbeat(rmClient, launcherQueue, (Integer) storm_conf.get(Config.MASTER_HEARTBEAT_INTERVAL_MILLIS)); LOG.info("Starting launcher"); initAndStartLauncher(rmClient, launcherQueue); rmClient.startAllSupervisors(); LOG.info("Starting Master Thrift Server"); server.serve(); LOG.info("StormAMRMClient::unregisterApplicationMaster"); rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "AllDone", null); } finally { if (server.isServing()) { LOG.info("Stop Master Thrift Server"); server.stop(); } LOG.info("Stop RM client"); rmClient.stop(); } System.exit(0); }
From source file:com.example.geomesa.kafka08.KafkaLoadTester.java
public static void main(String[] args) throws Exception {
    // read command line args for a connection to Kafka
    CommandLineParser parser = new BasicParser();
    Options options = getCommonRequiredOptions();
    CommandLine cmd = parser.parse(options, args);
    String visibility = getVisibility(cmd);

    if (visibility == null) {
        System.out.println("visibility: null");
    } else {
        System.out.println("visibility: '" + visibility + "'");
    }

    // create the producer and consumer KafkaDataStore objects
    Map<String, String> dsConf = getKafkaDataStoreConf(cmd);
    System.out.println("KDS config: " + dsConf);
    dsConf.put("isProducer", "true");
    DataStore producerDS = DataStoreFinder.getDataStore(dsConf);
    dsConf.put("isProducer", "false");
    DataStore consumerDS = DataStoreFinder.getDataStore(dsConf);

    // verify that we got back our KafkaDataStore objects properly
    if (producerDS == null) {
        throw new Exception("Null producer KafkaDataStore");
    }
    if (consumerDS == null) {
        throw new Exception("Null consumer KafkaDataStore");
    }

    // create the schema which creates a topic in Kafka
    // (only needs to be done once)
    final String sftName = "KafkaStressTest";
    final String sftSchema = "name:String,age:Int,step:Double,lat:Double,dtg:Date,*geom:Point:srid=4326";
    SimpleFeatureType sft = SimpleFeatureTypes.createType(sftName, sftSchema);
    // set zkPath to default if not specified
    String zkPath = (dsConf.get(ZK_PATH) == null) ? "/geomesa/ds/kafka" : dsConf.get(ZK_PATH);
    SimpleFeatureType preppedOutputSft = KafkaDataStoreHelper.createStreamingSFT(sft, zkPath);
    // only create the schema if it hasn't been created already
    if (!Arrays.asList(producerDS.getTypeNames()).contains(sftName))
        producerDS.createSchema(preppedOutputSft);

    System.out.println("Register KafkaDataStore in GeoServer (Press enter to continue)");
    System.in.read();

    // the live consumer must be created before the producer writes features
    // in order to read streaming data.
    // i.e. the live consumer will only read data written after its instantiation
    SimpleFeatureStore producerFS = (SimpleFeatureStore) producerDS.getFeatureSource(sftName);
    SimpleFeatureSource consumerFS = consumerDS.getFeatureSource(sftName);

    // creates and adds SimpleFeatures to the producer every 1/5th of a second
    System.out.println("Writing features to Kafka... refresh GeoServer layer preview to see changes");
    SimpleFeatureBuilder builder = new SimpleFeatureBuilder(sft);
    Integer numFeats = getLoad(cmd);

    System.out.println("Building a list of " + numFeats + " SimpleFeatures.");
    List<SimpleFeature> features = IntStream.range(1, numFeats)
            .mapToObj(i -> createFeature(builder, i, visibility)).collect(Collectors.toList());

    // set variables to estimate feature production rate
    Long startTime = null;
    Long featuresSinceStartTime = 0L;
    int cycle = 0;
    int cyclesToSkip = 50000 / numFeats; // collect enough features
                                         // to get an accurate rate estimate

    while (true) {
        // write features
        features.forEach(feat -> {
            try {
                DefaultFeatureCollection featureCollection = new DefaultFeatureCollection();
                featureCollection.add(feat);
                producerFS.addFeatures(featureCollection);
            } catch (Exception e) {
                System.out.println("Caught an exception while writing features.");
                e.printStackTrace();
            }
            updateFeature(feat);
        });

        // count features written
        Integer consumerSize = consumerFS.getFeatures().size();
        cycle++;
        featuresSinceStartTime += consumerSize;
        System.out.println("At " + new Date() + " wrote " + consumerSize + " features");

        // if we've collected enough features, calculate the rate
        if (cycle >= cyclesToSkip || startTime == null) {
            Long endTime = System.currentTimeMillis();
            if (startTime != null) {
                Long diffTime = endTime - startTime;
                Double rate = (featuresSinceStartTime.doubleValue() * 1000.0) / diffTime.doubleValue();
                System.out.printf("%.1f feats/sec (%d/%d)\n", rate, featuresSinceStartTime, diffTime);
            }
            cycle = 0;
            startTime = endTime;
            featuresSinceStartTime = 0L;
        }
    }
}
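In the GeoMesa Kafka examples on this page, DataStoreFinder.getDataStore can return null (rather than throwing) when no factory accepts the connection parameters, which is why the code above immediately converts a null result into an Exception. A stripped-down, self-contained sketch of that null check, with hypothetical connection parameters:

import java.util.HashMap;
import java.util.Map;
import org.geotools.data.DataStore;
import org.geotools.data.DataStoreFinder;

public class DataStoreNullCheck {
    public static void main(String[] args) throws Exception {
        Map<String, String> dsConf = new HashMap<>();
        dsConf.put("brokers", "localhost:9092");     // hypothetical Kafka connection parameters
        dsConf.put("zookeepers", "localhost:2181");
        dsConf.put("isProducer", "true");

        // getDataStore yields null when no registered factory can handle the parameters
        DataStore producerDS = DataStoreFinder.getDataStore(dsConf);
        if (producerDS == null) {
            // fail fast: a silent null would only surface later as a NullPointerException
            throw new Exception("Null producer KafkaDataStore");
        }
        System.out.println("Connected: " + producerDS);
    }
}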
From source file:edu.msu.cme.rdp.framebot.stat.TaxonAbundance.java
/**
 * this class group the nearest matches by phylum/class, or by match
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    HashMap<String, Double> coveragetMap = null;
    double identity = 0.0;
    try {
        CommandLine line = new PosixParser().parse(options, args);
        if (line.hasOption("seqCoverage")) {
            String coveragefile = line.getOptionValue("seqCoverage");
            coveragetMap = parseKmerCoverage(coveragefile);
        }
        if (line.hasOption("identity")) {
            identity = Double.parseDouble(line.getOptionValue("identity"));
            if (identity < 0 || identity > 100) {
                throw new IllegalArgumentException("identity cutoff should be in the range of 0 and 100");
            }
        }

        args = line.getArgs();
        if (args.length != 3) {
            throw new Exception("");
        }
    } catch (Exception e) {
        System.out.println("Command Error: " + e.getMessage());
        new HelpFormatter().printHelp(80,
                "[options] <FrameBot Alignment file or Dir> <seqLineage> <out file> ", "", options,
                "seqLineage: a tab-delimited file with ref seqID and lineage, or fasta of ref seq with lineage as the descrption"
                        + "\nframeBot alignment file or Dir: frameBot alignment files "
                        + "\noutfile: output with the nearest match count group by phylum/class; and by match name");
    }
    TaxonAbundance.mapAbundance(new File(args[0]), new File(args[1]), args[2], coveragetMap, identity);
}
From source file:edu.msu.cme.rdp.classifier.train.ClassifierTraineeMaker.java
/**
 * This is the main method to create training files from raw taxonomic information.
 * <p>
 * Usage: java ClassifierTraineeMaker tax_file rawseq.fa trainsetNo version version_modification output_directory.
 * See the ClassifierTraineeMaker constructor for more detail.
 * @param args
 * @throws FileNotFoundException
 * @throws IOException
 */
public static void main(String[] args) throws FileNotFoundException, IOException {
    String taxFile;
    String cnFile = null;
    String seqFile;
    int trainset_no = 1;
    String version = null;
    String modification = null;
    String outdir = null;

    try {
        CommandLine line = new PosixParser().parse(options, args);
        if (line.hasOption("t")) {
            taxFile = line.getOptionValue("t");
        } else {
            throw new Exception("taxon file must be specified");
        }
        if (line.hasOption("c")) {
            cnFile = line.getOptionValue("c");
        }
        if (line.hasOption("s")) {
            seqFile = line.getOptionValue("s");
        } else {
            throw new Exception("seq file must be specified");
        }
        if (line.hasOption("n")) {
            try {
                trainset_no = Integer.parseInt(line.getOptionValue("n"));
            } catch (NumberFormatException ex) {
                throw new IllegalArgumentException("trainset_no needs to be an integer.");
            }
        }
        if (line.hasOption("o")) {
            outdir = line.getOptionValue("o");
        } else {
            throw new Exception("output directory must be specified");
        }
        if (line.hasOption("v")) {
            version = line.getOptionValue("v");
        }
        if (line.hasOption("m")) {
            modification = line.getOptionValue("m");
        }
    } catch (Exception e) {
        System.out.println("Command Error: " + e.getMessage());
        new HelpFormatter().printHelp(120, "train", "", options, "", true);
        return;
    }

    ClassifierTraineeMaker maker = new ClassifierTraineeMaker(taxFile, seqFile, cnFile, trainset_no, version,
            modification, outdir);
}
From source file:edu.msu.cme.rdp.kmer.cli.FastKmerFilter.java
public static void main(String[] args) throws Exception {
    final KmerSet<Set<RefKmer>> kmerSet;
    final SeqReader queryReader;
    final SequenceType querySeqType;
    final File queryFile;
    final KmerStartsWriter out;
    final boolean translQuery;
    final int wordSize;
    final int translTable;
    final boolean alignedSeqs;
    final List<String> refLabels = new ArrayList();
    final int maxThreads;
    final int trieWordSize;

    try {
        CommandLine cmdLine = new PosixParser().parse(options, args);
        args = cmdLine.getArgs();

        if (args.length < 3) {
            throw new Exception("Unexpected number of arguments");
        }

        if (cmdLine.hasOption("out")) {
            out = new KmerStartsWriter(cmdLine.getOptionValue("out"));
        } else {
            out = new KmerStartsWriter(System.out);
        }

        if (cmdLine.hasOption("aligned")) {
            alignedSeqs = true;
        } else {
            alignedSeqs = false;
        }

        if (cmdLine.hasOption("transl-table")) {
            translTable = Integer.valueOf(cmdLine.getOptionValue("transl-table"));
        } else {
            translTable = 11;
        }

        if (cmdLine.hasOption("threads")) {
            maxThreads = Integer.valueOf(cmdLine.getOptionValue("threads"));
        } else {
            maxThreads = Runtime.getRuntime().availableProcessors();
        }

        queryFile = new File(args[1]);
        wordSize = Integer.valueOf(args[0]);
        SequenceType refSeqType = null;

        querySeqType = SeqUtils.guessSequenceType(queryFile);
        queryReader = new SequenceReader(queryFile);

        if (querySeqType == SequenceType.Protein) {
            throw new Exception("Expected nucl query sequences");
        }

        refSeqType = SeqUtils
                .guessSequenceType(new File(args[2].contains("=") ? args[2].split("=")[1] : args[2]));

        translQuery = refSeqType == SequenceType.Protein;

        if (translQuery && wordSize % 3 != 0) {
            throw new Exception("Word size must be a multiple of 3 for nucl ref seqs");
        }

        if (translQuery) {
            trieWordSize = wordSize / 3;
        } else {
            trieWordSize = wordSize;
        }
        kmerSet = new KmerSet<Set<RefKmer>>(); //new KmerTrie(trieWordSize, translQuery);

        for (int index = 2; index < args.length; index++) {
            String refName;
            String refFileName = args[index];
            if (refFileName.contains("=")) {
                String[] lexemes = refFileName.split("=");
                refName = lexemes[0];
                refFileName = lexemes[1];
            } else {
                String tmpName = new File(refFileName).getName();
                if (tmpName.contains(".")) {
                    refName = tmpName.substring(0, tmpName.lastIndexOf("."));
                } else {
                    refName = tmpName;
                }
            }

            File refFile = new File(refFileName);

            if (refSeqType != SeqUtils.guessSequenceType(refFile)) {
                throw new Exception("Reference file " + refFile + " contains " + SeqUtils.guessFileFormat(refFile)
                        + " sequences but expected " + refSeqType + " sequences");
            }

            SequenceReader seqReader = new SequenceReader(refFile);
            Sequence seq;

            while ((seq = seqReader.readNextSequence()) != null) {
                if (seq.getSeqName().startsWith("#")) {
                    continue;
                }

                KmerGenerator kmers;
                try {
                    if (translQuery) { //protein ref
                        kmers = new ProtKmerGenerator(seq.getSeqString(), trieWordSize, alignedSeqs);
                    } else {
                        kmers = new NuclKmerGenerator(seq.getSeqString(), trieWordSize, alignedSeqs);
                    }
                    while (kmers.hasNext()) {
                        Kmer temp = kmers.next();
                        long[] next = temp.getLongKmers();
                        Set<RefKmer> refKmers = kmerSet.get(next);
                        if (refKmers == null) {
                            refKmers = new HashSet();
                            kmerSet.add(next, refKmers);
                        }

                        RefKmer kmerRef = new RefKmer();
                        kmerRef.modelPos = kmers.getPosition();
                        kmerRef.refFileIndex = refLabels.size();
                        kmerRef.refSeqid = seq.getSeqName();
                        refKmers.add(kmerRef);
                    }
                } catch (IllegalArgumentException ex) {
                    //System.err.println(seq.getSeqName()+ " " + ex.getMessage());
                }
            }
            seqReader.close();
            refLabels.add(refName);
        }
    } catch (Exception e) {
        new HelpFormatter().printHelp(
                "KmerSearch <kmerSize> <query_file> [name=]<ref_file> ...\nkmerSize should be multiple of 3, (recommend 45, minimum 30, maximum 63) ",
                options);
        e.printStackTrace();
        System.exit(1);
        throw new RuntimeException("Stupid jvm"); //While this will never get thrown it is required to make sure javac doesn't get confused about uninitialized variables
    }

    long startTime = System.currentTimeMillis();
    long seqCount = 0;
    final int maxTasks = 25000;

    System.err.println("Starting kmer mapping at " + new Date());
    System.err.println("* Number of threads: " + maxThreads);
    System.err.println("* References: " + refLabels);
    System.err.println("* Reads file: " + queryFile);
    System.err.println("* Kmer length: " + trieWordSize);
    System.err.println("* Kmer Refset Size: " + kmerSet.size());

    final AtomicInteger processed = new AtomicInteger();
    final AtomicInteger outstandingTasks = new AtomicInteger();

    ExecutorService service = Executors.newFixedThreadPool(maxThreads);

    Sequence querySeq;

    while ((querySeq = queryReader.readNextSequence()) != null) {
        seqCount++;

        String seqString = querySeq.getSeqString();

        if ((!translQuery && seqString.length() < wordSize)
                || (translQuery && seqString.length() < wordSize + 2)) {
            //System.err.println(querySeq.getSeqName() + "\t" + seqString.length());
            continue;
        }

        final Sequence threadSeq = querySeq;

        Runnable r = new Runnable() {
            public void run() {
                try {
                    processSeq(threadSeq, refLabels, kmerSet, out, wordSize, translQuery, translTable, false);
                    processSeq(threadSeq, refLabels, kmerSet, out, wordSize, translQuery, translTable, true);

                    processed.incrementAndGet();
                    outstandingTasks.decrementAndGet();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        };

        outstandingTasks.incrementAndGet();
        service.submit(r);

        while (outstandingTasks.get() >= maxTasks)
            ;

        if ((processed.get() + 1) % 1000000 == 0) {
            System.err.println("Processed " + processed + " sequences in "
                    + (System.currentTimeMillis() - startTime) + " ms");
        }
    }

    service.shutdown();
    service.awaitTermination(1, TimeUnit.DAYS);

    System.err.println("Finished Processed " + processed + " sequences in "
            + (System.currentTimeMillis() - startTime) + " ms");

    out.close();
}
From source file:edu.cmu.lti.oaqa.knn4qa.apps.BuildRetrofitLexicons.java
public static void main(String[] args) {
    Options options = new Options();

    options.addOption(CommonParams.GIZA_ROOT_DIR_PARAM, null, true, CommonParams.GIZA_ROOT_DIR_DESC);
    options.addOption(CommonParams.GIZA_ITER_QTY_PARAM, null, true, CommonParams.GIZA_ITER_QTY_DESC);
    options.addOption(CommonParams.MEMINDEX_PARAM, null, true, CommonParams.MEMINDEX_DESC);
    options.addOption(OUT_FILE_PARAM, null, true, OUT_FILE_DESC);
    options.addOption(MIN_PROB_PARAM, null, true, MIN_PROB_DESC);
    options.addOption(FORMAT_PARAM, null, true, FORMAT_DESC);

    CommandLineParser parser = new org.apache.commons.cli.GnuParser();

    try {
        CommandLine cmd = parser.parse(options, args);

        String gizaRootDir = cmd.getOptionValue(CommonParams.GIZA_ROOT_DIR_PARAM);
        int gizaIterQty = -1;

        if (cmd.hasOption(CommonParams.GIZA_ITER_QTY_PARAM)) {
            gizaIterQty = Integer.parseInt(cmd.getOptionValue(CommonParams.GIZA_ITER_QTY_PARAM));
        } else {
            Usage("Specify: " + CommonParams.GIZA_ITER_QTY_PARAM, options);
        }
        String outFileName = cmd.getOptionValue(OUT_FILE_PARAM);
        if (null == outFileName) {
            Usage("Specify: " + OUT_FILE_PARAM, options);
        }

        String indexDir = cmd.getOptionValue(CommonParams.MEMINDEX_PARAM);

        if (null == indexDir) {
            Usage("Specify: " + CommonParams.MEMINDEX_DESC, options);
        }

        FormatType outType = FormatType.kOrig;

        String outTypeStr = cmd.getOptionValue(FORMAT_PARAM);

        if (null != outTypeStr) {
            if (outTypeStr.equals(ORIG_TYPE)) {
                outType = FormatType.kOrig;
            } else if (outTypeStr.equals(WEIGHTED_TYPE)) {
                outType = FormatType.kWeighted;
            } else if (outTypeStr.equals(UNWEIGHTED_TYPE)) {
                outType = FormatType.kUnweighted;
            } else {
                Usage("Unknown format type: " + outTypeStr, options);
            }
        }

        float minProb = 0;

        if (cmd.hasOption(MIN_PROB_PARAM)) {
            minProb = Float.parseFloat(cmd.getOptionValue(MIN_PROB_PARAM));
        } else {
            Usage("Specify: " + MIN_PROB_PARAM, options);
        }

        System.out.println(String.format(
                "Saving lexicon to '%s' (output format '%s'), keep only entries with translation probability >= %f",
                outFileName, outType.toString(), minProb));

        // We use unlemmatized text here, because lemmatized dictionary is going to be mostly subset of the unlemmatized one.
        InMemForwardIndex textIndex = new InMemForwardIndex(FeatureExtractor.indexFileName(indexDir,
                FeatureExtractor.mFieldNames[FeatureExtractor.TEXT_UNLEMM_FIELD_ID]));
        InMemForwardIndexFilterAndRecoder filterAndRecoder = new InMemForwardIndexFilterAndRecoder(textIndex);

        String prefix = gizaRootDir + "/" + FeatureExtractor.mFieldNames[FeatureExtractor.TEXT_UNLEMM_FIELD_ID]
                + "/";
        GizaVocabularyReader answVoc = new GizaVocabularyReader(prefix + "source.vcb", filterAndRecoder);
        GizaVocabularyReader questVoc = new GizaVocabularyReader(prefix + "target.vcb", filterAndRecoder);

        GizaTranTableReaderAndRecoder gizaTable = new GizaTranTableReaderAndRecoder(false, // we don't need to flip the table for the purpose
                prefix + "/output.t1." + gizaIterQty, filterAndRecoder, answVoc, questVoc,
                (float) FeatureExtractor.DEFAULT_PROB_SELF_TRAN, minProb);

        BufferedWriter outFile = new BufferedWriter(new FileWriter(outFileName));

        for (int srcWordId = 0; srcWordId <= textIndex.getMaxWordId(); ++srcWordId) {
            GizaOneWordTranRecs tranRecs = gizaTable.getTranProbs(srcWordId);

            if (null != tranRecs) {
                String wordSrc = textIndex.getWord(srcWordId);
                StringBuffer sb = new StringBuffer();
                sb.append(wordSrc);

                for (int k = 0; k < tranRecs.mDstIds.length; ++k) {
                    float prob = tranRecs.mProbs[k];
                    if (prob >= minProb) {
                        int dstWordId = tranRecs.mDstIds[k];

                        if (dstWordId == srcWordId && outType != FormatType.kWeighted)
                            continue; // Don't duplicate the word, unless it's probability weighted

                        sb.append(' ');
                        String dstWord = textIndex.getWord(dstWordId);
                        if (null == dstWord) {
                            throw new Exception(
                                    "Bug or inconsistent data: Couldn't retriev a word for wordId = " + dstWordId);
                        }
                        if (dstWord.indexOf(':') >= 0)
                            throw new Exception("Illegal dictionary word '" + dstWord + "' b/c it contains ':'");
                        sb.append(dstWord);

                        if (outType != FormatType.kOrig) {
                            sb.append(':');
                            sb.append(outType == FormatType.kWeighted ? prob : 1);
                        }
                    }
                }

                outFile.write(sb.toString());
                outFile.newLine();
            }
        }
        outFile.close();
    } catch (ParseException e) {
        e.printStackTrace();
        Usage("Cannot parse arguments", options);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println("Terminating due to an exception: " + e);
        System.exit(1);
    }

    System.out.println("Terminated successfully!");
}
From source file:com.hpe.nv.samples.basic.BasicComparisonWithoutNV.java
public static void main(String[] args) throws Exception {
    try {
        // program arguments
        Options options = new Options();
        options.addOption("i", "server-ip", true, "[mandatory] NV Test Manager IP");
        options.addOption("o", "server-port", true, "[mandatory] NV Test Manager port");
        options.addOption("u", "username", true, "[mandatory] NV username");
        options.addOption("w", "password", true, "[mandatory] NV password");
        options.addOption("e", "ssl", true, "[optional] Pass true to use SSL. Default: false");
        options.addOption("y", "proxy", true, "[optional] Proxy server host:port");
        options.addOption("a", "active-adapter-ip", true,
                "[optional] Active adapter IP. Default: --server-ip argument");
        options.addOption("t", "site-url", true,
                "[optional] Site under test URL. Default: HPE Network Virtualization site URL. If you change this value, make sure to change the --xpath argument too");
        options.addOption("x", "xpath", true,
                "[optional] Parameter for ExpectedConditions.visibilityOfElementLocated(By.xpath(...)) method. Use an xpath expression of some element in the site. Default: //div[@id='content']");
        options.addOption("k", "analysis-ports", true,
                "[optional] A comma-separated list of ports for test analysis");
        options.addOption("b", "browser", true,
                "[optional] The browser for which the Selenium WebDriver is built. Possible values: Chrome, Firefox. Default: Firefox");
        options.addOption("d", "debug", true,
                "[optional] Pass true to view console debug messages during execution. Default: false");
        options.addOption("h", "help", false, "[optional] Generates and prints help information");

        // parse and validate the command line arguments
        CommandLineParser parser = new DefaultParser();
        CommandLine line = parser.parse(options, args);

        if (line.hasOption("help")) {
            // print help if help argument is passed
            HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp("BasicComparisonWithoutNV.java", options);
            return;
        }

        if (line.hasOption("server-ip")) {
            serverIp = line.getOptionValue("server-ip");
            if (serverIp.equals("0.0.0.0")) {
                throw new Exception(
                        "Please replace the server IP argument value (0.0.0.0) with your NV Test Manager IP");
            }
        } else {
            throw new Exception("Missing argument -i/--server-ip <serverIp>");
        }

        if (line.hasOption("server-port")) {
            serverPort = Integer.parseInt(line.getOptionValue("server-port"));
        } else {
            throw new Exception("Missing argument -o/--server-port <serverPort>");
        }

        if (line.hasOption("username")) {
            username = line.getOptionValue("username");
        } else {
            throw new Exception("Missing argument -u/--username <username>");
        }

        if (line.hasOption("password")) {
            password = line.getOptionValue("password");
        } else {
            throw new Exception("Missing argument -w/--password <password>");
        }

        if (line.hasOption("ssl")) {
            ssl = Boolean.parseBoolean(line.getOptionValue("ssl"));
        }

        if (line.hasOption("site-url")) {
            siteUrl = line.getOptionValue("site-url");
        } else {
            siteUrl = "http://www8.hp.com/us/en/software-solutions/network-virtualization/index.html";
        }

        if (line.hasOption("xpath")) {
            xpath = line.getOptionValue("xpath");
        } else {
            xpath = "//div[@id='content']";
        }

        if (line.hasOption("proxy")) {
            proxySetting = line.getOptionValue("proxy");
        }

        if (line.hasOption("active-adapter-ip")) {
            activeAdapterIp = line.getOptionValue("active-adapter-ip");
        } else {
            activeAdapterIp = serverIp;
        }

        if (line.hasOption("analysis-ports")) {
            String analysisPortsStr = line.getOptionValue("analysis-ports");
            analysisPorts = analysisPortsStr.split(",");
        } else {
            analysisPorts = new String[] { "80", "8080" };
        }

        if (line.hasOption("browser")) {
            browser = line.getOptionValue("browser");
        } else {
            browser = "Firefox";
        }

        if (line.hasOption("debug")) {
            debug = Boolean.parseBoolean(line.getOptionValue("debug"));
        }

        String newLine = System.getProperty("line.separator");
        String testDescription = "*** This sample demonstrates how NV helps you test your application under various network conditions. ***"
                + newLine
                + "*** This test starts by navigating to the home page in the HPE Network Virtualization website using the Selenium WebDriver. ***"
                + newLine
                + "*** This initial step runs without NV emulation and provides a basis for comparison. ***"
                + newLine + "*** ***" + newLine
                + "*** Next, the sample starts an NV test configured with a \"3G Busy\" network scenario. ***"
                + newLine
                + "*** The same step runs as before - navigating to the home page in the HPE Network Virtualization website - but this time, ***"
                + newLine
                + "*** it does so over an emulated \"3G Busy\" network as part of an NV transaction. ***"
                + newLine + "*** ***" + newLine
                + "*** When the sample finishes running, it prints a summary to the console. This summary displays a comparison of the time ***"
                + newLine
                + "*** it took to navigate to the site both with and without NV's network emulation. The results show that the slow \"3G Busy\" ***"
                + newLine
                + "*** network increases the time it takes to navigate to the site, as you would expect. ***"
                + newLine + "*** ***" + newLine
                + "*** You can view the actual steps of this sample in the BasicComparisonWithoutNV.java file. ***"
                + newLine;

        // print the sample's description
        System.out.println(testDescription);

        // start console spinner
        if (!debug) {
            spinner = new Thread(new Spinner());
            spinner.start();
        }

        // sample execution steps
        /***** Part 1 - Navigate to the site without using NV emulation *****/
        printPartDescription("\b------ Part 1 - Navigate to the site without using NV emulation");
        buildSeleniumWebDriver();
        startNoNV = System.currentTimeMillis();
        seleniumNavigateToPage();
        stopNoNV = System.currentTimeMillis();
        driverCloseAndQuit();
        printPartSeparator();
        /***** Part 2 - Navigate to the site using NV "3G Busy" network scenario emulation *****/
        printPartDescription(
                "------ Part 2 - Navigate to the site using NV \"3G Busy\" network scenario emulation");
        initTestManager();
        setActiveAdapter();
        startBusyTest();
        testRunning = true;
        connectToTransactionManager();
        startTransaction();
        transactionInProgress = true;
        buildSeleniumWebDriver();
        seleniumNavigateToPage();
        stopTransaction();
        transactionInProgress = false;
        driverCloseAndQuit();
        stopTest();
        testRunning = false;
        printPartSeparator();
        /***** Part 3 - Analyze the NV test and print the results to the console *****/
        printPartDescription("------ Part 3 - Analyze the NV test and print the results to the console");
        analyzeTestJson();
        printPartSeparator();
        doneCallback();
    } catch (Exception e) {
        try {
            handleError(e.getMessage());
        } catch (Exception e2) {
            System.out.println("Error occurred: " + e2.getMessage());
        }
    }
}
From source file:org.silverpeas.dbbuilder.DBBuilder.java
/**
 * @param args
 * @see
 */
public static void main(String[] args) {
    ClassPathXmlApplicationContext springContext = new ClassPathXmlApplicationContext(
            "classpath:/spring-jdbc-datasource.xml");
    try {
        // Open the trace output
        Date startDate = new Date();
        System.out.println(
                MessageFormat.format(messages.getString("dbbuilder.start"), DBBuilderAppVersion, startDate));
        console = new Console(DBBuilder.class);
        console.printMessage("*************************************************************");
        console.printMessage(
                MessageFormat.format(messages.getString("dbbuilder.start"), DBBuilderAppVersion, startDate));

        // Read the environment variables from dbBuilderSettings
        dbBuilderResources = FileUtil
                .loadResource("/org/silverpeas/dbBuilder/settings/dbBuilderSettings.properties");

        // Read the input parameters
        params = new CommandLineParameters(console, args);

        if (params.isSimulate() && DatabaseType.ORACLE == params.getDbType()) {
            throw new Exception(messages.getString("oracle.simulate.error"));
        }

        console.printMessage(messages.getString("jdbc.connection.configuration"));
        console.printMessage(ConnectionFactory.getConnectionInfo());
        console.printMessage("\tAction : " + params.getAction());
        console.printMessage("\tVerbose mode : " + params.isVerbose());
        console.printMessage("\tSimulate mode : " + params.isSimulate());

        if (Action.ACTION_CONNECT == params.getAction()) {
            // just a short message and that's it
            console.printMessage(messages.getString("connection.success"));
            System.out.println(messages.getString("connection.success"));
        } else {
            // Modules present in the database before the install
            console.printMessage("DB Status before build :");
            List<String> packagesIntoDB = checkDBStatus();

            // initialize the list of SQL instructions to run at the end of the upgrade
            // to bring the module versions stored in the database up to date
            MetaInstructions sqlMetaInstructions = new MetaInstructions();

            File dirXml = new File(params.getDbType().getDBContributionDir());
            DBXmlDocument destXml = loadMasterContribution(dirXml);

            UninstallInformations processesToCacheIntoDB = new UninstallInformations();

            File[] listeFileXml = dirXml.listFiles();
            Arrays.sort(listeFileXml);

            List<DBXmlDocument> listeDBXmlDocument = new ArrayList<DBXmlDocument>(listeFileXml.length);
            int ignoredFiles = 0;
            // Open all the configuration files
            console.printMessage(messages.getString("ignored.contribution"));
            for (File xmlFile : listeFileXml) {
                if (xmlFile.isFile() && "xml".equals(FileUtil.getExtension(xmlFile))
                        && !(FIRST_DBCONTRIBUTION_FILE.equalsIgnoreCase(xmlFile.getName()))
                        && !(MASTER_DBCONTRIBUTION_FILE.equalsIgnoreCase(xmlFile.getName()))) {
                    DBXmlDocument fXml = new DBXmlDocument(dirXml, xmlFile.getName());
                    fXml.load();
                    // check the dependencies and keep the file only if they are all resolved
                    if (hasUnresolvedRequirements(listeFileXml, fXml)) {
                        console.printMessage('\t' + xmlFile.getName() + " (because of unresolved requirements).");
                        ignoredFiles++;
                    } else if (ACTION_ENFORCE_UNINSTALL == params.getAction()) {
                        console.printMessage(
                                '\t' + xmlFile.getName() + " (because of " + ACTION_ENFORCE_UNINSTALL + " mode).");
                        ignoredFiles++;
                    } else {
                        listeDBXmlDocument.add(fXml);
                    }
                }
            }
            if (0 == ignoredFiles) {
                console.printMessage("\t(none)");
            }

            // prepare a HashMap of the modules present as contribution files
            Map packagesIntoFile = new HashMap();
            int j = 0;
            console.printMessage(messages.getString("merged.contribution"));
            console.printMessage(params.getAction().toString());
            if (ACTION_ENFORCE_UNINSTALL != params.getAction()) {
                console.printMessage('\t' + FIRST_DBCONTRIBUTION_FILE);
                j++;
            }
            for (DBXmlDocument currentDoc : listeDBXmlDocument) {
                console.printMessage('\t' + currentDoc.getName());
                j++;
            }
            if (0 == j) {
                console.printMessage("\t(none)");
            }
            // merge the eligible contribution files:
            console.printMessage("Build decisions are :");
            // first the dbbuilder-contribution file ...
            DBXmlDocument fileXml;
            if (ACTION_ENFORCE_UNINSTALL != params.getAction()) {
                try {
                    fileXml = new DBXmlDocument(dirXml, FIRST_DBCONTRIBUTION_FILE);
                    fileXml.load();
                } catch (Exception e) {
                    // dbbuilder contribution not found -> keep going, we are most likely
                    // uninstalling everything
                    fileXml = null;
                }
                if (null != fileXml) {
                    DBBuilderFileItem dbbuilderItem = new DBBuilderFileItem(fileXml);
                    packagesIntoFile.put(dbbuilderItem.getModule(), null);
                    mergeActionsToDo(dbbuilderItem, destXml, processesToCacheIntoDB, sqlMetaInstructions);
                }
            }

            // ... then the others
            for (DBXmlDocument currentDoc : listeDBXmlDocument) {
                DBBuilderFileItem tmpdbbuilderItem = new DBBuilderFileItem(currentDoc);
                packagesIntoFile.put(tmpdbbuilderItem.getModule(), null);
                mergeActionsToDo(tmpdbbuilderItem, destXml, processesToCacheIntoDB, sqlMetaInstructions);
            }

            // ... and finally the DB pieces to uninstall
            // ... note: since no ordering information is available, they are processed in
            // reverse order so that busCore ends up last, because many constraints in the
            // other modules reference the primary keys of busCore
            List<String> itemsList = new ArrayList<String>();

            boolean foundDBBuilder = false;
            for (String dbPackage : packagesIntoDB) {
                if (!packagesIntoFile.containsKey(dbPackage)) {
                    // Package in the database but not in the contributions -> candidate for uninstallation
                    if (DBBUILDER_MODULE.equalsIgnoreCase(dbPackage)) {
                        foundDBBuilder = true;
                    } else if (ACTION_ENFORCE_UNINSTALL == params.getAction()) {
                        if (dbPackage.equals(params.getModuleName())) {
                            itemsList.add(0, dbPackage);
                        }
                    } else {
                        itemsList.add(0, dbPackage);
                    }
                }
            }
            if (foundDBBuilder) {
                if (ACTION_ENFORCE_UNINSTALL == params.getAction()) {
                    if (DBBUILDER_MODULE.equals(params.getModuleName())) {
                        itemsList.add(itemsList.size(), DBBUILDER_MODULE);
                    }
                } else {
                    itemsList.add(itemsList.size(), DBBUILDER_MODULE);
                }
            }
            for (String item : itemsList) {
                console.printMessage("**** Treating " + item + " ****");
                DBBuilderDBItem tmpdbbuilderItem = new DBBuilderDBItem(item);
                mergeActionsToDo(tmpdbbuilderItem, destXml, processesToCacheIntoDB, sqlMetaInstructions);
            }
            destXml.setName("res.txt");
            destXml.save();
            console.printMessage("Build parts are :");
            // Process the selected pieces
            // note: during this phase errors are handled, so they are caught here
            // without being reprocessed
            if (ACTION_INSTALL == params.getAction()) {
                processDB(destXml, processesToCacheIntoDB, sqlMetaInstructions, TAGS_TO_MERGE_4_INSTALL);
            } else if (ACTION_UNINSTALL == params.getAction() || ACTION_ENFORCE_UNINSTALL == params.getAction()) {
                processDB(destXml, processesToCacheIntoDB, sqlMetaInstructions, TAGS_TO_MERGE_4_UNINSTALL);
            } else if (ACTION_OPTIMIZE == params.getAction()) {
                processDB(destXml, processesToCacheIntoDB, sqlMetaInstructions, TAGS_TO_MERGE_4_OPTIMIZE);
            } else if (ACTION_ALL == params.getAction()) {
                processDB(destXml, processesToCacheIntoDB, sqlMetaInstructions, TAGS_TO_MERGE_4_ALL);
            }

            // Modules present in the database at the end
            console.printMessage("Finally DB Status :");
            checkDBStatus();
        }
        Date endDate = new Date();
        console.printMessage(MessageFormat.format(messages.getString("dbbuilder.success"), endDate));
        System.out.println("*******************************************************************");
        System.out.println(MessageFormat.format(messages.getString("dbbuilder.success"), endDate));
    } catch (Exception e) {
        e.printStackTrace();
        console.printError(e.getMessage(), e);
        Date endDate = new Date();
        console.printError(MessageFormat.format(messages.getString("dbbuilder.failure"), endDate));
        System.out.println("*******************************************************************");
        System.out.println(MessageFormat.format(messages.getString("dbbuilder.failure"), endDate));
        System.exit(1);
    } finally {
        springContext.close();
        console.close();
    }
}
From source file:junkProducer.java
public static void main(String[] args) throws Exception {
    // read command line args for a connection to Kafka
    CommandLineParser parser = new BasicParser();
    Options options = getCommonRequiredOptions();
    CommandLine cmd = parser.parse(options, args);

    // create the producer and consumer KafkaDataStore objects
    Map<String, String> dsConf = getKafkaDataStoreConf(cmd);
    // dsConf.put("isProducer", "true");
    DataStore producerDS = DataStoreFinder.getDataStore(dsConf);

    // verify that we got back our KafkaDataStore objects properly
    if (producerDS == null) {
        throw new Exception("Null producer KafkaDataStore");
    }

    // create the schema which creates a topic in Kafka
    // (only needs to be done once)
    final String sftName = "junk";
    final String sftSchema = "trainStatus:String,trainCode:String,publicMessage:String,direction:String,dtg:Date,*geom:Point:srid=4326";
    SimpleFeatureType sft = SimpleFeatureTypes.createType(sftName, sftSchema);
    // set zkPath to default if not specified
    String zkPath = (dsConf.get(ZK_PATH) == null) ? "/geomesa/ds/kafka" : dsConf.get(ZK_PATH);
    SimpleFeatureType preppedOutputSft = KafkaDataStoreHelper.createStreamingSFT(sft, zkPath);
    // only create the schema if it hasn't been created already
    if (!Arrays.asList(producerDS.getTypeNames()).contains(sftName))
        producerDS.createSchema(preppedOutputSft);

    // the live consumer must be created before the producer writes features
    // in order to read streaming data.
    // i.e. the live consumer will only read data written after its instantiation
    SimpleFeatureStore producerFS = (SimpleFeatureStore) producerDS.getFeatureSource(sftName);

    // creates and adds SimpleFeatures to the producer on an interval
    System.out.println("Writing features to Kafka... refresh GeoServer layer preview to see changes");
    addSimpleFeatures(sft, producerFS);

    System.exit(0);
}