Example usage for java.lang String endsWith

List of usage examples for java.lang String endsWith

Introduction

On this page you can find usage examples for the java.lang.String method endsWith.

Prototype

public boolean endsWith(String suffix) 

Document

Tests if this string ends with the specified suffix.
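For orientation, a minimal self-contained sketch of the behavior (the class name EndsWithDemo and the sample strings are illustrative, not taken from the projects below):

public class EndsWithDemo {
    public static void main(String[] args) {
        String fileName = "report.xml";
        // endsWith performs a case-sensitive comparison of the trailing characters.
        System.out.println(fileName.endsWith(".xml")); // true
        System.out.println(fileName.endsWith(".XML")); // false
        // Every string ends with the empty suffix and with itself.
        System.out.println(fileName.endsWith(""));     // true
    }
}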

Usage

From source file:com.github.xmltopdf.JasperPdfGenerator.java

/**
 * @param args
 *            the arguments
 * @throws IOException in case of an IO error
 */
public static void main(String[] args) throws IOException {
    if (args.length == 0) {
        LOG.info(null, USAGE);
        return;
    }
    List<String> templates = new ArrayList<String>();
    List<String> xmls = new ArrayList<String>();
    List<String> types = new ArrayList<String>();
    for (String arg : args) {
        if (arg.endsWith(".jrxml")) {
            templates.add(arg);
        } else if (arg.endsWith(".xml")) {
            xmls.add(arg);
        } else if (arg.startsWith(DOC_TYPE)) {
            types = Arrays
                    .asList(arg.substring(DOC_TYPE.length()).replaceAll("\\s+", "").toUpperCase().split(","));
        }
    }
    if (templates.isEmpty()) {
        LOG.info(null, USAGE);
        return;
    }
    if (types.isEmpty()) {
        types.add("PDF");
    }
    for (String type : types) {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        if (DocType.valueOf(type) != null) {
            new JasperPdfGenerator().createDocument(templates, xmls, os, DocType.valueOf(type));
            os.writeTo(
                    new FileOutputStream(templates.get(0).replaceFirst("\\.jrxml$", "." + type.toLowerCase())));
        }
    }
}

From source file:Main.java

public static void main(String[] args) throws Exception {

    String inName = "abc.pack.gz";
    String outName = "abc";

    Pack200.Unpacker unpacker = Pack200.newUnpacker();
    JarOutputStream out = new JarOutputStream(new FileOutputStream(outName));
    InputStream in = new FileInputStream(inName);
    if (inName.endsWith(".gz")) {
        in = new GZIPInputStream(in);
    }

    unpacker.unpack(in, out);
    out.close();
}

From source file:com.grantingersoll.intell.index.Indexer.java

public static void main(String[] args) throws Exception {
    DefaultOptionBuilder obuilder = new DefaultOptionBuilder();
    ArgumentBuilder abuilder = new ArgumentBuilder();
    GroupBuilder gbuilder = new GroupBuilder();

    Option wikipediaFileOpt = obuilder.withLongName("wikiFile").withRequired(true)
            .withArgument(abuilder.withName("wikiFile").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The path to the wikipedia dump file.  May be a directory containing wikipedia dump files."
                            + "  If a directory is specified, only .xml files are used.")
            .withShortName("w").create();

    Option numDocsOpt = obuilder.withLongName("numDocs").withRequired(false)
            .withArgument(abuilder.withName("numDocs").withMinimum(1).withMaximum(1).create())
            .withDescription("The number of docs to index").withShortName("n").create();

    Option solrURLOpt = obuilder.withLongName("solrURL").withRequired(false)
            .withArgument(abuilder.withName("solrURL").withMinimum(1).withMaximum(1).create())
            .withDescription("The URL where Solr lives").withShortName("s").create();

    Option solrBatchOpt = obuilder.withLongName("batch").withRequired(false)
            .withArgument(abuilder.withName("batch").withMinimum(1).withMaximum(1).create())
            .withDescription("The number of docs to include in each indexing batch").withShortName("b")
            .create();

    Group group = gbuilder.withName("Options").withOption(wikipediaFileOpt).withOption(numDocsOpt)
            .withOption(solrURLOpt).withOption(solrBatchOpt).create();

    Parser parser = new Parser();
    parser.setGroup(group);
    CommandLine cmdLine = parser.parse(args);

    File file;
    file = new File(cmdLine.getValue(wikipediaFileOpt).toString());
    File[] dumpFiles;
    if (file.isDirectory()) {
        dumpFiles = file.listFiles(new FilenameFilter() {
            public boolean accept(File file, String s) {
                return s.endsWith(".xml");
            }
        });
    } else {
        dumpFiles = new File[] { file };
    }

    int numDocs = Integer.MAX_VALUE;
    if (cmdLine.hasOption(numDocsOpt)) {
        numDocs = Integer.parseInt(cmdLine.getValue(numDocsOpt).toString());
    }
    String url = DEFAULT_SOLR_URL;
    if (cmdLine.hasOption(solrURLOpt)) {
        url = cmdLine.getValue(solrURLOpt).toString();
    }
    int batch = 100;
    if (cmdLine.hasOption(solrBatchOpt)) {
        batch = Integer.parseInt(cmdLine.getValue(solrBatchOpt).toString());
    }
    Indexer indexer = new Indexer(new CommonsHttpSolrServer(url));
    int total = 0;
    for (int i = 0; i < dumpFiles.length && total < numDocs; i++) {
        File dumpFile = dumpFiles[i];
        log.info("Indexing: " + file + " Num files to index: " + (numDocs - total));
        long start = System.currentTimeMillis();
        int totalFile = indexer.index(dumpFile, numDocs - total, batch);
        long finish = System.currentTimeMillis();
        if (log.isInfoEnabled()) {
            log.info("Indexing " + dumpFile + " took " + (finish - start) + " ms");
        }
        total += totalFile;
        log.info("Done Indexing: " + file + ". Indexed " + totalFile + " docs for that file and " + total
                + " overall.");

    }
    log.info("Indexed " + total + " docs overall.");
}

From source file:de.tudarmstadt.ukp.experiments.argumentation.clustering.ClusterCentroidsMain.java

public static void main(String[] args) throws Exception {
    //        String clutoVectors = args[0];
    //        String clutoOuputClusters = args[1];
    //        String outputClusterCentroids = args[2];

    File[] files = new File("//home/user-ukp/data2/debates-ranked.100-xmi").listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            //                        return name.startsWith("arg") && name.endsWith(".mat");
            return name.startsWith("sent") && name.endsWith(".mat");
        }
    });
    for (File matFile : files) {
        String clutoVectors = matFile.getAbsolutePath();
        //            String clutoOuputClusters = matFile.getAbsolutePath() + ".clustering.100";
        String clutoOuputClusters = matFile.getAbsolutePath() + ".clustering.1000";
        String outputClusterCentroids = matFile.getAbsolutePath() + ".bin";

        TreeMap<Integer, Vector> centroids = computeClusterCentroids(clutoVectors, clutoOuputClusters);

        // and serialize
        ObjectOutputStream objectOutputStream = new ObjectOutputStream(
                new FileOutputStream(outputClusterCentroids));
        objectOutputStream.writeObject(centroids);
        IOUtils.closeQuietly(objectOutputStream);
    }

    //        System.out.println(centroids);
    //        embeddingsToDistance(args[0], centroids, args[2]);
}

From source file:com.tamingtext.qa.WikipediaIndexer.java

public static void main(String[] args) throws Exception {
    DefaultOptionBuilder obuilder = new DefaultOptionBuilder();
    ArgumentBuilder abuilder = new ArgumentBuilder();
    GroupBuilder gbuilder = new GroupBuilder();

    Option wikipediaFileOpt = obuilder.withLongName("wikiFile").withRequired(true)
            .withArgument(abuilder.withName("wikiFile").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The path to the wikipedia dump file.  May be a directory containing wikipedia dump files."
                            + "  If a directory is specified, only .xml files are used.")
            .withShortName("w").create();

    Option numDocsOpt = obuilder.withLongName("numDocs").withRequired(false)
            .withArgument(abuilder.withName("numDocs").withMinimum(1).withMaximum(1).create())
            .withDescription("The number of docs to index").withShortName("n").create();

    Option solrURLOpt = obuilder.withLongName("solrURL").withRequired(false)
            .withArgument(abuilder.withName("solrURL").withMinimum(1).withMaximum(1).create())
            .withDescription("The URL where Solr lives").withShortName("s").create();

    Option solrBatchOpt = obuilder.withLongName("batch").withRequired(false)
            .withArgument(abuilder.withName("batch").withMinimum(1).withMaximum(1).create())
            .withDescription("The number of docs to include in each indexing batch").withShortName("b")
            .create();

    Group group = gbuilder.withName("Options").withOption(wikipediaFileOpt).withOption(numDocsOpt)
            .withOption(solrURLOpt).withOption(solrBatchOpt).create();

    Parser parser = new Parser();
    parser.setGroup(group);
    CommandLine cmdLine = parser.parse(args);

    File file;
    file = new File(cmdLine.getValue(wikipediaFileOpt).toString());
    File[] dumpFiles;
    if (file.isDirectory()) {
        dumpFiles = file.listFiles(new FilenameFilter() {
            public boolean accept(File file, String s) {
                return s.endsWith(".xml");
            }
        });
    } else {
        dumpFiles = new File[] { file };
    }

    int numDocs = Integer.MAX_VALUE;
    if (cmdLine.hasOption(numDocsOpt)) {
        numDocs = Integer.parseInt(cmdLine.getValue(numDocsOpt).toString());
    }
    String url = DEFAULT_SOLR_URL;
    if (cmdLine.hasOption(solrURLOpt)) {
        url = cmdLine.getValue(solrURLOpt).toString();
    }
    int batch = 100;
    if (cmdLine.hasOption(solrBatchOpt)) {
        batch = Integer.parseInt(cmdLine.getValue(solrBatchOpt).toString());
    }
    WikipediaIndexer indexer = new WikipediaIndexer(new CommonsHttpSolrServer(url));
    int total = 0;
    for (int i = 0; i < dumpFiles.length && total < numDocs; i++) {
        File dumpFile = dumpFiles[i];
        log.info("Indexing: " + file + " Num files to index: " + (numDocs - total));
        long start = System.currentTimeMillis();
        int totalFile = indexer.index(dumpFile, numDocs - total, batch);
        long finish = System.currentTimeMillis();
        if (log.isInfoEnabled()) {
            log.info("Indexing " + dumpFile + " took " + (finish - start) + " ms");
        }
        total += totalFile;
        log.info("Done Indexing: " + file + ". Indexed " + totalFile + " docs for that file and " + total
                + " overall.");

    }
    log.info("Indexed " + total + " docs overall.");
}

From source file:com.versusoft.packages.jodl.gui.CommandLineGUI.java

public static void main(String args[]) throws SAXException, IOException {

    Handler fh = new FileHandler(LOG_FILENAME_PATTERN);
    fh.setFormatter(new SimpleFormatter());

    //removeAllLoggersHandlers(Logger.getLogger(""));
    Logger.getLogger("").addHandler(fh);
    Logger.getLogger("").setLevel(Level.FINEST);

    Options options = new Options();

    Option option1 = new Option("in", "ODT file (required)");
    option1.setRequired(true);
    option1.setArgs(1);

    Option option2 = new Option("out", "Output file (required)");
    option2.setRequired(false);
    option2.setArgs(1);

    Option option3 = new Option("pic", "extract pics");
    option3.setRequired(false);
    option3.setArgs(1);

    Option option4 = new Option("page", "enable pagination processing");
    option4.setRequired(false);
    option4.setArgs(0);

    options.addOption(option1);
    options.addOption(option2);
    options.addOption(option3);
    options.addOption(option4);

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        printHelp();
        return;
    }

    if (cmd.hasOption("help")) {
        printHelp();
        return;
    }

    File outFile = new File(cmd.getOptionValue("out"));

    OdtUtils utils = new OdtUtils();

    utils.open(cmd.getOptionValue("in"));
    //utils.correctionStep();
    utils.saveXML(outFile.getAbsolutePath());

    try {

        if (cmd.hasOption("page")) {
            OdtUtils.paginationProcessing(outFile.getAbsolutePath());
        }

        OdtUtils.correctionProcessing(outFile.getAbsolutePath());

    } catch (ParserConfigurationException ex) {
        logger.log(Level.SEVERE, null, ex);
    } catch (SAXException ex) {
        logger.log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        logger.log(Level.SEVERE, null, ex);
    } catch (TransformerConfigurationException ex) {
        logger.log(Level.SEVERE, null, ex);
    } catch (TransformerException ex) {
        logger.log(Level.SEVERE, null, ex);
    }

    if (cmd.hasOption("pic")) {

        String imageDir = cmd.getOptionValue("pic");
        if (!imageDir.endsWith("/")) {
            imageDir += "/";
        }

        try {

            String basedir = new File(cmd.getOptionValue("out")).getParent().toString()
                    + System.getProperty("file.separator");
            OdtUtils.extractAndNormalizeEmbedPictures(cmd.getOptionValue("out"), cmd.getOptionValue("in"),
                    basedir, imageDir);
        } catch (SAXException ex) {
            logger.log(Level.SEVERE, null, ex);
        } catch (ParserConfigurationException ex) {
            logger.log(Level.SEVERE, null, ex);
        } catch (TransformerConfigurationException ex) {
            logger.log(Level.SEVERE, null, ex);
        } catch (TransformerException ex) {
            logger.log(Level.SEVERE, null, ex);
        }
    }

}

From source file:com.metamx.druid.utils.ExposeS3DataSource.java

public static void main(String[] args) throws ServiceException, IOException, NoSuchAlgorithmException {
    CLI cli = new CLI();
    cli.addOption(new RequiredOption(null, "s3Bucket", true, "s3 bucket to pull data from"));
    cli.addOption(new RequiredOption(null, "s3Path", true,
            "base input path in s3 bucket.  Everything until the date strings."));
    cli.addOption(new RequiredOption(null, "timeInterval", true, "ISO8601 interval of dates to index"));
    cli.addOption(new RequiredOption(null, "granularity", true, String.format(
            "granularity of index, supported granularities: [%s]", Arrays.asList(Granularity.values()))));
    cli.addOption(new RequiredOption(null, "zkCluster", true, "Cluster string to connect to ZK with."));
    cli.addOption(new RequiredOption(null, "zkBasePath", true, "The base path to register index changes to."));

    CommandLine commandLine = cli.parse(args);

    if (commandLine == null) {
        return;
    }

    String s3Bucket = commandLine.getOptionValue("s3Bucket");
    String s3Path = commandLine.getOptionValue("s3Path");
    String timeIntervalString = commandLine.getOptionValue("timeInterval");
    String granularity = commandLine.getOptionValue("granularity");
    String zkCluster = commandLine.getOptionValue("zkCluster");
    String zkBasePath = commandLine.getOptionValue("zkBasePath");

    Interval timeInterval = new Interval(timeIntervalString);
    Granularity gran = Granularity.valueOf(granularity.toUpperCase());
    final RestS3Service s3Client = new RestS3Service(new AWSCredentials(
            System.getProperty("com.metamx.aws.accessKey"), System.getProperty("com.metamx.aws.secretKey")));
    ZkClient zkClient = new ZkClient(new ZkConnection(zkCluster), Integer.MAX_VALUE, new StringZkSerializer());

    zkClient.waitUntilConnected();

    for (Interval interval : gran.getIterable(timeInterval)) {
        log.info("Processing interval[%s]", interval);
        String s3DatePath = JOINER.join(s3Path, gran.toPath(interval.getStart()));
        if (!s3DatePath.endsWith("/")) {
            s3DatePath += "/";
        }

        StorageObjectsChunk chunk = s3Client.listObjectsChunked(s3Bucket, s3DatePath, "/", 2000, null, true);
        TreeSet<String> commonPrefixes = Sets.newTreeSet();
        commonPrefixes.addAll(Arrays.asList(chunk.getCommonPrefixes()));

        if (commonPrefixes.isEmpty()) {
            log.info("Nothing at s3://%s/%s", s3Bucket, s3DatePath);
            continue;
        }

        String latestPrefix = commonPrefixes.last();

        log.info("Latest segments at [s3://%s/%s]", s3Bucket, latestPrefix);

        chunk = s3Client.listObjectsChunked(s3Bucket, latestPrefix, "/", 2000, null, true);
        Integer partitionNumber;
        if (chunk.getCommonPrefixes().length == 0) {
            partitionNumber = null;
        } else {
            partitionNumber = -1;
            for (String partitionPrefix : chunk.getCommonPrefixes()) {
                String[] splits = partitionPrefix.split("/");
                partitionNumber = Math.max(partitionNumber, Integer.parseInt(splits[splits.length - 1]));
            }
        }

        log.info("Highest segment partition[%,d]", partitionNumber);

        if (partitionNumber == null) {
            final S3Object s3Obj = new S3Object(new S3Bucket(s3Bucket),
                    String.format("%sdescriptor.json", latestPrefix));
            updateWithS3Object(zkBasePath, s3Client, zkClient, s3Obj);
        } else {
            for (int i = partitionNumber; i >= 0; --i) {
                final S3Object partitionObject = new S3Object(new S3Bucket(s3Bucket),
                        String.format("%s%s/descriptor.json", latestPrefix, i));

                updateWithS3Object(zkBasePath, s3Client, zkClient, partitionObject);
            }
        }
    }
}

From source file:au.org.ands.vocabs.toolkit.db.PopulateAccessPoints.java

/**
 * Main program.
 * @param args Command-line arguments
 */
public static void main(final String[] args) {
    // Create prefixes that both end with a slash, so that
    // they can be substituted for each other.
    String sparqlPrefix = sparqlPrefixProperty;
    if (!sparqlPrefix.endsWith("/")) {
        sparqlPrefix += "/";
    }
    String sesamePrefix = sesamePrefixProperty;
    if (!sesamePrefix.endsWith("/")) {
        sesamePrefix += "/";
    }
    sesamePrefix += "repositories/";
    System.out.println("sparqlPrefix: " + sparqlPrefix);
    System.out.println("sesamePrefix: " + sesamePrefix);
    List<Version> versions = VersionUtils.getAllVersions();
    for (Version version : versions) {
        System.out.println(version.getId());
        System.out.println(version.getTitle());
        String data = version.getData();
        System.out.println(data);
        JsonNode dataJson = TaskUtils.jsonStringToTree(data);
        JsonNode accessPoints = dataJson.get("access_points");
        if (accessPoints != null) {
            System.out.println(accessPoints);
            System.out.println(accessPoints.size());
            for (JsonNode accessPoint : accessPoints) {
                System.out.println(accessPoint);
                AccessPoint ap = new AccessPoint();
                ap.setVersionId(version.getId());
                String type = accessPoint.get("type").asText();
                JsonObjectBuilder jobPortal = Json.createObjectBuilder();
                JsonObjectBuilder jobToolkit = Json.createObjectBuilder();
                String uri;
                switch (type) {
                case AccessPoint.FILE_TYPE:
                    ap.setType(type);
                    // Get the path from the original access point.
                    String filePath = accessPoint.get("uri").asText();
                    // Save the last component of the path to use
                    // in the portal URI.
                    String downloadFilename = Paths.get(filePath).getFileName().toString();
                    if (!filePath.startsWith("/")) {
                        // Relative path that we need to fix up manually.
                        filePath = "FIXME " + filePath;
                    }
                    jobToolkit.add("path", filePath);
                    ap.setPortalData("");
                    ap.setToolkitData(jobToolkit.build().toString());
                    // Persist what we have ...
                    AccessPointUtils.saveAccessPoint(ap);
                    // ... so that now we can get access to the
                    // ID of the persisted object with ap2.getId().
                    String format;
                    if (downloadFilename.endsWith(".trig")) {
                        // Force TriG. This is needed for some legacy
                        // cases where the filename is ".trig" but
                        // the format has been incorrectly recorded
                        // as RDF/XML.
                        format = "TriG";
                    } else {
                        format = accessPoint.get("format").asText();
                    }
                    jobPortal.add("format", format);
                    jobPortal.add("uri", downloadPrefixProperty + ap.getId() + "/" + downloadFilename);
                    ap.setPortalData(jobPortal.build().toString());
                    AccessPointUtils.updateAccessPoint(ap);
                    break;
                case AccessPoint.API_SPARQL_TYPE:
                    ap.setType(type);
                    uri = accessPoint.get("uri").asText();
                    jobPortal.add("uri", uri);
                    if (uri.startsWith(sparqlPrefix)) {
                        // One of ours, so also add a sesameDownload
                        // endpoint.
                        AccessPoint ap2 = new AccessPoint();
                        ap2.setVersionId(version.getId());
                        ap2.setType(AccessPoint.SESAME_DOWNLOAD_TYPE);
                        ap2.setPortalData("");
                        ap2.setToolkitData("");
                        // Persist what we have ...
                        AccessPointUtils.saveAccessPoint(ap2);
                        // ... so that now we can get access to the
                        // ID of the persisted object with ap2.getId().
                        JsonObjectBuilder job2Portal = Json.createObjectBuilder();
                        JsonObjectBuilder job2Toolkit = Json.createObjectBuilder();
                        job2Portal.add("uri", downloadPrefixProperty + ap2.getId() + "/"
                                + Download.downloadFilename(ap2, ""));
                        job2Toolkit.add("uri", uri.replaceFirst(sparqlPrefix, sesamePrefix));
                        ap2.setPortalData(job2Portal.build().toString());
                        ap2.setToolkitData(job2Toolkit.build().toString());
                        AccessPointUtils.updateAccessPoint(ap2);
                        jobPortal.add("source", AccessPoint.SYSTEM_SOURCE);
                    } else {
                        jobPortal.add("source", AccessPoint.USER_SOURCE);
                    }
                    ap.setPortalData(jobPortal.build().toString());
                    ap.setToolkitData(jobToolkit.build().toString());
                    AccessPointUtils.saveAccessPoint(ap);
                    break;
                case AccessPoint.WEBPAGE_TYPE:
                    uri = accessPoint.get("uri").asText();
                    if (uri.endsWith("concept/topConcepts")) {
                        ap.setType(AccessPoint.SISSVOC_TYPE);
                        jobPortal.add("source", AccessPoint.SYSTEM_SOURCE);
                        jobPortal.add("uri", uri.replaceFirst("/concept/topConcepts$", ""));
                    } else {
                        ap.setType(type);
                        jobPortal.add("uri", uri);
                    }
                    ap.setPortalData(jobPortal.build().toString());
                    ap.setToolkitData(jobToolkit.build().toString());
                    AccessPointUtils.saveAccessPoint(ap);
                    break;
                default:
                }
                System.out.println("type is: " + ap.getType());
                System.out.println("portal_data: " + ap.getPortalData());
                System.out.println("toolkit_data: " + ap.getToolkitData());
            }
        }
    }
}

From source file:edu.uiowa.javatm.JavaTM.java

/**
 * @param args First one indicates which topic model to use
 */
public static void main(String[] args) {
    TMGibbsSampler tmGibbsSampler = null;

    Option modelType = Option.builder("model").longOpt("model-type").desc("Type of topic models to use")
            .hasArg().required().build();

    Option dataName = Option.builder("name").longOpt("data-name").desc("Data name: used for saving outputs")
            .hasArg().required().build();

    Option alpha = Option.builder("a").longOpt("alpha")
            .desc("Dirichlet prior for document (author) over topic multinomial").hasArg().required().build();

    Option beta = Option.builder("b").longOpt("beta").desc("Dirichlet prior for topic over word multinomial")
            .hasArg().required().build();

    Option pi = Option.builder("p").longOpt("pi").desc("Dirichlet prior for topic over time multinomial")
            .hasArg().build();

    Option K = Option.builder("K").longOpt("K").desc("The number of timestamp indices").hasArg().build();

    /*Option tau = Option.builder("tau").longOpt("tau")
    .desc("Smoothing constant for topic time")
    .hasArg().build();*/

    Option doc = Option.builder("doc").longOpt("document-file").desc("WD matrix to use").hasArg().required()
            .build();

    Option voc = Option.builder("voc").longOpt("vocabulary-file")
            .desc("Vocabulary file of the corpus of interest").hasArg().required().build();

    Option auth = Option.builder("auth").longOpt("auth-file").desc("Author indices for each token").hasArg()
            .build();

    Option authArray = Option.builder("authArray").longOpt("author-list-file").desc("Author list").hasArg()
            .build();

    Option dkArray = Option.builder("dk").longOpt("document-time-file").desc("Document timestamp file").hasArg()
            .build();

    Option citationMat = Option.builder("cm").longOpt("citation-matrix")
            .desc("Citation overtime for the corpus").hasArg().build();

    Option numTopics = Option.builder("topic").longOpt("num-topics").desc("The total number of topics").hasArg()
            .required().build();

    Option numIters = Option.builder("iter").longOpt("num-iters").desc("The total number of iterations")
            .hasArg().required().build();

    Option outputDir = Option.builder("odir").longOpt("output-dir").desc("Output directory").hasArg().required()
            .build();

    Options options = new Options();
    options.addOption(modelType).addOption(alpha).addOption(beta).addOption(numTopics).addOption(K)
            .addOption(pi).addOption(citationMat).addOption(numIters).addOption(doc).addOption(voc)
            .addOption(dkArray).addOption(outputDir).addOption(auth).addOption(authArray).addOption(dataName);

    CommandLineParser parser = new DefaultParser();
    try {
        // parse the command line arguments
        CommandLine line = parser.parse(options, args);
        String model = line.getOptionValue("model");
        String name = line.getOptionValue("name");
        String docFile = line.getOptionValue("doc");
        String vocFile = line.getOptionValue("voc");
        int topics = Integer.parseInt(line.getOptionValue("topic"));
        int iters = Integer.parseInt(line.getOptionValue("iter"));
        double a = Double.parseDouble(line.getOptionValue("a"));
        double b = Double.parseDouble(line.getOptionValue("b"));

        String modelLower = model.toLowerCase();
        if (modelLower.equals("lda")) {
            tmGibbsSampler = new LDAGibbsSampler(topics, iters, a, b, docFile, vocFile);
        } else if (modelLower.equals("at")) {
            String authFile = line.getOptionValue("auth");
            String authArrayFile = line.getOptionValue("authArray");
            //double tau_val = Double.parseDouble(line.getOptionValue("tau"));
            tmGibbsSampler = new ATGibbsSampler(topics, iters, a, b, docFile, vocFile, authFile, authArrayFile);
        } else if (modelLower.equals("tot")) {
            String dkFile = line.getOptionValue("dk");
            //double tau_val = Double.parseDouble(line.getOptionValue("tau"));
            tmGibbsSampler = new ToTGibbsSampler(topics, iters, a, b, docFile, vocFile, dkFile);
        } else if (modelLower.equals("tiot")) {
            String timeFile = line.getOptionValue("dk");
            String citationFile = line.getOptionValue("cm");
            double p = Double.parseDouble(line.getOptionValue("p"));
            //int k = Integer.parseInt(line.getOptionValue("K"));
            tmGibbsSampler = new TIOTGibbsSampler(topics, iters, a, b, p, docFile, vocFile, timeFile,
                    citationFile);
        } else {
            System.err.println("Invalid model type selection. Must be lda, at, tot or atot.");
            System.exit(ExitStatus.ILLEGAL_ARGUMENT);
        }

        long startTime = System.nanoTime();
        tmGibbsSampler.fit();
        TMOutcome outcome = tmGibbsSampler.get_outcome();
        long endTime = System.nanoTime();
        long duration = (endTime - startTime);
        System.out.println("Overall elapsed time: " + duration / 1000000000. + " seconds");

        tmGibbsSampler.showTopics(10);
        outcome.showTopicDistribution();

        String oDir = line.getOptionValue("odir");
        if (!oDir.endsWith("/")) {
            oDir = oDir + "/";
        }
        // append name to `oDir`
        oDir = oDir + name + "-";

        if (modelLower.contains("tot")) {
            // topic over time (tot and atot) has beta distribution parameters to write
            Utils.write2DArray(((ToTOutcome) outcome).getPsi(), oDir + "psi-" + modelLower + ".csv");
        }

        if (modelLower.contains("tiot")) {
            // topic over time (tot and atot) has beta distribution parameters to write
            Utils.write2DArray(((TIOTOutcome) outcome).getPsi(), oDir + "psi-" + modelLower + ".csv");
            double[][][] ga = ((TIOTOutcome) outcome).getGa();
            for (int t = 0; t < ga.length; t++) {
                Utils.write2DArray(ga[t], oDir + "gamma-" + t + "-" + modelLower + ".csv");
            }
        }

        Utils.write2DArray(outcome.getPhi(), oDir + "phi-" + modelLower + ".csv");
        Utils.write2DArray(outcome.getTheta(), oDir + "theta-" + modelLower + ".csv");

        System.out.println("Output files saved to " + oDir);
    } catch (ParseException exp) {
        // oops, something went wrong
        System.err.println("Parsing failed. Reason: " + exp.getMessage());
    }

}

From source file:net.itransformers.idiscover.discoverylisteners.TopologyDeviceLogger.java

public static void main(String[] args) throws FileNotFoundException, JAXBException {
    String path = "tmp1";
    File dir = new File(path);
    String[] files = dir.list(new FilenameFilter() {
        public boolean accept(File dir, String name) {
            return (name.startsWith("device") && name.endsWith(".xml"));
        }
    });
    TopologyDeviceLogger logger = new TopologyDeviceLogger(path);
    String host = "10.33.0.5";
    String snmpROComm = "public";
    Map<String, String> resourceParams = new HashMap<String, String>();
    resourceParams.put("community", snmpROComm);
    resourceParams.put("version", "1");
    Resource resource = new Resource(host, null, resourceParams);

    for (String fileName : files) {
        FileInputStream is = new FileInputStream(path + File.separator + fileName);
        DiscoveredDeviceData discoveredDeviceData = null;
        try {
            discoveredDeviceData = JaxbMarshalar.unmarshal(DiscoveredDeviceData.class, is);
        } finally {
            try {
                is.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        String deviceName = fileName.substring("device-".length(), fileName.length() - ".xml".length());
        logger.handleDevice(deviceName, null, discoveredDeviceData, resource);
    }

}