Example usage for java.util.List.size()

List of usage examples for java.util.List.size()

Introduction

This page collects example usages of the java.util.List.size() method, drawn from several open-source projects.

Prototype

int size();

Document

Returns the number of elements in this list.

Usage

From source file:dk.statsbiblioteket.jpar2.filecompare.FileCompare.java

/**
 * Main method.
 * @param args
 */
public static void main(String[] args) {

    Options options = new Options();
    options.addOption("s", "slices", true, "The number of slices to use in the comparison");

    CommandLineParser parser = new PosixParser();
    try {
        CommandLine cmd = parser.parse(options, args);
        args = cmd.getArgs();
        if (!cmd.hasOption("s") || args.length != 2) {
            System.exit(2);
        }
        int slices = Integer.parseInt(cmd.getOptionValue("s").trim());

        File f1 = new File(args[0]);
        File f2 = new File(args[1]);
        if (f1.length() == f2.length()) {
            int sliceSize = (int) (f1.length() / slices); // integer division: the slice size is rounded down

            DataFile df1 = new DataFile(f1, sliceSize);
            DataFile df2 = new DataFile(f2, sliceSize);

            List<Integer> defectIndexes = df1.compareWithIndex(df2);

            for (int index : defectIndexes) {
                System.out.println("index " + index + ", from " + index * sliceSize + " to "
                        + (index + 1) * sliceSize + " is defect");
            }
            if (defectIndexes.size() == 0) {
                System.out.println("Files are identical");
            }

        } else {
            System.out.println("Files differ in length, cannot help you");
        }
    } catch (ParseException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }

}

From source file:com.sindicetech.siren.demo.loader.Loader.java

/** checks parameters, instantiate loader and starts load */
public static void main(String[] args) {
    CommandLineParser cmdLineParser = new BasicParser();
    CommandLine cmd = null;
    Options options = buildOptions();
    try {
        cmd = cmdLineParser.parse(options, args);
    } catch (ParseException e) {
        showHelpExit(options);
    }
    if (cmd.hasOption(HELP_OPT)) {
        showExtendHelpExit(options);
    }

    List<File> filesToProcess = Loader.checkInputFilesAndFolders(cmd.getOptionValues(INPUT_FILE_OPT));
    if (filesToProcess.size() == 0) {
        logger.error("no file to process");
        System.exit(-1);
    }
    Loader loader = new Loader(new HttpSolrServer(cmd.getOptionValue(URL_OPT, DEFAULT_SOLR_URL)),
            Loader.retrieveAndChekBatchSize(cmd.getOptionValue(BATCH_OPT)),
            cmd.getOptionValue(EXT_OPT, DEFAULT_JSON_EXTENSION), !cmd.hasOption(NO_EXT_CHECK_OPT),
            cmd.hasOption(COMMIT_EACH_OPT), cmd.hasOption(FILENAME_AS_ID_OPT));
    loader.loadFiles(filesToProcess);
}

From source file:com.ds.test.ClientFormLogin.java

public static void main(String[] args) throws Exception {

    DefaultHttpClient httpclient = new DefaultHttpClient();
    try {
        HttpGet httpget = new HttpGet("http://www.iteye.com/login");

        HttpResponse response = httpclient.execute(httpget);
        HttpEntity entity = response.getEntity();

        System.out.println("Login form get: " + response.getStatusLine());

        System.out.println("cookies:");
        List<Cookie> cookies = httpclient.getCookieStore().getCookies();
        if (cookies.isEmpty()) {
            System.out.println("None");
        } else {
            for (int i = 0; i < cookies.size(); i++) {
                System.out.println(cookies.get(i).toString());
            }
        }

        /* HeaderIterator hi = response.headerIterator();
         while(hi.hasNext()){
            System.out.println(hi.next());
         }
                 
         EntityUtils.consume(entity);
         */

        String token = parseHtml(EntityUtils.toString(entity));

        httpget.releaseConnection();

        System.out.println("********************************************************");

        HttpPost httpost = new HttpPost("http://www.iteye.com/login");

        List<NameValuePair> nvps = new ArrayList<NameValuePair>();
        nvps.add(new BasicNameValuePair("name", ""));
        nvps.add(new BasicNameValuePair("password", ""));
        nvps.add(new BasicNameValuePair("authenticity_token", token));

        httpost.setEntity(new UrlEncodedFormEntity(nvps, Consts.UTF_8));

        response = httpclient.execute(httpost);
        entity = response.getEntity();

        System.out.println("Login form get: " + response.getStatusLine());
        EntityUtils.consume(entity);

        System.out.println("Post logon cookies:");
        cookies = httpclient.getCookieStore().getCookies();
        if (cookies.isEmpty()) {
            System.out.println("None");
        } else {
            for (int i = 0; i < cookies.size(); i++) {
                System.out.println(cookies.get(i).toString());
            }
        }

        System.out.println("********************************************************");

        HttpGet httpget2 = new HttpGet("http://www.iteye.com/login");

        HttpResponse response2 = httpclient.execute(httpget2);
        HttpEntity entity2 = response2.getEntity();

        System.out.println("Login form get: " + response2.getStatusLine());

        print(response2);

    } finally {
        // When HttpClient instance is no longer needed,
        // shut down the connection manager to ensure
        // immediate deallocation of all system resources
        httpclient.getConnectionManager().shutdown();
    }
}

From source file:HelloSmartsheet.java

public static void main(String[] args) {
    HttpURLConnection connection = null;
    StringBuilder response = new StringBuilder();

    //We are using the Jackson JSON parser to deserialize the JSON. See http://wiki.fasterxml.com/JacksonHome
    //Feel free to use whichever library you prefer.
    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    try {

        System.out.println("STARTING HelloSmartsheet...");
        //Create a BufferedReader to read user input.
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));

        System.out.print("Enter Smartsheet API access token:");
        String accessToken = in.readLine();
        System.out.println("Fetching list of your sheets...");
        //Create a connection and fetch the list of sheets
        connection = (HttpURLConnection) new URL(GET_SHEETS_URL).openConnection();
        connection.addRequestProperty("Authorization", "Bearer " + accessToken);
        BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
        String line;
        //Read the response line by line.
        while ((line = reader.readLine()) != null) {
            response.append(line);
        }
        reader.close();
        //Use Jackson to convert the JSON string to a List of Sheets
        List<Sheet> sheets = mapper.readValue(response.toString(), new TypeReference<List<Sheet>>() {
        });
        if (sheets.size() == 0) {
            System.out.println("You don't have any sheets.  Goodbye!");
            return;
        }
        System.out.println("Total sheets: " + sheets.size());
        int i = 1;
        for (Sheet sheet : sheets) {
            System.out.println(i++ + ": " + sheet.name);
        }
        System.out.print("Enter the number of the sheet you want to share: ");

        //Prompt the user to provide the sheet number, the email address, and the access level
        Integer sheetNumber = Integer.parseInt(in.readLine().trim()); //NOTE: for simplicity, error handling and input validation is neglected.
        Sheet chosenSheet = sheets.get(sheetNumber - 1);

        System.out.print("Enter an email address to share " + chosenSheet.getName() + " to: ");
        String email = in.readLine();

        System.out.print("Choose an access level (VIEWER, EDITOR, EDITOR_SHARE, ADMIN) for " + email + ": ");
        String accessLevel = in.readLine();

        //Create a share object
        Share share = new Share();
        share.setEmail(email);
        share.setAccessLevel(accessLevel);

        System.out.println("Sharing " + chosenSheet.name + " to " + email + " as " + accessLevel + ".");

        //Create a connection. Note the SHARE_SHEET_URL uses /sheet as opposed to /sheets (with an 's')
        connection = (HttpURLConnection) new URL(SHARE_SHEET_URL.replace(SHEET_ID, "" + chosenSheet.getId()))
                .openConnection();
        connection.setDoOutput(true);
        connection.addRequestProperty("Authorization", "Bearer " + accessToken);
        connection.addRequestProperty("Content-Type", "application/json");

        OutputStreamWriter writer = new OutputStreamWriter(connection.getOutputStream());
        //Serialize the Share object
        writer.write(mapper.writeValueAsString(share));
        writer.close();

        //Read the response and parse the JSON
        reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
        response = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            response.append(line);
        }

        Result result = mapper.readValue(response.toString(), Result.class);
        System.out.println("Sheet shared successfully, share ID " + result.result.id);
        System.out.println("Press any key to quit.");
        in.read();

    } catch (IOException e) {
        BufferedReader reader = new BufferedReader(
                new InputStreamReader(((HttpURLConnection) connection).getErrorStream()));
        String line;
        try {
            response = new StringBuilder();
            while ((line = reader.readLine()) != null) {
                response.append(line);
            }
            reader.close();
            Result result = mapper.readValue(response.toString(), Result.class);
            System.out.println(result.message);
        } catch (IOException e1) {
            e1.printStackTrace();
        }

    } catch (Exception e) {
        System.out.println("Something broke: " + e.getMessage());
        e.printStackTrace();
    }

}

From source file:com.jaspersoft.jasperserver.export.RemoveDuplicatedDisplayName.java

public static void main(String[] args) {

    Parameters params = null;
    boolean success = false;
    try {

        GenericApplicationContext ctx = new GenericApplicationContext();
        XmlBeanDefinitionReader configReader = new XmlBeanDefinitionReader(ctx);
        List resourceXML = getPaths(args[2]);
        if (args != null && args.length > 0) {
            for (int i = 0; i < resourceXML.size(); i++) {
                org.springframework.core.io.Resource resource = classPathResourceFactory
                        .create((String) resourceXML.get(i));
                configReader.loadBeanDefinitions(resource);
            }
        }
        ctx.refresh();
        if (args.length > 3) {
            if ("UPDATE".equals(args[3])) {
                updateRepo = true;
            }
        }

        // write to file

        try {
            CommandBean commandBean = (CommandBean) ctx.getBean("removeDuplicateDisplayName",
                    CommandBean.class);
            Charset encoding = Charset.forName(
                    ((RemoveDuplicatedDisplayName) commandBean).getEncodingProvider().getCharacterEncoding());
            osw = new OutputStreamWriter(new FileOutputStream("remove_duplicated_display_name_report.txt"),
                    encoding);
            commandBean.process(params);

        } finally {
            osw.close();
        }
        success = true;
    } catch (Exception e) {
        e.printStackTrace(System.err);
    }
    System.exit(success ? 0 : -1);
}

From source file:com.act.lcms.AnimateNetCDFAroundMass.java

public static void main(String[] args) throws Exception {
    if (args.length < 7 || !areNCFiles(Arrays.copyOfRange(args, 5, args.length))) {
        throw new RuntimeException(
                "Needs: \n" + "(1) mass value, e.g., 132.0772 \n" + "(2) time value, e.g., 39.2, (seconds), \n"
                        + "(3) minimum Mz Precision, 0.04 \n" + "(4) max z axis, e.g., 20000 \n"
                        + "(5) prefix for .data and rendered .pdf \n" + "(6..) 2 or more NetCDF .nc files");
    }

    Double mz = Double.parseDouble(args[0]);
    Double time = Double.parseDouble(args[1]);
    Double minMzPrecision = Double.parseDouble(args[2]);
    Double maxZAxis = Double.parseDouble(args[3]);
    String outPrefix = args[4];

    // the mz values go from 50-950, we start with a big window and exponentially narrow down
    double mzWin = 100;
    // time values go from 0-450, we start with a big window and exponentially narrow down
    double timeWin = 50;

    // the factor by which to zoom in every step (has to be >1, a value of 2 is good)
    double factor = 1.2;

    // the animation frame count
    int frame = 1;

    AnimateNetCDFAroundMass c = new AnimateNetCDFAroundMass();
    String[] netCDFFnames = Arrays.copyOfRange(args, 5, args.length);
    List<List<XYZ>> spectra = c.getSpectra(netCDFFnames, time, timeWin, mz, mzWin);

    for (List<XYZ> s : spectra) {
        System.out.format("%d xyz datapoints in (initial narrowed) spectra\n", s.size());
    }

    String[] labels = new String[netCDFFnames.length];
    for (int i = 0; i < labels.length; i++)
        labels[i] = "Dataset: " + i;
    // you could set labels to netCDFFnames to get precise labels on the graphs

    Gnuplotter plotter = new Gnuplotter();
    String fmt = "png";

    List<String> outImgFiles = new ArrayList<>(), outDataFiles = new ArrayList<>();
    while (mzWin > minMzPrecision) {

        // exponentially narrow windows down
        mzWin /= factor;
        timeWin /= factor;

        List<List<XYZ>> windowedSpectra = c.getSpectraInWindowAll(spectra, time, timeWin, mz, mzWin);

        String frameid = String.format("%03d", frame);
        String outPDF = outPrefix + frameid + "." + fmt;
        String outDATA = outPrefix + frameid + ".data";
        outImgFiles.add(outPDF);
        outDataFiles.add(outDATA);
        frame++;

        // Write data output to outfile
        PrintStream out = new PrintStream(new FileOutputStream(outDATA));

        // print out the spectra to outDATA
        for (List<XYZ> windowOfSpectra : windowedSpectra) {
            for (XYZ xyz : windowOfSpectra) {
                out.format("%.4f\t%.4f\t%.4f\n", xyz.time, xyz.mz, xyz.intensity);
                out.flush();
            }
            // delimit this dataset from the rest
            out.print("\n\n");
        }

        // close the .data
        out.close();

        // render outDATA to outPDF using gnuplot
        plotter.plotMulti3D(outDATA, outPDF, fmt, labels, maxZAxis);
    }

    String outImgs = outPrefix + "*." + fmt;
    plotter.makeAnimatedGIF(outImgs, outPrefix + ".gif");
    // all the frames are now in the animated gif, remove the intermediate files
    for (String f : outDataFiles)
        new File(f).delete();
    for (String f : outImgFiles)
        new File(f).delete();
}

From source file:com.cloudera.recordbreaker.analyzer.FSCrawler.java

public static void main(String argv[]) throws Exception {
    if (argv.length < 4) {
        System.err.println("Usage: FSCrawler <metadataStoreDir> <schemaDbDir> (--crawl <dir>)");
        return;
    }
    int i = 0;
    File metadataStoreDir = new File(argv[i++]).getCanonicalFile();
    File schemadbdir = new File(argv[i++]).getCanonicalFile();
    String op = argv[i++];
    FSAnalyzer fsa = new FSAnalyzer(metadataStoreDir, schemadbdir);

    try {
        if ("--crawl".equals(op)) {
            File crawlTarget = new File(argv[i++]).getCanonicalFile();
            System.err.println("About to crawl " + crawlTarget);
            FSCrawler crawler = new FSCrawler(fsa);
            crawler.blockingCrawl(new URI("file://" + crawlTarget));
        } else if ("--test".equals(op)) {
            List<SchemaSummary> summaryList = fsa.getSchemaSummaries();
            System.err.println("Schema summary list has " + summaryList.size() + " entries");
        }
    } finally {
        fsa.close();
    }
}

From source file:com.wso2telco.dep.reportingservice.dao.TaxDAO.java

/**
 * The main method.
 *
 * @param args the arguments
 * @throws Exception the exception
 */
public static void main(String[] args) throws Exception {

    TaxDAO taxDAO = new TaxDAO();

    try {
        List<Tax> taxList = taxDAO.getTaxesForSubscription(00, 25);
        for (int i = 0; i < taxList.size(); i++) {
            Tax tax = taxList.get(i);
            System.out.println(tax.getType() + " ~ " + tax.getEffective_from() + " ~ " + tax.getEffective_to()
                    + " ~ " + tax.getValue());
        }

        Map<String, List<APIRequestDTO>> reqMap = taxDAO.getAPIRequestTimesForApplication(
                "yx1eZTmtbBaYqfIuEYMVgIKonSga", (short) 2014, (short) 1, "admin");
        System.out.println(reqMap);

    } catch (APIManagementException e) {
        e.printStackTrace();
    } catch (APIMgtUsageQueryServiceClientException e) {
        e.printStackTrace();
    }
}

From source file:gobblin.test.TestWorker.java

@SuppressWarnings("all")
public static void main(String[] args) throws Exception {
    // Build command-line options
    Option configOption = OptionBuilder.withArgName("framework config file")
            .withDescription("Configuration properties file for the framework").hasArgs().withLongOpt("config")
            .create('c');
    Option jobConfigsOption = OptionBuilder.withArgName("job config files")
            .withDescription("Comma-separated list of job configuration files").hasArgs()
            .withLongOpt("jobconfigs").create('j');
    Option modeOption = OptionBuilder.withArgName("run mode")
            .withDescription("Test mode (schedule|run); 'schedule' means scheduling the jobs, "
                    + "whereas 'run' means running the jobs immediately")
            .hasArg().withLongOpt("mode").create('m');
    Option helpOption = OptionBuilder.withArgName("help").withDescription("Display usage information")
            .withLongOpt("help").create('h');

    Options options = new Options();
    options.addOption(configOption);
    options.addOption(jobConfigsOption);
    options.addOption(modeOption);
    options.addOption(helpOption);

    // Parse command-line options
    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption('h')) {
        printUsage(options);
        System.exit(0);
    }

    // Start the test worker with the given configuration properties
    Configuration config = new PropertiesConfiguration(cmd.getOptionValue('c'));
    Properties properties = ConfigurationConverter.getProperties(config);
    TestWorker testWorker = new TestWorker(properties);
    testWorker.start();

    // Job running mode
    Mode mode = Mode.valueOf(cmd.getOptionValue('m').toUpperCase());

    // Get the list of job configuration files
    List<String> jobConfigFiles = Lists
            .newArrayList(Splitter.on(',').omitEmptyStrings().trimResults().split(cmd.getOptionValue('j')));

    CountDownLatch latch = new CountDownLatch(jobConfigFiles.size());
    for (String jobConfigFile : jobConfigFiles) {
        // For each job, load the job configuration, then run or schedule the job.
        Properties jobProps = new Properties();
        jobProps.load(new FileReader(jobConfigFile));
        jobProps.putAll(properties);
        testWorker.runJob(jobProps, mode, new TestJobListener(latch));
    }
    // Wait for all jobs to finish
    latch.await();

    testWorker.stop();
}

From source file:edu.uci.ics.crawler4j.weatherCrawler.BasicCrawlController.java

public static void main(String[] args) {
    String folder = ConfigUtils.getFolder();
    String crawlerCount = ConfigUtils.getCrawlerCount();
    args = new String[2];
    if (StringUtils.isBlank(folder) || StringUtils.isBlank(crawlerCount)) {
        args[0] = "weather";
        args[1] = "10";
        System.out.println("No parameters in config.properties .......");
        System.out.println("[weather] will be used as rootFolder (it will contain intermediate crawl data)");
        System.out.println("[10] will be used as numberOfCralwers (number of concurrent threads)");
    } else {

        args[0] = folder;
        args[1] = crawlerCount;
    }

    /*
     * crawlStorageFolder is a folder where intermediate crawl data is
     * stored.
     */
    String crawlStorageFolder = args[0];

    /*
     * numberOfCrawlers shows the number of concurrent threads that should
     * be initiated for crawling.
     */
    int numberOfCrawlers = Integer.parseInt(args[1]);

    CrawlConfig config = new CrawlConfig();

    if (crawlStorageFolder != null && IO.deleteFolderContents(new File(crawlStorageFolder)))
        System.out.println("Cleared previous contents of " + crawlStorageFolder);
    config.setCrawlStorageFolder(crawlStorageFolder + "/d" + System.currentTimeMillis());

    /*
     * Be polite: Make sure that we don't send more than 1 request per
     * second (1000 milliseconds between requests).
     */
    config.setPolitenessDelay(1000);

    config.setConnectionTimeout(1000 * 60);
    // config1.setPolitenessDelay(1000);

    /*
     * You can set the maximum crawl depth here. The default value is -1 for
     * unlimited depth
     */
    config.setMaxDepthOfCrawling(StringUtils.isBlank(ConfigUtils.getCrawlerDepth()) ? 40
            : Integer.valueOf(ConfigUtils.getCrawlerDepth()));
    // config1.setMaxDepthOfCrawling(0);

    /*
     * You can set the maximum number of pages to crawl. The default value
     * is -1 for unlimited number of pages
     */
    config.setMaxPagesToFetch(100000);
    // config1.setMaxPagesToFetch(10000);

    /*
     * Do you need to set a proxy? If so, you can use:
     * config.setProxyHost("proxyserver.example.com");
     * config.setProxyPort(8080);
     * 
     * If your proxy also needs authentication:
     * config.setProxyUsername(username); config.setProxyPassword(password);
     */

    if (ConfigUtils.getValue("useProxy", "false").equalsIgnoreCase("true")) {

        System.out.println("?============");
        List<ProxySetting> proxys = ConfigUtils.getProxyList();

        ProxySetting proxy = proxys.get(RandomUtils.nextInt(proxys.size() - 1));

        /* test whether the proxy is available */
        while (!TestProxy.testProxyAvailable(proxy)) {
            proxy = proxys.get(RandomUtils.nextInt(proxys.size() - 1));
        }
        System.out.println("??" + proxy.getIp() + ":" + proxy.getPort());
        config.setProxyHost(proxy.getIp());
        config.setProxyPort(proxy.getPort());
        //      config.setProxyHost("127.0.0.1");
        //      config.setProxyPort(8087);
    } else {
        System.out.println("??============");
    }

    /*
     * This config parameter can be used to set your crawl to be resumable
     * (meaning that you can resume the crawl from a previously
     * interrupted/crashed crawl). Note: if you enable resuming feature and
     * want to start a fresh crawl, you need to delete the contents of
     * rootFolder manually.
     */
    config.setResumableCrawling(false);
    // config1.setResumableCrawling(false);
    /*
     * Instantiate the controller for this crawl.
     */
    PageFetcher pageFetcher = new PageFetcher(config);
    // PageFetcher pageFetcher1 = new PageFetcher(config1);

    RobotstxtConfig robotstxtConfig = new RobotstxtConfig();
    RobotstxtServer robotstxtServer = new RobotstxtServer(robotstxtConfig, pageFetcher);

    try {

        /*
         * For each crawl, you need to add some seed urls. These are the
         * first URLs that are fetched and then the crawler starts following
         * links which are found in these pages
         */
        CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer);

        controller.addSeed(StringUtils.isBlank(ConfigUtils.getSeed()) ? "http://www.tianqi.com/chinacity.html"
                : ConfigUtils.getSeed());

        // controller.addSeed("http://www.ics.uci.edu/~lopes/");
        // controller.addSeed("http://www.ics.uci.edu/~welling/");

        /*
         * Start the crawl. This is a blocking operation, meaning that your
         * code will reach the line after this only when crawling is
         * finished.
         */

        String isDaily = ConfigUtils.getValue("isDaily", "true");

        System.out.println(
                "Target table ======= " + ConfigUtils.getValue("table", "weather_data") + " =======");

        if (isDaily.equalsIgnoreCase("true")) {
            System.out.println("???==============");
            controller.start(BasicDailyCrawler.class, numberOfCrawlers);
        } else {
            System.out.println("???==============");
            controller.start(BasicCrawler.class, numberOfCrawlers);
        }

    } catch (Exception e) {
        e.printStackTrace();
    }

}