Example usage for java.lang Long MAX_VALUE

Introduction

On this page you can find usage examples for java.lang Long MAX_VALUE.

Prototype

long MAX_VALUE

Document

A constant holding the maximum value a long can have, 2^63 - 1.
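
Since the constant is the largest representable long, arithmetic that exceeds it silently wraps around. A minimal, self-contained sketch (not taken from the examples below) showing the value and the overflow behavior:

public class LongMaxValueDemo {
    public static void main(String[] args) {
        // Prints 9223372036854775807, i.e. 2^63 - 1
        System.out.println(Long.MAX_VALUE);

        // long arithmetic overflows silently: MAX_VALUE + 1 wraps to MIN_VALUE
        System.out.println(Long.MAX_VALUE + 1 == Long.MIN_VALUE); // true

        // Math.addExact throws instead of wrapping when overflow must be caught
        try {
            Math.addExact(Long.MAX_VALUE, 1L);
        } catch (ArithmeticException e) {
            System.out.println("overflow detected");
        }
    }
}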

Usage

From source file:contract_net.main.java

/**
 * Starts the {@link main}.
 * @param args The first argument optionally indicates the end time of the
 *          simulation.
 */
public static void main(@Nullable String[] args) {
    final long endTime = args != null && args.length >= 1 ? Long.parseLong(args[0]) : Long.MAX_VALUE;

    final String graphFile = args != null && args.length >= 2 ? args[1] : MAP_FILE;
    run(true, endTime, graphFile, null /* new Display() */, null, null);
}
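
Long.MAX_VALUE serves here as an "unbounded" default: when no end time is supplied, the simulation effectively never stops on its own, because every reachable time stamp stays below the sentinel. A minimal sketch of the same default-sentinel pattern in isolation (class name is hypothetical):

public class UnboundedDefaultDemo {
    public static void main(String[] args) {
        // Absent argument means "no limit": Long.MAX_VALUE compares greater
        // than or equal to any other long, so a bound check never triggers
        final long endTime = args.length >= 1 ? Long.parseLong(args[0]) : Long.MAX_VALUE;
        System.out.println("running until t = " + endTime);
    }
}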

From source file:TestBufferStreamGenomicsDBImporter.java

/**
 * Sample driver code for testing Java VariantContext write API for GenomicsDB
 * The code shows two ways of using the API
 *   (a) Iterator<VariantContext>
 *   (b) Directly adding VariantContext objects
 * If "-iterators" is passed as the second argument, method (a) is used.
 */
public static void main(final String[] args) throws IOException, GenomicsDBException, ParseException {
    if (args.length < 2) {
        System.err.println("For loading: [-iterators] <loader.json> "
                + "<stream_name_to_file.json> [bufferCapacity rank lbRowIdx ubRowIdx useMultiChromosomeIterator]");
        System.exit(-1);
    }
    int argsLoaderFileIdx = 0;
    if (args[0].equals("-iterators"))
        argsLoaderFileIdx = 1;
    //Buffer capacity
    long bufferCapacity = (args.length >= argsLoaderFileIdx + 3) ? Long.parseLong(args[argsLoaderFileIdx + 2])
            : 1024;
    //Specify rank (or partition idx) of this process
    int rank = (args.length >= argsLoaderFileIdx + 4) ? Integer.parseInt(args[argsLoaderFileIdx + 3]) : 0;
    //Specify smallest row idx from which to start loading.
    // This is useful for incremental loading into existing array
    long lbRowIdx = (args.length >= argsLoaderFileIdx + 5) ? Long.parseLong(args[argsLoaderFileIdx + 4]) : 0;
    //Specify largest row idx up to which loading should be performed - for completeness
    long ubRowIdx = (args.length >= argsLoaderFileIdx + 6) ? Long.parseLong(args[argsLoaderFileIdx + 5])
            : Long.MAX_VALUE - 1;
    //Boolean to use MultipleChromosomeIterator
    boolean useMultiChromosomeIterator = (args.length >= argsLoaderFileIdx + 7)
            ? Boolean.parseBoolean(args[argsLoaderFileIdx + 6])
            : false;
    //<loader.json> first arg
    String loaderJSONFile = args[argsLoaderFileIdx];
    GenomicsDBImporter loader = new GenomicsDBImporter(loaderJSONFile, rank, lbRowIdx, ubRowIdx);
    //<stream_name_to_file.json> - useful for the driver only
    //JSON file that contains "stream_name": "vcf_file_path" entries
    FileReader mappingReader = new FileReader(args[argsLoaderFileIdx + 1]);
    JSONParser parser = new JSONParser();
    LinkedHashMap streamNameToFileName = (LinkedHashMap) parser.parse(mappingReader, new LinkedHashFactory());
    ArrayList<VCFFileStreamInfo> streamInfoVec = new ArrayList<VCFFileStreamInfo>();
    long rowIdx = 0;
    for (Object currObj : streamNameToFileName.entrySet()) {
        Map.Entry<String, String> entry = (Map.Entry<String, String>) currObj;
        VCFFileStreamInfo currInfo = new VCFFileStreamInfo(entry.getValue(), loaderJSONFile, rank,
                useMultiChromosomeIterator);

        /** The following 2 lines are not mandatory - use initializeSampleInfoMapFromHeader()
         * iff you know for sure that sample names in the VCF header are globally unique
         * across all streams/files. If not, you have 2 options:
         *   (a) specify your own mapping from sample index in the header to SampleInfo object
         *       (unique_name, rowIdx) OR
         *   (b) specify the mapping in the callset_mapping_file (JSON) and pass null to
         *       addSortedVariantContextIterator()
         */
        LinkedHashMap<Integer, GenomicsDBImporter.SampleInfo> sampleIndexToInfo = new LinkedHashMap<Integer, GenomicsDBImporter.SampleInfo>();
        rowIdx = GenomicsDBImporter.initializeSampleInfoMapFromHeader(sampleIndexToInfo, currInfo.mVCFHeader,
                rowIdx);
        int streamIdx = -1;
        if (args[0].equals("-iterators"))
            streamIdx = loader.addSortedVariantContextIterator(entry.getKey(), currInfo.mVCFHeader,
                    currInfo.mIterator, bufferCapacity, VariantContextWriterBuilder.OutputType.BCF_STREAM,
                    sampleIndexToInfo); //pass sorted VC iterators
        else
            //use buffers - VCs will be provided by caller
            streamIdx = loader.addBufferStream(entry.getKey(), currInfo.mVCFHeader, bufferCapacity,
                    VariantContextWriterBuilder.OutputType.BCF_STREAM, sampleIndexToInfo);
        currInfo.mStreamIdx = streamIdx;
        streamInfoVec.add(currInfo);
    }
    if (args[0].equals("-iterators")) {
        //Much simpler interface if using Iterator<VariantContext>
        loader.importBatch();
        assert loader.isDone();
    } else {
        //Must be called after all iterators/streams added - no more iterators/streams
        // can be added once this function is called
        loader.setupGenomicsDBImporter();
        //Counts and tracks buffer streams for which new data must be supplied
        //Initialized to all the buffer streams
        int numExhaustedBufferStreams = streamInfoVec.size();
        int[] exhaustedBufferStreamIdxs = new int[numExhaustedBufferStreams];
        for (int i = 0; i < numExhaustedBufferStreams; ++i)
            exhaustedBufferStreamIdxs[i] = i;
        while (!loader.isDone()) {
            //Add data for streams that were exhausted in the previous round
            for (int i = 0; i < numExhaustedBufferStreams; ++i) {
                VCFFileStreamInfo currInfo = streamInfoVec.get(exhaustedBufferStreamIdxs[i]);
                boolean added = true;
                while (added && (currInfo.mIterator.hasNext() || currInfo.mNextVC != null)) {
                    if (currInfo.mNextVC != null)
                        added = loader.add(currInfo.mNextVC, currInfo.mStreamIdx);
                    if (added)
                        if (currInfo.mIterator.hasNext())
                            currInfo.mNextVC = currInfo.mIterator.next();
                        else
                            currInfo.mNextVC = null;
                }
            }
            loader.importBatch();
            numExhaustedBufferStreams = (int) loader.getNumExhaustedBufferStreams();
            for (int i = 0; i < numExhaustedBufferStreams; ++i)
                exhaustedBufferStreamIdxs[i] = loader.getExhaustedBufferStreamIndex(i);
        }
    }
}

From source file:com.alertlogic.aws.kinesis.test1.StreamWriter.java

/**
 * Start a number of threads and send randomly generated {@link HttpReferrerPair}s to a Kinesis Stream until the
 * program is terminated.
 *
 * @param args Expecting 3 arguments: the number of threads to use to send data to
 *        Kinesis, the name of the stream to send records to, and the AWS region in
 *        which these resources exist or should be created.
 * @throws InterruptedException If this application is interrupted while sending records to Kinesis.
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length != 3) {
        System.err.println(
                "Usage: " + StreamWriter.class.getSimpleName() + " <number of threads> <stream name> <region>");
        System.exit(1);
    }

    int numberOfThreads = Integer.parseInt(args[0]);
    String streamName = args[1];
    Region region = SampleUtils.parseRegion(args[2]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    // The more resources we declare the higher write IOPS we need on our DynamoDB table.
    // We write a record for each resource every interval.
    // If interval = 500ms, resource count = 7 we need: (1000/500 * 7) = 14 write IOPS minimum.
    List<String> resources = new ArrayList<>();
    resources.add("/index.html");

    // These are the possible referrers to use when generating pairs
    List<String> referrers = new ArrayList<>();
    referrers.add("http://www.amazon.com");
    referrers.add("http://www.google.com");
    referrers.add("http://www.yahoo.com");
    referrers.add("http://www.bing.com");
    referrers.add("http://www.stackoverflow.com");
    referrers.add("http://www.reddit.com");

    HttpReferrerPairFactory pairFactory = new HttpReferrerPairFactory(resources, referrers);

    // Creates a stream to write to with 2 shards if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    final HttpReferrerKinesisPutter putter = new HttpReferrerKinesisPutter(pairFactory, kinesis, streamName);

    ExecutorService es = Executors.newCachedThreadPool();

    Runnable pairSender = new Runnable() {
        @Override
        public void run() {
            try {
                putter.sendPairsIndefinitely(DELAY_BETWEEN_RECORDS_IN_MILLIS, TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                LOG.warn(
                        "Thread encountered an error while sending records. Records will no longer be put by this thread.",
                        ex);
            }
        }
    };

    for (int i = 0; i < numberOfThreads; i++) {
        es.submit(pairSender);
    }

    LOG.info(String.format("Sending pairs with a %dms delay between records with %d thread(s).",
            DELAY_BETWEEN_RECORDS_IN_MILLIS, numberOfThreads));

    es.shutdown();
    es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
}
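
The awaitTermination call above uses Long.MAX_VALUE days as an effectively infinite timeout, a common idiom for blocking until an executor finishes. A stripped-down sketch of the same idiom (class and task are illustrative, not from the source above):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitForeverDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService es = Executors.newFixedThreadPool(2);
        es.submit(() -> System.out.println("task done"));
        es.shutdown(); // stop accepting new tasks, let queued ones finish
        // Long.MAX_VALUE days is on the order of 10^16 years: effectively forever
        es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    }
}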

From source file:com.amazonaws.services.kinesis.samples.datavis.HttpReferrerStreamWriter.java

/**
 * Start a number of threads and send randomly generated {@link HttpReferrerPair}s to a Kinesis Stream until the
 * program is terminated.
 *
 * @param args Expecting 3 arguments: the number of threads to use to send data to
 *        Kinesis, the name of the stream to send records to, and the AWS region in
 *        which these resources exist or should be created.
 * @throws InterruptedException If this application is interrupted while sending records to Kinesis.
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length != 3) {
        System.err.println("Usage: " + HttpReferrerStreamWriter.class.getSimpleName()
                + " <number of threads> <stream name> <region>");
        System.exit(1);
    }

    int numberOfThreads = Integer.parseInt(args[0]);
    String streamName = args[1];
    Region region = SampleUtils.parseRegion(args[2]);

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);

    // The more resources we declare the higher write IOPS we need on our DynamoDB table.
    // We write a record for each resource every interval.
    // If interval = 500ms, resource count = 7 we need: (1000/500 * 7) = 14 write IOPS minimum.
    List<String> resources = new ArrayList<>();
    resources.add("/index.html");

    // These are the possible referrers to use when generating pairs
    List<String> referrers = new ArrayList<>();
    referrers.add("http://www.amazon.com");
    referrers.add("http://www.google.com");
    referrers.add("http://www.yahoo.com");
    referrers.add("http://www.bing.com");
    referrers.add("http://www.stackoverflow.com");
    referrers.add("http://www.reddit.com");

    HttpReferrerPairFactory pairFactory = new HttpReferrerPairFactory(resources, referrers);

    // Creates a stream to write to with 2 shards if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    final HttpReferrerKinesisPutter putter = new HttpReferrerKinesisPutter(pairFactory, kinesis, streamName);

    ExecutorService es = Executors.newCachedThreadPool();

    Runnable pairSender = new Runnable() {
        @Override
        public void run() {
            try {
                putter.sendPairsIndefinitely(DELAY_BETWEEN_RECORDS_IN_MILLIS, TimeUnit.MILLISECONDS);
            } catch (Exception ex) {
                LOG.warn(
                        "Thread encountered an error while sending records. Records will no longer be put by this thread.",
                        ex);
            }
        }
    };

    for (int i = 0; i < numberOfThreads; i++) {
        es.submit(pairSender);
    }

    LOG.info(String.format("Sending pairs with a %dms delay between records with %d thread(s).",
            DELAY_BETWEEN_RECORDS_IN_MILLIS, numberOfThreads));

    es.shutdown();
    es.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
}

From source file:AntColonyPDP.Environment.java

/**
 * Starts the {@link TaxiExample}.
 * @param args The first argument optionally indicates the end time of the
 *          simulation.
 */
public static void main(@Nullable String[] args) {
    final long endTime = args != null && args.length >= 1 ? Long.parseLong(args[0]) : Long.MAX_VALUE;

    run(TESTING, endTime, "gradient field", null /* new Display() */, null, null);
}

From source file:com.twitter.hraven.hadoopJobMonitor.jmx.WhiteList.java

public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name = new ObjectName("com.twitter.hraven.hadoopJobMonitor.jmx:type=WhiteList");
    WhiteList.init("/tmp");
    WhiteList mbean = WhiteList.getInstance();
    mbs.registerMBean(mbean, name);
    System.out.println("Waiting forever...");
    Thread.sleep(Long.MAX_VALUE);
}
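
Thread.sleep(Long.MAX_VALUE) parks the main thread for roughly 292 million years, i.e. effectively forever, keeping the JVM alive so the registered MBean stays reachable. An equivalent way to block indefinitely, sketched here only for comparison (class name is hypothetical), is a CountDownLatch that nothing ever counts down:

import java.util.concurrent.CountDownLatch;

public class WaitForeverAlternative {
    public static void main(String[] args) throws InterruptedException {
        // Count of 1 that is never decremented: await() blocks forever,
        // much like Thread.sleep(Long.MAX_VALUE) but without a finite bound
        new CountDownLatch(1).await();
    }
}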

From source file:DIA_Umpire_To_Skyline.DIA_Umpire_To_Skyline.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws FileNotFoundException, IOException, Exception {
    System.out.println(
            "=================================================================================================");
    System.out.println("DIA-Umpire_To_Skyline (version: " + UmpireInfo.GetInstance().Version + ")");
    if (args.length < 1) {
        System.out.println(
                "command format error, it should be like: java -jar -Xmx20G DIA_Umpire_To_Skyline.jar Path NoThreads");
        System.out.println("command : java -jar -Xmx20G DIA_Umpire_To_Skyline.jar Path [Option]\n");
        System.out.println("\nOptions");
        System.out.println("\t-t\tNo. of threads, Ex: -t4 (using four threads, default value)");
        System.out.println(
                "\t-cP\tPath of msconvert.exe for mzXML conversion, Ex: -cPC:/inetpub/tpp-bin/msconvert");
        return;
    }
    try {
        ConsoleLogger.SetConsoleLogger(Level.DEBUG);
        ConsoleLogger.SetFileLogger(Level.DEBUG,
                FilenameUtils.getFullPath(args[0]) + "diaumpire_to_skyline.log");
    } catch (Exception e) {
        System.out.println("Logger initialization failed");
    }

    Logger.getRootLogger().info("Path:" + args[0]);
    String msconvertpath = "C:/inetpub/tpp-bin/msconvert";

    String WorkFolder = args[0];
    int NoCPUs = 4;

    for (int i = 1; i < args.length; i++) {
        if (args[i].startsWith("-")) {
            if (args[i].startsWith("-cP")) {
                msconvertpath = args[i].substring(3);
                Logger.getRootLogger().info("MSConvert path: " + msconvertpath);
            }
            if (args[i].startsWith("-t")) {
                NoCPUs = Integer.parseInt(args[i].substring(2));
                Logger.getRootLogger().info("No. of threads: " + NoCPUs);
            }
        }
    }

    HashMap<String, File> AssignFiles = new HashMap<>();

    try {
        File folder = new File(WorkFolder);
        if (!folder.exists()) {
            Logger.getRootLogger().info("Path: " + folder.getAbsolutePath() + " cannot be found.");
            // Bail out: listFiles() below would return null for a missing path
            return;
        }
        for (final File fileEntry : folder.listFiles()) {
            if (fileEntry.isFile() && fileEntry.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                    && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                    && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                    && !fileEntry.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
                AssignFiles.put(fileEntry.getAbsolutePath(), fileEntry);
            }
            if (fileEntry.isDirectory()) {
                for (final File fileEntry2 : fileEntry.listFiles()) {
                    if (fileEntry2.isFile() && fileEntry2.getAbsolutePath().toLowerCase().endsWith(".mzxml")
                            && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q1.mzxml")
                            && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q2.mzxml")
                            && !fileEntry2.getAbsolutePath().toLowerCase().endsWith("q3.mzxml")) {
                        AssignFiles.put(fileEntry2.getAbsolutePath(), fileEntry2);
                    }
                }
            }
        }

        Logger.getRootLogger().info("No. of files assigned :" + AssignFiles.size());
        for (File fileEntry : AssignFiles.values()) {
            Logger.getRootLogger().info(fileEntry.getAbsolutePath());
        }

        ExecutorService executorPool = null;
        executorPool = Executors.newFixedThreadPool(3);

        for (File fileEntry : AssignFiles.values()) {
            String mzXMLFile = fileEntry.getAbsolutePath();
            FileThread thread = new FileThread(mzXMLFile, NoCPUs, msconvertpath);
            executorPool.execute(thread);
        }
        executorPool.shutdown();
        try {
            executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Logger.getRootLogger().info("interrupted..");
        }
    } catch (Exception e) {
        Logger.getRootLogger().error(e.getMessage());
        throw e;
    }
    Logger.getRootLogger().info("Job done");
    Logger.getRootLogger().info(
            "=================================================================================================");

}

From source file:fr.tpt.s3.mcdag.bench.MainBench.java

public static void main(String[] args) throws IOException, InterruptedException {

    // Command line options
    Options options = new Options();

    Option input = new Option("i", "input", true, "MC-DAG XML models");
    input.setRequired(true);
    input.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(input);

    Option output = new Option("o", "output", true, "Folder where results have to be written.");
    output.setRequired(true);
    options.addOption(output);

    Option uUti = new Option("u", "utilization", true, "Utilization.");
    uUti.setRequired(true);
    options.addOption(uUti);

    Option output2 = new Option("ot", "output-total", true, "File where total results are being written");
    output2.setRequired(true);
    options.addOption(output2);

    Option oCores = new Option("c", "cores", true, "Cores given to the test");
    oCores.setRequired(true);
    options.addOption(oCores);

    Option oLvls = new Option("l", "levels", true, "Levels tested for the system");
    oLvls.setRequired(true);
    options.addOption(oLvls);

    Option jobs = new Option("j", "jobs", true, "Number of threads to be launched.");
    jobs.setRequired(false);
    options.addOption(jobs);

    Option debug = new Option("d", "debug", false, "Debug logs.");
    debug.setRequired(false);
    options.addOption(debug);

    /*
     * Parsing of the command line
     */
    CommandLineParser parser = new DefaultParser();
    HelpFormatter formatter = new HelpFormatter();
    CommandLine cmd;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println(e.getMessage());
        formatter.printHelp("Benchmarks MultiDAG", options);
        System.exit(1);
        return;
    }

    String inputFilePath[] = cmd.getOptionValues("input");
    String outputFilePath = cmd.getOptionValue("output");
    String outputFilePathTotal = cmd.getOptionValue("output-total");
    double utilization = Double.parseDouble(cmd.getOptionValue("utilization"));
    boolean boolDebug = cmd.hasOption("debug");
    int nbLvls = Integer.parseInt(cmd.getOptionValue("levels"));
    int nbJobs = 1;
    int nbFiles = inputFilePath.length;

    if (cmd.hasOption("jobs"))
        nbJobs = Integer.parseInt(cmd.getOptionValue("jobs"));

    int nbCores = Integer.parseInt(cmd.getOptionValue("cores"));

    /*
     *  While files need to be allocated
     *  run the tests in the pool of threads
     */

    // For dual-criticality systems we call a specific thread
    if (nbLvls == 2) {

        System.out.println(">>>>>>>>>>>>>>>>>>>>> NB levels " + nbLvls);

        int i_files2 = 0;
        String outFile = outputFilePath.substring(0, outputFilePath.lastIndexOf('.'))
                .concat("-schedulability.csv");
        PrintWriter writer = new PrintWriter(outFile, "UTF-8");
        writer.println(
                "Thread; File; FSched (%); FPreempts; FAct; LSched (%); LPreempts; LAct; ESched (%); EPreempts; EAct; HSched(%); HPreempts; HAct; Utilization");
        writer.close();

        ExecutorService executor2 = Executors.newFixedThreadPool(nbJobs);
        while (i_files2 != nbFiles) {
            BenchThreadDualCriticality bt2 = new BenchThreadDualCriticality(inputFilePath[i_files2], outFile,
                    nbCores, boolDebug);

            executor2.execute(bt2);
            i_files2++;
        }

        executor2.shutdown();
        executor2.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);

        int fedTotal = 0;
        int laxTotal = 0;
        int edfTotal = 0;
        int hybridTotal = 0;
        int fedPreempts = 0;
        int laxPreempts = 0;
        int edfPreempts = 0;
        int hybridPreempts = 0;
        int fedActiv = 0;
        int laxActiv = 0;
        int edfActiv = 0;
        int hybridActiv = 0;
        // Read lines in file and do average
        int i = 0;
        File f = new File(outFile);
        @SuppressWarnings("resource")
        Scanner line = new Scanner(f);
        while (line.hasNextLine()) {
            String s = line.nextLine();
            if (i > 0) { // To skip the first line
                try (Scanner inLine = new Scanner(s).useDelimiter("; ")) {
                    int j = 0;

                    while (inLine.hasNext()) {
                        String val = inLine.next();
                        if (j == 2) {
                            fedTotal += Integer.parseInt(val);
                        } else if (j == 3) {
                            fedPreempts += Integer.parseInt(val);
                        } else if (j == 4) {
                            fedActiv += Integer.parseInt(val);
                        } else if (j == 5) {
                            laxTotal += Integer.parseInt(val);
                        } else if (j == 6) {
                            laxPreempts += Integer.parseInt(val);
                        } else if (j == 7) {
                            laxActiv += Integer.parseInt(val);
                        } else if (j == 8) {
                            edfTotal += Integer.parseInt(val);
                        } else if (j == 9) {
                            edfPreempts += Integer.parseInt(val);
                        } else if (j == 10) {
                            edfActiv += Integer.parseInt(val);
                        } else if (j == 11) {
                            hybridTotal += Integer.parseInt(val);
                        } else if (j == 12) {
                            hybridPreempts += Integer.parseInt(val);
                        } else if (j == 13) {
                            hybridActiv += Integer.parseInt(val);
                        }
                        j++;
                    }
                }
            }
            i++;
        }

        // Write percentage
        double fedPerc = (double) fedTotal / nbFiles;
        double laxPerc = (double) laxTotal / nbFiles;
        double edfPerc = (double) edfTotal / nbFiles;
        double hybridPerc = (double) hybridTotal / nbFiles;

        double fedPercPreempts = (double) fedPreempts / fedActiv;
        double laxPercPreempts = (double) laxPreempts / laxActiv;
        double edfPercPreempts = (double) edfPreempts / edfActiv;
        double hybridPercPreempts = (double) hybridPreempts / hybridActiv;

        Writer wOutput = new BufferedWriter(new FileWriter(outputFilePathTotal, true));
        wOutput.write(Thread.currentThread().getName() + "; " + utilization + "; " + fedPerc + "; "
                + fedPreempts + "; " + fedActiv + "; " + fedPercPreempts + "; " + laxPerc + "; " + laxPreempts
                + "; " + laxActiv + "; " + laxPercPreempts + "; " + edfPerc + "; " + edfPreempts + "; "
                + edfActiv + "; " + edfPercPreempts + "; " + hybridPerc + "; " + hybridPreempts + "; "
                + hybridActiv + "; " + hybridPercPreempts + "\n");
        wOutput.close();

    } else if (nbLvls > 2) {
        int i_files2 = 0;
        String outFile = outputFilePath.substring(0, outputFilePath.lastIndexOf('.'))
                .concat("-schedulability.csv");
        PrintWriter writer = new PrintWriter(outFile, "UTF-8");
        writer.println(
                "Thread; File; LSched (%); LPreempts; LAct; ESched (%); EPreempts; EAct; HSched(%); HPreempts; HAct; Utilization");
        writer.close();

        ExecutorService executor2 = Executors.newFixedThreadPool(nbJobs);
        while (i_files2 != nbFiles) {
            BenchThreadNLevels bt2 = new BenchThreadNLevels(inputFilePath[i_files2], outFile, nbCores,
                    boolDebug);

            executor2.execute(bt2);
            i_files2++;
        }

        executor2.shutdown();
        executor2.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);

        int laxTotal = 0;
        int edfTotal = 0;
        int hybridTotal = 0;
        int laxPreempts = 0;
        int edfPreempts = 0;
        int hybridPreempts = 0;
        int laxActiv = 0;
        int edfActiv = 0;
        int hybridActiv = 0;
        // Read lines in file and do average
        int i = 0;
        File f = new File(outFile);
        @SuppressWarnings("resource")
        Scanner line = new Scanner(f);
        while (line.hasNextLine()) {
            String s = line.nextLine();
            if (i > 0) { // To skip the first line
                try (Scanner inLine = new Scanner(s).useDelimiter("; ")) {
                    int j = 0;

                    while (inLine.hasNext()) {
                        String val = inLine.next();
                        if (j == 2) {
                            laxTotal += Integer.parseInt(val);
                        } else if (j == 3) {
                            laxPreempts += Integer.parseInt(val);
                        } else if (j == 4) {
                            laxActiv += Integer.parseInt(val);
                        } else if (j == 5) {
                            edfTotal += Integer.parseInt(val);
                        } else if (j == 6) {
                            edfPreempts += Integer.parseInt(val);
                        } else if (j == 7) {
                            edfActiv += Integer.parseInt(val);
                        } else if (j == 8) {
                            hybridTotal += Integer.parseInt(val);
                        } else if (j == 9) {
                            hybridPreempts += Integer.parseInt(val);
                        } else if (j == 10) {
                            hybridActiv += Integer.parseInt(val);
                        }
                        j++;
                    }
                }
            }
            i++;
        }

        // Write percentage
        double laxPerc = (double) laxTotal / nbFiles;
        double edfPerc = (double) edfTotal / nbFiles;
        double hybridPerc = (double) hybridTotal / nbFiles;

        double laxPercPreempts = (double) laxPreempts / laxActiv;
        double edfPercPreempts = (double) edfPreempts / edfActiv;
        double hybridPercPreempts = (double) hybridPreempts / hybridActiv;

        Writer wOutput = new BufferedWriter(new FileWriter(outputFilePathTotal, true));
        wOutput.write(Thread.currentThread().getName() + "; " + utilization + "; " + laxPerc + "; "
                + laxPreempts + "; " + laxActiv + "; " + laxPercPreempts + "; " + edfPerc + "; " + edfPreempts
                + "; " + edfActiv + "; " + edfPercPreempts + "; " + hybridPerc + "; " + hybridPreempts + "; "
                + hybridActiv + "; " + hybridPercPreempts + "\n");
        wOutput.close();

    } else {
        System.err.println("Wrong number of levels");
        System.exit(-1);
    }

    System.out.println("[BENCH Main] Done benchmarking U = " + utilization + " Levels " + nbLvls);
}

From source file:io.anserini.index.IndexTweets.java

@SuppressWarnings("static-access")
public static void main(String[] args) throws Exception {
    Options options = new Options();

    options.addOption(new Option(HELP_OPTION, "show help"));
    options.addOption(new Option(OPTIMIZE_OPTION, "merge indexes into a single segment"));
    options.addOption(new Option(STORE_TERM_VECTORS_OPTION, "store term vectors"));

    options.addOption(OptionBuilder.withArgName("dir").hasArg().withDescription("source collection directory")
            .create(COLLECTION_OPTION));
    options.addOption(
            OptionBuilder.withArgName("dir").hasArg().withDescription("index location").create(INDEX_OPTION));
    options.addOption(OptionBuilder.withArgName("file").hasArg().withDescription("file with deleted tweetids")
            .create(DELETES_OPTION));
    options.addOption(OptionBuilder.withArgName("id").hasArg().withDescription("max id").create(MAX_ID_OPTION));

    CommandLine cmdline = null;
    CommandLineParser parser = new GnuParser();
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        System.exit(-1);
    }

    if (cmdline.hasOption(HELP_OPTION) || !cmdline.hasOption(COLLECTION_OPTION)
            || !cmdline.hasOption(INDEX_OPTION)) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(IndexTweets.class.getName(), options);
        System.exit(-1);
    }

    String collectionPath = cmdline.getOptionValue(COLLECTION_OPTION);
    String indexPath = cmdline.getOptionValue(INDEX_OPTION);

    final FieldType textOptions = new FieldType();
    textOptions.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    textOptions.setStored(true);
    textOptions.setTokenized(true);
    if (cmdline.hasOption(STORE_TERM_VECTORS_OPTION)) {
        textOptions.setStoreTermVectors(true);
    }

    LOG.info("collection: " + collectionPath);
    LOG.info("index: " + indexPath);

    LongOpenHashSet deletes = null;
    if (cmdline.hasOption(DELETES_OPTION)) {
        deletes = new LongOpenHashSet();
        File deletesFile = new File(cmdline.getOptionValue(DELETES_OPTION));
        if (!deletesFile.exists()) {
            System.err.println("Error: " + deletesFile + " does not exist!");
            System.exit(-1);
        }
        LOG.info("Reading deletes from " + deletesFile);

        FileInputStream fin = new FileInputStream(deletesFile);
        byte[] ignoreBytes = new byte[2];
        fin.read(ignoreBytes); // "B", "Z" bytes from commandline tools
        BufferedReader br = new BufferedReader(new InputStreamReader(new CBZip2InputStream(fin)));

        String s;
        while ((s = br.readLine()) != null) {
            if (s.contains("\t")) {
                deletes.add(Long.parseLong(s.split("\t")[0]));
            } else {
                deletes.add(Long.parseLong(s));
            }
        }
        br.close();
        fin.close();
        LOG.info("Read " + deletes.size() + " tweetids from deletes file.");
    }

    long maxId = Long.MAX_VALUE;
    if (cmdline.hasOption(MAX_ID_OPTION)) {
        maxId = Long.parseLong(cmdline.getOptionValue(MAX_ID_OPTION));
        LOG.info("index: " + maxId);
    }

    long startTime = System.currentTimeMillis();
    File file = new File(collectionPath);
    if (!file.exists()) {
        System.err.println("Error: " + file + " does not exist!");
        System.exit(-1);
    }

    StatusStream stream = new JsonStatusCorpusReader(file);

    Directory dir = FSDirectory.open(Paths.get(indexPath));
    final IndexWriterConfig config = new IndexWriterConfig(ANALYZER);

    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);

    IndexWriter writer = new IndexWriter(dir, config);
    int cnt = 0;
    Status status;
    try {
        while ((status = stream.next()) != null) {
            if (status.getText() == null) {
                continue;
            }

            // Skip deletes tweetids.
            if (deletes != null && deletes.contains(status.getId())) {
                continue;
            }

            if (status.getId() > maxId) {
                continue;
            }

            cnt++;
            Document doc = new Document();
            doc.add(new LongPoint(StatusField.ID.name, status.getId()));
            doc.add(new StoredField(StatusField.ID.name, status.getId()));
            doc.add(new LongPoint(StatusField.EPOCH.name, status.getEpoch()));
            doc.add(new StoredField(StatusField.EPOCH.name, status.getEpoch()));
            doc.add(new TextField(StatusField.SCREEN_NAME.name, status.getScreenname(), Store.YES));

            doc.add(new Field(StatusField.TEXT.name, status.getText(), textOptions));

            doc.add(new IntPoint(StatusField.FRIENDS_COUNT.name, status.getFriendsCount()));
            doc.add(new StoredField(StatusField.FRIENDS_COUNT.name, status.getFriendsCount()));
            doc.add(new IntPoint(StatusField.FOLLOWERS_COUNT.name, status.getFollowersCount()));
            doc.add(new StoredField(StatusField.FOLLOWERS_COUNT.name, status.getFollowersCount()));
            doc.add(new IntPoint(StatusField.STATUSES_COUNT.name, status.getStatusesCount()));
            doc.add(new StoredField(StatusField.STATUSES_COUNT.name, status.getStatusesCount()));

            long inReplyToStatusId = status.getInReplyToStatusId();
            if (inReplyToStatusId > 0) {
                doc.add(new LongPoint(StatusField.IN_REPLY_TO_STATUS_ID.name, inReplyToStatusId));
                doc.add(new StoredField(StatusField.IN_REPLY_TO_STATUS_ID.name, inReplyToStatusId));
                doc.add(new LongPoint(StatusField.IN_REPLY_TO_USER_ID.name, status.getInReplyToUserId()));
                doc.add(new StoredField(StatusField.IN_REPLY_TO_USER_ID.name, status.getInReplyToUserId()));
            }

            String lang = status.getLang();
            if (!lang.equals("unknown")) {
                doc.add(new TextField(StatusField.LANG.name, status.getLang(), Store.YES));
            }

            long retweetStatusId = status.getRetweetedStatusId();
            if (retweetStatusId > 0) {
                doc.add(new LongPoint(StatusField.RETWEETED_STATUS_ID.name, retweetStatusId));
                doc.add(new StoredField(StatusField.RETWEETED_STATUS_ID.name, retweetStatusId));
                doc.add(new LongPoint(StatusField.RETWEETED_USER_ID.name, status.getRetweetedUserId()));
                doc.add(new StoredField(StatusField.RETWEETED_USER_ID.name, status.getRetweetedUserId()));
                doc.add(new IntPoint(StatusField.RETWEET_COUNT.name, status.getRetweetCount()));
                doc.add(new StoredField(StatusField.RETWEET_COUNT.name, status.getRetweetCount()));
                if (status.getRetweetCount() < 0 || status.getRetweetedStatusId() < 0) {
                    LOG.warn("Error parsing retweet fields of " + status.getId());
                }
            }

            writer.addDocument(doc);
            if (cnt % 100000 == 0) {
                LOG.info(cnt + " statuses indexed");
            }
        }

        LOG.info(String.format("Total of %s statuses added", cnt));

        if (cmdline.hasOption(OPTIMIZE_OPTION)) {
            LOG.info("Merging segments...");
            writer.forceMerge(1);
            LOG.info("Done!");
        }

        LOG.info("Total elapsed time: " + (System.currentTimeMillis() - startTime) + "ms");
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        writer.close();
        dir.close();
        stream.close();
    }
}

From source file:cc.twittertools.index.IndexStatuses.java

@SuppressWarnings("static-access")
public static void main(String[] args) throws Exception {
    Options options = new Options();

    options.addOption(new Option(HELP_OPTION, "show help"));
    options.addOption(new Option(OPTIMIZE_OPTION, "merge indexes into a single segment"));
    options.addOption(new Option(STORE_TERM_VECTORS_OPTION, "store term vectors"));

    options.addOption(OptionBuilder.withArgName("dir").hasArg().withDescription("source collection directory")
            .create(COLLECTION_OPTION));
    options.addOption(
            OptionBuilder.withArgName("dir").hasArg().withDescription("index location").create(INDEX_OPTION));
    options.addOption(OptionBuilder.withArgName("file").hasArg().withDescription("file with deleted tweetids")
            .create(DELETES_OPTION));
    options.addOption(OptionBuilder.withArgName("id").hasArg().withDescription("max id").create(MAX_ID_OPTION));

    CommandLine cmdline = null;
    CommandLineParser parser = new GnuParser();
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        System.exit(-1);
    }

    if (cmdline.hasOption(HELP_OPTION) || !cmdline.hasOption(COLLECTION_OPTION)
            || !cmdline.hasOption(INDEX_OPTION)) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(IndexStatuses.class.getName(), options);
        System.exit(-1);
    }

    String collectionPath = cmdline.getOptionValue(COLLECTION_OPTION);
    String indexPath = cmdline.getOptionValue(INDEX_OPTION);

    final FieldType textOptions = new FieldType();
    textOptions.setIndexed(true);
    textOptions.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    textOptions.setStored(true);
    textOptions.setTokenized(true);
    if (cmdline.hasOption(STORE_TERM_VECTORS_OPTION)) {
        textOptions.setStoreTermVectors(true);
    }

    LOG.info("collection: " + collectionPath);
    LOG.info("index: " + indexPath);

    LongOpenHashSet deletes = null;
    if (cmdline.hasOption(DELETES_OPTION)) {
        deletes = new LongOpenHashSet();
        File deletesFile = new File(cmdline.getOptionValue(DELETES_OPTION));
        if (!deletesFile.exists()) {
            System.err.println("Error: " + deletesFile + " does not exist!");
            System.exit(-1);
        }
        LOG.info("Reading deletes from " + deletesFile);

        FileInputStream fin = new FileInputStream(deletesFile);
        byte[] ignoreBytes = new byte[2];
        fin.read(ignoreBytes); // "B", "Z" bytes from commandline tools
        BufferedReader br = new BufferedReader(new InputStreamReader(new CBZip2InputStream(fin)));

        String s;
        while ((s = br.readLine()) != null) {
            if (s.contains("\t")) {
                deletes.add(Long.parseLong(s.split("\t")[0]));
            } else {
                deletes.add(Long.parseLong(s));
            }
        }
        br.close();
        fin.close();
        LOG.info("Read " + deletes.size() + " tweetids from deletes file.");
    }

    long maxId = Long.MAX_VALUE;
    if (cmdline.hasOption(MAX_ID_OPTION)) {
        maxId = Long.parseLong(cmdline.getOptionValue(MAX_ID_OPTION));
        LOG.info("index: " + maxId);
    }

    long startTime = System.currentTimeMillis();
    File file = new File(collectionPath);
    if (!file.exists()) {
        System.err.println("Error: " + file + " does not exist!");
        System.exit(-1);
    }

    StatusStream stream = new JsonStatusCorpusReader(file);

    Directory dir = FSDirectory.open(new File(indexPath));
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, IndexStatuses.ANALYZER);
    config.setOpenMode(OpenMode.CREATE);

    IndexWriter writer = new IndexWriter(dir, config);
    int cnt = 0;
    Status status;
    try {
        while ((status = stream.next()) != null) {
            if (status.getText() == null) {
                continue;
            }

            // Skip deletes tweetids.
            if (deletes != null && deletes.contains(status.getId())) {
                continue;
            }

            if (status.getId() > maxId) {
                continue;
            }

            cnt++;
            Document doc = new Document();
            doc.add(new LongField(StatusField.ID.name, status.getId(), Field.Store.YES));
            doc.add(new LongField(StatusField.EPOCH.name, status.getEpoch(), Field.Store.YES));
            doc.add(new TextField(StatusField.SCREEN_NAME.name, status.getScreenname(), Store.YES));

            doc.add(new Field(StatusField.TEXT.name, status.getText(), textOptions));

            doc.add(new IntField(StatusField.FRIENDS_COUNT.name, status.getFriendsCount(), Store.YES));
            doc.add(new IntField(StatusField.FOLLOWERS_COUNT.name, status.getFollowersCount(), Store.YES));
            doc.add(new IntField(StatusField.STATUSES_COUNT.name, status.getStatusesCount(), Store.YES));

            long inReplyToStatusId = status.getInReplyToStatusId();
            if (inReplyToStatusId > 0) {
                doc.add(new LongField(StatusField.IN_REPLY_TO_STATUS_ID.name, inReplyToStatusId,
                        Field.Store.YES));
                doc.add(new LongField(StatusField.IN_REPLY_TO_USER_ID.name, status.getInReplyToUserId(),
                        Field.Store.YES));
            }

            String lang = status.getLang();
            if (!lang.equals("unknown")) {
                doc.add(new TextField(StatusField.LANG.name, status.getLang(), Store.YES));
            }

            long retweetStatusId = status.getRetweetedStatusId();
            if (retweetStatusId > 0) {
                doc.add(new LongField(StatusField.RETWEETED_STATUS_ID.name, retweetStatusId, Field.Store.YES));
                doc.add(new LongField(StatusField.RETWEETED_USER_ID.name, status.getRetweetedUserId(),
                        Field.Store.YES));
                doc.add(new IntField(StatusField.RETWEET_COUNT.name, status.getRetweetCount(), Store.YES));
                if (status.getRetweetCount() < 0 || status.getRetweetedStatusId() < 0) {
                    LOG.warn("Error parsing retweet fields of " + status.getId());
                }
            }

            writer.addDocument(doc);
            if (cnt % 100000 == 0) {
                LOG.info(cnt + " statuses indexed");
            }
        }

        LOG.info(String.format("Total of %s statuses added", cnt));

        if (cmdline.hasOption(OPTIMIZE_OPTION)) {
            LOG.info("Merging segments...");
            writer.forceMerge(1);
            LOG.info("Done!");
        }

        LOG.info("Total elapsed time: " + (System.currentTimeMillis() - startTime) + "ms");
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        writer.close();
        dir.close();
        stream.close();
    }
}