Example usage for java.io BufferedReader readLine

Introduction

This page presents example usages of java.io.BufferedReader.readLine(), collected from a variety of open-source files.

Prototype

public String readLine() throws IOException 

Document

Reads a line of text. A line is considered to be terminated by a line feed ('\n'), a carriage return ('\r'), or a carriage return followed immediately by a line feed. The returned string omits the line terminator, and readLine() returns null once the end of the stream has been reached.
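
A minimal, self-contained sketch of the typical readLine() loop is shown below; the file name example.txt is only a placeholder.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class ReadLineExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the reader even if an exception occurs
        try (BufferedReader reader = new BufferedReader(new FileReader("example.txt"))) {
            String line;
            // readLine() strips the line terminator and returns null at end of stream
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}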

Usage

From source file: com.icantrap.collections.dawg.Dawg.java

public static void main(String[] args) throws IOException {
    Dawg dawg = Dawg.load(Dawg.class.getResourceAsStream("/twl06.dat"));

    InputStreamReader isr = new InputStreamReader(System.in);
    BufferedReader reader = new BufferedReader(isr);

    StopWatch stopWatch = new StopWatch();

    while (true) {
        System.out.print("letters:  ");
        String letters = reader.readLine();
        System.out.print("pattern:  ");
        String pattern = reader.readLine();

        stopWatch.reset();
        stopWatch.start();
        Result[] results = dawg.subwords(letters.toUpperCase(), pattern.toUpperCase());
        stopWatch.stop();

        if (results != null) {
            System.out.println();

            for (Result result : results) {
                StringBuilder message = new StringBuilder(result.word);
                if (result.wildcardPositions != null) {
                    message.append(" with wildcards at");
                    for (int position : result.wildcardPositions)
                        message.append(" ").append(position);
                }
                System.out.println(message.toString());
                System.out.println();
            }

            System.out.println("Found " + results.length + " matches in " + stopWatch.getTime() + " ms.");
        }

        System.out.println();
    }
}
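
Note that readLine() returns null at end of stream, so if standard input is closed (for example with Ctrl-D) the call to letters.toUpperCase() above throws a NullPointerException. A minimal defensive sketch of the same prompt loop, with the rest of the body unchanged:

    while (true) {
        System.out.print("letters:  ");
        String letters = reader.readLine();
        if (letters == null)
            break; // end of stream reached
        System.out.print("pattern:  ");
        String pattern = reader.readLine();
        if (pattern == null)
            break; // end of stream reached
        // ... remainder of the loop body as above ...
    }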

From source file: edu.nyu.vida.data_polygamy.scalar_function_computation.Aggregation.java

/**
 * @param args
 */
@SuppressWarnings({ "deprecation" })
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

    Options options = new Options();

    Option forceOption = new Option("f", "force", false,
            "force the computation of the aggregate functions " + "even if files already exist");
    forceOption.setRequired(false);
    options.addOption(forceOption);

    Option gOption = new Option("g", "group", true, "set group of datasets for which the aggregate functions"
            + " will be computed, followed by their temporal and spatial attribute indices");
    gOption.setRequired(true);
    gOption.setArgName("GROUP");
    gOption.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(gOption);

    Option machineOption = new Option("m", "machine", true, "machine identifier");
    machineOption.setRequired(true);
    machineOption.setArgName("MACHINE");
    machineOption.setArgs(1);
    options.addOption(machineOption);

    Option nodesOption = new Option("n", "nodes", true, "number of nodes");
    nodesOption.setRequired(true);
    nodesOption.setArgName("NODES");
    nodesOption.setArgs(1);
    options.addOption(nodesOption);

    Option s3Option = new Option("s3", "s3", false, "data on Amazon S3");
    s3Option.setRequired(false);
    options.addOption(s3Option);

    Option awsAccessKeyIdOption = new Option("aws_id", "aws-id", true,
            "aws access key id; " + "this is required if the execution is on aws");
    awsAccessKeyIdOption.setRequired(false);
    awsAccessKeyIdOption.setArgName("AWS-ACCESS-KEY-ID");
    awsAccessKeyIdOption.setArgs(1);
    options.addOption(awsAccessKeyIdOption);

    Option awsSecretAccessKeyOption = new Option("aws_key", "aws-key", true,
            "aws secret access key; " + "this is required if the execution is on aws");
    awsSecretAccessKeyOption.setRequired(false);
    awsSecretAccessKeyOption.setArgName("AWS-SECRET-ACCESS-KEY");
    awsSecretAccessKeyOption.setArgs(1);
    options.addOption(awsSecretAccessKeyOption);

    Option bucketOption = new Option("b", "s3-bucket", true,
            "bucket on s3; " + "this is required if the execution is on aws");
    bucketOption.setRequired(false);
    bucketOption.setArgName("S3-BUCKET");
    bucketOption.setArgs(1);
    options.addOption(bucketOption);

    Option helpOption = new Option("h", "help", false, "display this message");
    helpOption.setRequired(false);
    options.addOption(helpOption);

    HelpFormatter formatter = new HelpFormatter();
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        formatter.printHelp("hadoop jar data-polygamy.jar "
                + "edu.nyu.vida.data_polygamy.scalar_function_computation.Aggregation", options, true);
        System.exit(0);
    }

    if (cmd.hasOption("h")) {
        formatter.printHelp("hadoop jar data-polygamy.jar "
                + "edu.nyu.vida.data_polygamy.scalar_function_computation.Aggregation", options, true);
        System.exit(0);
    }

    boolean s3 = cmd.hasOption("s3");
    String s3bucket = "";
    String awsAccessKeyId = "";
    String awsSecretAccessKey = "";

    if (s3) {
        if ((!cmd.hasOption("aws_id")) || (!cmd.hasOption("aws_key")) || (!cmd.hasOption("b"))) {
            System.out.println(
                    "Arguments 'aws_id', 'aws_key', and 'b'" + " are mandatory if execution is on AWS.");
            formatter.printHelp(
                    "hadoop jar data-polygamy.jar "
                            + "edu.nyu.vida.data_polygamy.scalar_function_computation.Aggregation",
                    options, true);
            System.exit(0);
        }
        s3bucket = cmd.getOptionValue("b");
        awsAccessKeyId = cmd.getOptionValue("aws_id");
        awsSecretAccessKey = cmd.getOptionValue("aws_key");
    }

    boolean snappyCompression = false;
    boolean bzip2Compression = false;
    String machine = cmd.getOptionValue("m");
    int nbNodes = Integer.parseInt(cmd.getOptionValue("n"));

    Configuration s3conf = new Configuration();
    if (s3) {
        s3conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        s3conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        s3conf.set("bucket", s3bucket);
    }

    String datasetNames = "";
    String datasetIds = "";
    String preProcessingDatasets = "";

    ArrayList<String> shortDataset = new ArrayList<String>();
    ArrayList<String> shortDatasetAggregation = new ArrayList<String>();
    HashMap<String, String> datasetTempAtt = new HashMap<String, String>();
    HashMap<String, String> datasetSpatialAtt = new HashMap<String, String>();
    HashMap<String, String> preProcessingDataset = new HashMap<String, String>();
    HashMap<String, String> datasetId = new HashMap<String, String>();

    boolean removeExistingFiles = cmd.hasOption("f");
    String[] datasetArgs = cmd.getOptionValues("g");

    for (int i = 0; i < datasetArgs.length; i += 3) {
        String dataset = datasetArgs[i];

        // getting pre-processing
        String tempPreProcessing = FrameworkUtils.searchPreProcessing(dataset, s3conf, s3);
        if (tempPreProcessing == null) {
            System.out.println("No pre-processing available for " + dataset);
            continue;
        }
        preProcessingDataset.put(dataset, tempPreProcessing);

        shortDataset.add(dataset);
        datasetTempAtt.put(dataset, ("null".equals(datasetArgs[i + 1]) ? null : datasetArgs[i + 1]));
        datasetSpatialAtt.put(dataset, ("null".equals(datasetArgs[i + 2]) ? null : datasetArgs[i + 2]));

        datasetId.put(dataset, null);
    }

    if (shortDataset.size() == 0) {
        System.out.println("No datasets to process.");
        System.exit(0);
    }

    // getting dataset id

    Path path = null;
    FileSystem fs = null;

    if (s3) {
        path = new Path(s3bucket + FrameworkUtils.datasetsIndexDir);
        fs = FileSystem.get(path.toUri(), s3conf);
    } else {
        fs = FileSystem.get(new Configuration());
        path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.datasetsIndexDir);
    }
    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)));
    String line = br.readLine();
    while (line != null) {
        String[] dt = line.split("\t");
        if (datasetId.containsKey(dt[0])) {
            datasetId.put(dt[0], dt[1]);
            datasetNames += dt[0] + ",";
            datasetIds += dt[1] + ",";
        }
        line = br.readLine();
    }
    br.close();
    if (s3)
        fs.close();

    datasetNames = datasetNames.substring(0, datasetNames.length() - 1);
    datasetIds = datasetIds.substring(0, datasetIds.length() - 1);
    Iterator<String> it = shortDataset.iterator();
    while (it.hasNext()) {
        String dataset = it.next();
        if (datasetId.get(dataset) == null) {
            System.out.println("No dataset id for " + dataset);
            System.exit(0);
        }
    }

    FrameworkUtils.createDir(s3bucket + FrameworkUtils.aggregatesDir, s3conf, s3);

    // getting smallest resolution

    HashMap<String, String> tempResMap = new HashMap<String, String>();
    HashMap<String, String> spatialResMap = new HashMap<String, String>();

    HashMap<String, String> datasetTemporalStrMap = new HashMap<String, String>();
    HashMap<String, String> datasetSpatialStrMap = new HashMap<String, String>();

    HashSet<String> input = new HashSet<String>();

    for (String dataset : shortDataset) {

        String[] datasetArray = preProcessingDataset.get(dataset).split("-");

        String datasetTemporalStr = datasetArray[datasetArray.length - 2];
        int datasetTemporal = utils.temporalResolution(datasetTemporalStr);

        String datasetSpatialStr = datasetArray[datasetArray.length - 1];
        int datasetSpatial = utils.spatialResolution(datasetSpatialStr);

        // finding all possible resolutions

        String[] temporalResolutions = FrameworkUtils.getAggTempResolutions(datasetTemporal);
        String[] spatialResolutions = FrameworkUtils.getAggSpatialResolutions(datasetSpatial);

        String temporalResolution = "";
        String spatialResolution = "";

        String tempRes = "";
        String spatialRes = "";

        boolean dataAdded = false;

        for (int i = 0; i < temporalResolutions.length; i++) {
            for (int j = 0; j < spatialResolutions.length; j++) {

                temporalResolution = temporalResolutions[i];
                spatialResolution = spatialResolutions[j];

                String aggregatesOutputFileName = s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset + "/";

                if (removeExistingFiles) {
                    FrameworkUtils.removeFile(aggregatesOutputFileName, s3conf, s3);
                }

                if (!FrameworkUtils.fileExists(aggregatesOutputFileName, s3conf, s3)) {

                    dataAdded = true;

                    tempRes += temporalResolution + "-";
                    spatialRes += spatialResolution + "-";
                }
            }
        }

        if (dataAdded) {
            input.add(s3bucket + FrameworkUtils.preProcessingDir + "/" + preProcessingDataset.get(dataset));
            shortDatasetAggregation.add(dataset);

            tempResMap.put(dataset, tempRes.substring(0, tempRes.length() - 1));
            spatialResMap.put(dataset, spatialRes.substring(0, spatialRes.length() - 1));

            datasetTemporalStrMap.put(dataset, datasetTemporalStr);
            datasetSpatialStrMap.put(dataset, datasetSpatialStr);
        }
    }

    if (input.isEmpty()) {
        System.out.println("All the input datasets have aggregates.");
        System.out.println("Use -f in the beginning of the command line to force the computation.");
        System.exit(0);
    }

    it = input.iterator();
    while (it.hasNext()) {
        preProcessingDatasets += it.next() + ",";
    }

    Job aggJob = null;
    String aggregatesOutputDir = s3bucket + FrameworkUtils.aggregatesDir + "/tmp/";
    String jobName = "aggregates";

    FrameworkUtils.removeFile(aggregatesOutputDir, s3conf, s3);

    Configuration aggConf = new Configuration();
    Machine machineConf = new Machine(machine, nbNodes);

    aggConf.set("dataset-name", datasetNames);
    aggConf.set("dataset-id", datasetIds);

    for (int i = 0; i < shortDatasetAggregation.size(); i++) {
        String dataset = shortDatasetAggregation.get(i);
        String id = datasetId.get(dataset);
        aggConf.set("dataset-" + id + "-temporal-resolutions", tempResMap.get(dataset));
        aggConf.set("dataset-" + id + "-spatial-resolutions", spatialResMap.get(dataset));
        aggConf.set("dataset-" + id + "-temporal-att", datasetTempAtt.get(dataset));
        aggConf.set("dataset-" + id + "-spatial-att", datasetSpatialAtt.get(dataset));
        aggConf.set("dataset-" + id + "-temporal", datasetTemporalStrMap.get(dataset));
        aggConf.set("dataset-" + id + "-spatial", datasetSpatialStrMap.get(dataset));

        if (s3)
            aggConf.set("dataset-" + id,
                    s3bucket + FrameworkUtils.preProcessingDir + "/" + preProcessingDataset.get(dataset));
        else
            aggConf.set("dataset-" + id, FileSystem.get(new Configuration()).getHomeDirectory() + "/"
                    + FrameworkUtils.preProcessingDir + "/" + preProcessingDataset.get(dataset));
    }

    aggConf.set("mapreduce.tasktracker.map.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    aggConf.set("mapreduce.tasktracker.reduce.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    aggConf.set("mapreduce.jobtracker.maxtasks.perjob", "-1");
    aggConf.set("mapreduce.reduce.shuffle.parallelcopies", "20");
    aggConf.set("mapreduce.input.fileinputformat.split.minsize", "0");
    aggConf.set("mapreduce.task.io.sort.mb", "200");
    aggConf.set("mapreduce.task.io.sort.factor", "100");
    machineConf.setMachineConfiguration(aggConf);

    if (s3) {
        machineConf.setMachineConfiguration(aggConf);
        aggConf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        aggConf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
    }

    if (snappyCompression) {
        aggConf.set("mapreduce.map.output.compress", "true");
        aggConf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
        //aggConf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    }
    if (bzip2Compression) {
        aggConf.set("mapreduce.map.output.compress", "true");
        aggConf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
        //aggConf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
    }

    aggJob = new Job(aggConf);
    aggJob.setJobName(jobName);

    aggJob.setMapOutputKeyClass(SpatioTemporalWritable.class);
    aggJob.setMapOutputValueClass(AggregationArrayWritable.class);
    aggJob.setOutputKeyClass(SpatioTemporalWritable.class);
    aggJob.setOutputValueClass(FloatArrayWritable.class);
    //aggJob.setOutputKeyClass(Text.class);
    //aggJob.setOutputValueClass(Text.class);

    aggJob.setMapperClass(AggregationMapper.class);
    aggJob.setCombinerClass(AggregationCombiner.class);
    aggJob.setReducerClass(AggregationReducer.class);
    aggJob.setNumReduceTasks(machineConf.getNumberReduces());

    aggJob.setInputFormatClass(SequenceFileInputFormat.class);
    //aggJob.setOutputFormatClass(SequenceFileOutputFormat.class);
    LazyOutputFormat.setOutputFormatClass(aggJob, SequenceFileOutputFormat.class);
    //LazyOutputFormat.setOutputFormatClass(aggJob, TextOutputFormat.class);
    SequenceFileOutputFormat.setCompressOutput(aggJob, true);
    SequenceFileOutputFormat.setOutputCompressionType(aggJob, CompressionType.BLOCK);

    FileInputFormat.setInputDirRecursive(aggJob, true);
    FileInputFormat.setInputPaths(aggJob,
            preProcessingDatasets.substring(0, preProcessingDatasets.length() - 1));
    FileOutputFormat.setOutputPath(aggJob, new Path(aggregatesOutputDir));

    aggJob.setJarByClass(Aggregation.class);

    long start = System.currentTimeMillis();
    aggJob.submit();
    aggJob.waitForCompletion(true);
    System.out.println(jobName + "\t" + (System.currentTimeMillis() - start));

    // moving files to right place
    for (String dataset : shortDatasetAggregation) {
        String from = s3bucket + FrameworkUtils.aggregatesDir + "/tmp/" + dataset + "/";
        String to = s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset + "/";
        FrameworkUtils.renameFile(from, to, s3conf, s3);
    }

}
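
The dataset-index scan above reads the first line before the loop and re-reads at the bottom; a more compact sketch of the same scan (same fs, path, and datasetId as in the code above), using try-with-resources so the reader is closed even on error:

    try (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)))) {
        String line;
        while ((line = br.readLine()) != null) {
            String[] dt = line.split("\t");
            if (datasetId.containsKey(dt[0])) {
                datasetId.put(dt[0], dt[1]);
                datasetNames += dt[0] + ",";
                datasetIds += dt[1] + ",";
            }
        }
    }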

From source file: com.kse.bigdata.main.Driver.java

public static void main(String[] args) throws Exception {
    /**********************************************************************************
     **    Merge the source files into one.                                          **
     **    Should change the directories of each file before executing the program   **
    ***********************************************************************************/
    //        String inputFileDirectory = "/media/bk/??/BigData_Term_Project/Debug";
    //        String resultFileDirectory = "/media/bk/??/BigData_Term_Project/debug.csv";
    //        File resultFile = new File(resultFileDirectory);
    //        if(!resultFile.exists())
    //            new SourceFileMerger(inputFileDirectory, resultFileDirectory).mergeFiles();

    /**********************************************************************************
     * Hadoop Operation.
     * Before starting, check the length of the sequence we want to predict.
     **********************************************************************************/

    Configuration conf = new Configuration();

    //Compress intermediate map output with Snappy
    conf.setBoolean("mapred.compress.map.output", true);
    conf.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");

    //Enable Profiling
    //conf.setBoolean("mapred.task.profile", true);

    String testPath = null;
    String inputPath = null;
    String outputPath = null;

    int sampleSize = 1;
    ArrayList<String> results = new ArrayList<String>();

    for (int index = 0; index < args.length; index++) {

        /*
         * Mandatory command
         */
        //Extract input path string from command line.
        if (args[index].equals("-in"))
            inputPath = args[index + 1];

        //Extract output path string from command line.
        if (args[index].equals("-out"))
            outputPath = args[index + 1];

        //Extract test data path string from command line.
        if (args[index].equals("-test"))
            testPath = args[index + 1];

        /*
         * Optional command
         */
        //Extract the number of nearest neighbors.
        if (args[index].equals("-nn"))
            conf.setInt(Reduce.NUMBER_OF_NEAREAST_NEIGHBOR, Integer.parseInt(args[index + 1]));

        //Whether the job uses normalization.
        if (args[index].equals("-norm"))
            conf.setBoolean(Map.NORMALIZATION, true);

        //Extract the sample size to test.
        if (args[index].equals("-s"))
            sampleSize = Integer.valueOf(args[index + 1]);

        //Whether job uses mean or median
        //[Default : mean]
        if (args[index].equals("-med"))
            conf.setBoolean(Reduce.MEDIAN, true);
    }

    String outputFileName = "part-r-00000";
    SequenceSampler sampler = new SequenceSampler(testPath, sampleSize);
    LinkedList<Sequence> testSequences = sampler.getRandomSample();

    //        Test Sequence
    //        String testSeqString = "13.591-13.674-13.778-13.892-13.958-14.049-14.153-14.185-14.169-14.092-13.905-13.702-13.438-13.187-13.0-12.914-12.868-12.766-12.62-12.433-12.279-12.142-12.063-12.025-100";
    //        Sequence testSeq = new Sequence(testSeqString);
    //        LinkedList<Sequence> testSequences = new LinkedList<>();
    //        testSequences.add(testSeq);

    for (Sequence seq : testSequences) {

        /*
         ********************  Hadoop Launch ***********************
         */

        System.out.println(seq.getTailString());

        conf.set(Map.INPUT_SEQUENCE, seq.toString());

        Job job = new Job(conf);
        job.setJarByClass(Driver.class);
        job.setJobName("term-project-driver");

        job.setMapperClass(Map.class);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(Text.class);

        //          TODO: find another way to implement the combiner class;
        //          the current implementation does not help the job.
        //          job.setCombinerClass(Combiner.class);

        //Use a single reduce task to keep the 100 nearest neighbors in a sorted set.
        job.setNumReduceTasks(1);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        job.waitForCompletion(true);

        /*
         * When the job finishes, read its result and store it in the results list.
         */
        try {
            FileSystem hdfs = FileSystem.get(new Configuration());
            BufferedReader fileReader = new BufferedReader(
                    new InputStreamReader(hdfs.open(new Path(outputPath + "/" + outputFileName))));

            String line;
            while ((line = fileReader.readLine()) != null) {
                results.add(seq.getSeqString() + " " + line);
            }

            fileReader.close();

            hdfs.delete(new Path(outputPath), true);
            hdfs.close();

        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
        }
    }

    /*
     * When all jobs finish, write the results to the output/result.csv file.
     */
    String finalOutputPath = "output/result.csv";
    try {
        FileSystem hdfs = FileSystem.get(new Configuration());
        Path file = new Path(finalOutputPath);
        if (hdfs.exists(file)) {
            hdfs.delete(file, true);
        }

        OutputStream os = hdfs.create(file);
        PrintWriter printWriter = new PrintWriter(new OutputStreamWriter(os, "UTF-8"));

        //CSV File Header
        printWriter.println("Actual,Predicted,MER,MAE");
        printWriter.flush();

        for (String result : results) {
            String[] tokens = result.split("\\s+");

            printWriter.println(tokens[0] + "," + tokens[1] + "," + tokens[2] + "," + tokens[3]);
            printWriter.flush();
        }

        printWriter.close();
        hdfs.close();
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(1);
    }

}

From source file: com.bluemarsh.jswat.console.Main.java

/**
 * Kicks off the application.
 *
 * @param  args  the command line arguments.
 */
public static void main(String[] args) {
    //
    // An attempt was made early on to use the Console class in java.io,
    // but that is not designed to handle asynchronous output from
    // multiple threads, so we just use the usual System.out for output
    // and System.in for input. Note that automatic flushing is used to
    // ensure output is shown in a timely fashion.
    //

    //
    // Where console mode seems to work:
    // - bash
    // - emacs
    //
    // Where console mode does not seem to work:
    // - NetBeans: output from event listeners is never shown and the
    //   cursor sometimes lags behind the output
    //

    // Turn on flushing so printing the prompt will flush
    // all buffered output generated from other threads.
    PrintWriter output = new PrintWriter(System.out, true);

    // Make sure we have the JPDA classes.
    try {
        Bootstrap.virtualMachineManager();
    } catch (NoClassDefFoundError ncdfe) {
        output.println(NbBundle.getMessage(Main.class, "MSG_Main_NoJPDA"));
        System.exit(1);
    }

    // Ensure we can create the user directory by requesting the
    // platform service. Simply asking for it has the desired effect.
    PlatformProvider.getPlatformService();

    // Define the logging configuration.
    LogManager manager = LogManager.getLogManager();
    InputStream is = Main.class.getResourceAsStream("logging.properties");
    try {
        manager.readConfiguration(is);
    } catch (IOException ioe) {
        ioe.printStackTrace();
        System.exit(1);
    }

    // Print out some useful debugging information.
    logSystemDetails();

    // Add a shutdown hook to make sure we exit cleanly.
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {

        @Override
        public void run() {
            // Save the command aliases.
            CommandParser parser = CommandProvider.getCommandParser();
            parser.saveSettings();
            // Save the runtimes to persistent storage.
            RuntimeManager rm = RuntimeProvider.getRuntimeManager();
            rm.saveRuntimes();
            // Save the sessions to persistent storage and have them
            // close down in preparation to exit.
            SessionManager sm = SessionProvider.getSessionManager();
            sm.saveSessions(true);
        }
    }));

    // Initialize the command parser and load the aliases.
    CommandParser parser = CommandProvider.getCommandParser();
    parser.loadSettings();
    parser.setOutput(output);

    // Create an OutputAdapter to display debuggee output.
    OutputAdapter adapter = new OutputAdapter(output);
    SessionManager sessionMgr = SessionProvider.getSessionManager();
    sessionMgr.addSessionManagerListener(adapter);
    // Create a SessionWatcher to monitor the session status.
    SessionWatcher swatcher = new SessionWatcher();
    sessionMgr.addSessionManagerListener(swatcher);
    // Create a BreakpointWatcher to monitor the breakpoints.
    BreakpointWatcher bwatcher = new BreakpointWatcher();
    sessionMgr.addSessionManagerListener(bwatcher);

    // Add the watchers and adapters to the open sessions.
    Iterator<Session> iter = sessionMgr.iterateSessions();
    while (iter.hasNext()) {
        Session s = iter.next();
        s.addSessionListener(adapter);
        s.addSessionListener(swatcher);
    }

    // Find and run the RC file.
    try {
        runStartupFile(parser, output);
    } catch (IOException ioe) {
        logger.log(Level.SEVERE, null, ioe);
    }

    // Process command line arguments.
    try {
        processArguments(args);
    } catch (ParseException pe) {
        // Report the problem and keep going.
        System.err.println("Option parsing failed: " + pe.getMessage());
        logger.log(Level.SEVERE, null, pe);
    }

    // Display a helpful greeting.
    output.println(NbBundle.getMessage(Main.class, "MSG_Main_Welcome"));
    if (jdbEmulationMode) {
        output.println(NbBundle.getMessage(Main.class, "MSG_Main_Jdb_Emulation"));
    }

    // Enter the main loop of processing user input.
    BufferedReader input = new BufferedReader(new InputStreamReader(System.in));
    while (true) {
        // Keep the prompt format identical to jdb for compatibility
        // with emacs and other possible wrappers.
        output.print("> ");
        output.flush();
        try {
            String command = input.readLine();
            // A null value indicates end of stream.
            if (command != null) {
                performCommand(output, parser, command);
            }
            // Sleep briefly to give the event processing threads,
            // and the automatic output flushing, a chance to catch
            // up before printing the input prompt again.
            Thread.sleep(250);
        } catch (InterruptedException ie) {
            logger.log(Level.WARNING, null, ie);
        } catch (IOException ioe) {
            logger.log(Level.SEVERE, null, ioe);
            output.println(NbBundle.getMessage(Main.class, "ERR_Main_IOError", ioe));
        } catch (Exception x) {
            // Don't ever let an internal bug (e.g. in the command parser)
            // hork the console, as it really irritates people.
            logger.log(Level.SEVERE, null, x);
            output.println(NbBundle.getMessage(Main.class, "ERR_Main_Exception", x));
        }
    }
}

From source file: com.l2jfree.loginserver.tools.L2AccountManager.java

/**
 * Launches the interactive account manager.
 *
 * @param args ignored
 */
public static void main(String[] args) {
    // LOW rework this crap
    Util.printSection("Account Management");

    _log.info("Please choose:");
    //_log.info("list - list registered accounts");
    _log.info("reg - register a new account");
    _log.info("rem - remove a registered account");
    _log.info("prom - promote a registered account");
    _log.info("dem - demote a registered account");
    _log.info("ban - ban a registered account");
    _log.info("unban - unban a registered account");
    _log.info("quit - exit this application");

    BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
    L2AccountManager acm = new L2AccountManager();

    String line;
    try {
        while ((line = br.readLine()) != null) {
            line = line.trim();
            Connection con = null;
            switch (acm.getState()) {
            case USER_NAME:
                line = line.toLowerCase();
                try {
                    con = L2Database.getConnection();
                    PreparedStatement ps = con
                            .prepareStatement("SELECT superuser FROM account WHERE username LIKE ?");
                    ps.setString(1, line);
                    ResultSet rs = ps.executeQuery();
                    if (!rs.next()) {
                        acm.setUser(line);

                        _log.info("Desired password:");
                        acm.setState(ManagerState.PASSWORD);
                    } else {
                        _log.info("User name already in use.");
                        acm.setState(ManagerState.INITIAL_CHOICE);
                    }
                    rs.close();
                    ps.close();
                } catch (SQLException e) {
                    _log.error("Could not access database!", e);
                    acm.setState(ManagerState.INITIAL_CHOICE);
                } finally {
                    L2Database.close(con);
                }
                break;
            case PASSWORD:
                try {
                    MessageDigest sha = MessageDigest.getInstance("SHA");
                    byte[] pass = sha.digest(line.getBytes("US-ASCII"));
                    acm.setPass(HexUtil.bytesToHexString(pass));
                } catch (NoSuchAlgorithmException e) {
                    _log.fatal("SHA1 is not available!", e);
                    Shutdown.exit(TerminationStatus.ENVIRONMENT_MISSING_COMPONENT_OR_SERVICE);
                } catch (UnsupportedEncodingException e) {
                    _log.fatal("ASCII is not available!", e);
                    Shutdown.exit(TerminationStatus.ENVIRONMENT_MISSING_COMPONENT_OR_SERVICE);
                }
                _log.info("Super user: [y/n]");
                acm.setState(ManagerState.SUPERUSER);
                break;
            case SUPERUSER:
                try {
                    if (line.length() != 1)
                        throw new IllegalArgumentException("One char required.");
                    else if (line.charAt(0) == 'y')
                        acm.setSuper(true);
                    else if (line.charAt(0) == 'n')
                        acm.setSuper(false);
                    else
                        throw new IllegalArgumentException("Invalid choice.");

                    _log.info("Date of birth: [yyyy-mm-dd]");
                    acm.setState(ManagerState.DOB);
                } catch (IllegalArgumentException e) {
                    _log.info("[y/n]?");
                }
                break;
            case DOB:
                try {
                    Date d = Date.valueOf(line);
                    if (d.after(new Date(System.currentTimeMillis())))
                        throw new IllegalArgumentException("Future date specified.");
                    acm.setDob(d);

                    _log.info("Ban reason ID or nothing:");
                    acm.setState(ManagerState.SUSPENDED);
                } catch (IllegalArgumentException e) {
                    _log.info("[yyyy-mm-dd] in the past:");
                }
                break;
            case SUSPENDED:
                try {
                    if (line.length() > 0) {
                        int id = Integer.parseInt(line);
                        acm.setBan(L2BanReason.getById(id));
                    } else
                        acm.setBan(null);

                    try {
                        con = L2Database.getConnection();
                        PreparedStatement ps = con.prepareStatement(
                                "INSERT INTO account (username, password, superuser, birthDate, banReason) VALUES (?, ?, ?, ?, ?)");
                        ps.setString(1, acm.getUser());
                        ps.setString(2, acm.getPass());
                        ps.setBoolean(3, acm.isSuper());
                        ps.setDate(4, acm.getDob());
                        L2BanReason lbr = acm.getBan();
                        if (lbr == null)
                            ps.setNull(5, Types.INTEGER);
                        else
                            ps.setInt(5, lbr.getId());
                        ps.executeUpdate();
                        _log.info("Account " + acm.getUser() + " has been registered.");
                        ps.close();
                    } catch (SQLException e) {
                        _log.error("Could not register an account!", e);
                    } finally {
                        L2Database.close(con);
                    }
                    acm.setState(ManagerState.INITIAL_CHOICE);
                } catch (NumberFormatException e) {
                    _log.info("Ban reason ID or nothing:");
                }
                break;
            case REMOVE:
                acm.setUser(line.toLowerCase());
                try {
                    con = L2Database.getConnection();
                    PreparedStatement ps = con.prepareStatement("DELETE FROM account WHERE username LIKE ?");
                    ps.setString(1, acm.getUser());
                    int cnt = ps.executeUpdate();
                    if (cnt > 0)
                        _log.info("Account " + acm.getUser() + " has been removed.");
                    else
                        _log.info("Account " + acm.getUser() + " does not exist!");
                    ps.close();
                } catch (SQLException e) {
                    _log.error("Could not remove an account!", e);
                } finally {
                    L2Database.close(con);
                }
                acm.setState(ManagerState.INITIAL_CHOICE);
                break;
            case PROMOTE:
                acm.setUser(line.toLowerCase());
                try {
                    con = L2Database.getConnection();
                    PreparedStatement ps = con
                            .prepareStatement("UPDATE account SET superuser = ? WHERE username LIKE ?");
                    ps.setBoolean(1, true);
                    ps.setString(2, acm.getUser());
                    int cnt = ps.executeUpdate();
                    if (cnt > 0)
                        _log.info("Account " + acm.getUser() + " has been promoted.");
                    else
                        _log.info("Account " + acm.getUser() + " does not exist!");
                    ps.close();
                } catch (SQLException e) {
                    _log.error("Could not promote an account!", e);
                } finally {
                    L2Database.close(con);
                }
                acm.setState(ManagerState.INITIAL_CHOICE);
                break;
            case DEMOTE:
                acm.setUser(line.toLowerCase());
                try {
                    con = L2Database.getConnection();
                    PreparedStatement ps = con
                            .prepareStatement("UPDATE account SET superuser = ? WHERE username LIKE ?");
                    ps.setBoolean(1, false);
                    ps.setString(2, acm.getUser());
                    int cnt = ps.executeUpdate();
                    if (cnt > 0)
                        _log.info("Account " + acm.getUser() + " has been demoted.");
                    else
                        _log.info("Account " + acm.getUser() + " does not exist!");
                    ps.close();
                } catch (SQLException e) {
                    _log.error("Could not demote an account!", e);
                } finally {
                    L2Database.close(con);
                }
                acm.setState(ManagerState.INITIAL_CHOICE);
                break;
            case UNBAN:
                acm.setUser(line.toLowerCase());
                try {
                    con = L2Database.getConnection();
                    PreparedStatement ps = con
                            .prepareStatement("UPDATE account SET banReason = ? WHERE username LIKE ?");
                    ps.setNull(1, Types.INTEGER);
                    ps.setString(2, acm.getUser());
                    int cnt = ps.executeUpdate();
                    if (cnt > 0)
                        _log.info("Account " + acm.getUser() + " has been unbanned.");
                    else
                        _log.info("Account " + acm.getUser() + " does not exist!");
                    ps.close();
                } catch (SQLException e) {
                    _log.error("Could not demote an account!", e);
                } finally {
                    L2Database.close(con);
                }
                acm.setState(ManagerState.INITIAL_CHOICE);
                break;
            case BAN:
                line = line.toLowerCase();
                try {
                    con = L2Database.getConnection();
                    PreparedStatement ps = con
                            .prepareStatement("SELECT superuser FROM account WHERE username LIKE ?");
                    ps.setString(1, line);
                    ResultSet rs = ps.executeQuery();
                    if (rs.next()) {
                        acm.setUser(line);

                        _log.info("Ban reason ID:");
                        acm.setState(ManagerState.REASON);
                    } else {
                        _log.info("Account does not exist.");
                        acm.setState(ManagerState.INITIAL_CHOICE);
                    }
                    rs.close();
                    ps.close();
                } catch (SQLException e) {
                    _log.error("Could not access database!", e);
                    acm.setState(ManagerState.INITIAL_CHOICE);
                } finally {
                    L2Database.close(con);
                }
                break;
            case REASON:
                try {
                    int ban = Integer.parseInt(line);
                    con = L2Database.getConnection();
                    PreparedStatement ps = con
                            .prepareStatement("UPDATE account SET banReason = ? WHERE username LIKE ?");
                    ps.setInt(1, ban);
                    ps.setString(2, acm.getUser());
                    ps.executeUpdate();
                    _log.info("Account " + acm.getUser() + " has been banned.");
                    ps.close();
                } catch (NumberFormatException e) {
                    _log.info("Ban reason ID:");
                } catch (SQLException e) {
                    _log.error("Could not ban an account!", e);
                } finally {
                    L2Database.close(con);
                }
                acm.setState(ManagerState.INITIAL_CHOICE);
                break;
            default:
                line = line.toLowerCase();
                if (line.equals("reg")) {
                    _log.info("Desired user name:");
                    acm.setState(ManagerState.USER_NAME);
                } else if (line.equals("rem")) {
                    _log.info("User name:");
                    acm.setState(ManagerState.REMOVE);
                } else if (line.equals("prom")) {
                    _log.info("User name:");
                    acm.setState(ManagerState.PROMOTE);
                } else if (line.equals("dem")) {
                    _log.info("User name:");
                    acm.setState(ManagerState.DEMOTE);
                } else if (line.equals("unban")) {
                    _log.info("User name:");
                    acm.setState(ManagerState.UNBAN);
                } else if (line.equals("ban")) {
                    _log.info("User name:");
                    acm.setState(ManagerState.BAN);
                } else if (line.equals("quit"))
                    Shutdown.exit(TerminationStatus.MANUAL_SHUTDOWN);
                else
                    _log.info("Incorrect command.");
                break;
            }
        }
    } catch (IOException e) {
        _log.fatal("Could not process input!", e);
    } finally {
        IOUtils.closeQuietly(br);
    }
}

From source file: edu.cmu.lti.oaqa.knn4qa.apps.FilterTranTable.java

public static void main(String[] args) {
    Options options = new Options();

    options.addOption(INPUT_PARAM, null, true, INPUT_DESC);
    options.addOption(OUTPUT_PARAM, null, true, OUTPUT_DESC);
    options.addOption(CommonParams.MEM_FWD_INDEX_PARAM, null, true, CommonParams.MEM_FWD_INDEX_DESC);
    options.addOption(CommonParams.GIZA_ITER_QTY_PARAM, null, true, CommonParams.GIZA_ITER_QTY_DESC);
    options.addOption(CommonParams.GIZA_ROOT_DIR_PARAM, null, true, CommonParams.GIZA_ROOT_DIR_DESC);
    options.addOption(CommonParams.MIN_PROB_PARAM, null, true, CommonParams.MIN_PROB_DESC);
    options.addOption(CommonParams.MAX_WORD_QTY_PARAM, null, true, CommonParams.MAX_WORD_QTY_PARAM);

    CommandLineParser parser = new org.apache.commons.cli.GnuParser();

    try {
        CommandLine cmd = parser.parse(options, args);

        String outputFile = null;

        outputFile = cmd.getOptionValue(OUTPUT_PARAM);
        if (null == outputFile) {
            Usage("Specify 'A name of the output file'", options);
        }

        String gizaRootDir = cmd.getOptionValue(CommonParams.GIZA_ROOT_DIR_PARAM);
        if (null == gizaRootDir) {
            Usage("Specify '" + CommonParams.GIZA_ROOT_DIR_DESC + "'", options);
        }

        String gizaIterQty = cmd.getOptionValue(CommonParams.GIZA_ITER_QTY_PARAM);

        if (null == gizaIterQty) {
            Usage("Specify '" + CommonParams.GIZA_ITER_QTY_DESC + "'", options);
        }

        float minProb = 0;

        String tmpf = cmd.getOptionValue(CommonParams.MIN_PROB_PARAM);

        if (tmpf != null) {
            minProb = Float.parseFloat(tmpf);
        }

        int maxWordQty = Integer.MAX_VALUE;

        String tmpi = cmd.getOptionValue(CommonParams.MAX_WORD_QTY_PARAM);

        if (null != tmpi) {
            maxWordQty = Integer.parseInt(tmpi);
        }

        String memFwdIndxName = cmd.getOptionValue(CommonParams.MEM_FWD_INDEX_PARAM);
        if (null == memFwdIndxName) {
            Usage("Specify '" + CommonParams.MEM_FWD_INDEX_DESC + "'", options);
        }

        System.out.println("Filtering index: " + memFwdIndxName + " max # of frequent words: " + maxWordQty
                + " min. probability:" + minProb);

        VocabularyFilterAndRecoder filter = new FrequentIndexWordFilterAndRecoder(memFwdIndxName, maxWordQty);

        String srcVocFile = CompressUtils.findFileVariant(gizaRootDir + "/source.vcb");

        System.out.println("Source vocabulary file: " + srcVocFile);

        GizaVocabularyReader srcVoc = new GizaVocabularyReader(srcVocFile, filter);

        String dstVocFile = CompressUtils.findFileVariant(gizaRootDir + "/target.vcb");

        System.out.println("Target vocabulary file: " + dstVocFile);

        GizaVocabularyReader dstVoc = new GizaVocabularyReader(CompressUtils.findFileVariant(dstVocFile),
                filter);

        String inputFile = CompressUtils.findFileVariant(gizaRootDir + "/output.t1." + gizaIterQty);

        BufferedReader finp = new BufferedReader(
                new InputStreamReader(CompressUtils.createInputStream(inputFile)));

        BufferedWriter fout = new BufferedWriter(
                new OutputStreamWriter(CompressUtils.createOutputStream(outputFile)));

        try {
            String line;
            int prevSrcId = -1;
            int wordQty = 0;
            long addedQty = 0;
            long totalQty = 0;
            boolean isNotFiltered = false;

            for (totalQty = 0; (line = finp.readLine()) != null;) {
                ++totalQty;
                // Skip empty lines
                line = line.trim();
                if (line.isEmpty())
                    continue;

                GizaTranRec rec = new GizaTranRec(line);

                if (rec.mSrcId != prevSrcId) {
                    ++wordQty;
                }
                if (totalQty % REPORT_INTERVAL_QTY == 0) {
                    System.out.println(String.format(
                            "Processed %d lines (%d source word entries) from '%s', added %d lines", totalQty,
                            wordQty, inputFile, addedQty));
                }

                // isNotFiltered should be set after procOneWord
                if (rec.mSrcId != prevSrcId) {
                    if (rec.mSrcId == 0)
                        isNotFiltered = true;
                    else {
                        String wordSrc = srcVoc.getWord(rec.mSrcId);
                        isNotFiltered = filter == null || (wordSrc != null && filter.checkWord(wordSrc));
                    }
                }

                prevSrcId = rec.mSrcId;

                if (rec.mProb >= minProb && isNotFiltered) {
                    String wordDst = dstVoc.getWord(rec.mDstId);

                    if (filter == null || (wordDst != null && filter.checkWord(wordDst))) {
                        fout.write(rec.mSrcId + " " + rec.mDstId + " " + rec.mProb);
                        fout.newLine();
                        addedQty++;
                    }
                }
            }

            System.out.println(
                    String.format("Processed %d lines (%d source word entries) from '%s', added %d lines",
                            totalQty, wordQty, inputFile, addedQty));

        } finally {
            finp.close();
            fout.close();
        }
    } catch (ParseException e) {
        Usage("Cannot parse arguments", options);
    } catch (Exception e) {
        e.printStackTrace();
        System.err.println("Terminating due to an exception: " + e);
        System.exit(1);
    }
}

From source file: edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques.java

/**
 * @param args
 * @throws ParseException 
 */
@SuppressWarnings({ "deprecation" })
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

    Options options = new Options();

    Option forceOption = new Option("f", "force", false,
            "force the computation of the relationship " + "even if files already exist");
    forceOption.setRequired(false);
    options.addOption(forceOption);

    Option g1Option = new Option("g1", "first-group", true, "set first group of datasets");
    g1Option.setRequired(true);
    g1Option.setArgName("FIRST GROUP");
    g1Option.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(g1Option);

    Option g2Option = new Option("g2", "second-group", true, "set second group of datasets");
    g2Option.setRequired(false);
    g2Option.setArgName("SECOND GROUP");
    g2Option.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(g2Option);

    Option machineOption = new Option("m", "machine", true, "machine identifier");
    machineOption.setRequired(true);
    machineOption.setArgName("MACHINE");
    machineOption.setArgs(1);
    options.addOption(machineOption);

    Option nodesOption = new Option("n", "nodes", true, "number of nodes");
    nodesOption.setRequired(true);
    nodesOption.setArgName("NODES");
    nodesOption.setArgs(1);
    options.addOption(nodesOption);

    Option s3Option = new Option("s3", "s3", false, "data on Amazon S3");
    s3Option.setRequired(false);
    options.addOption(s3Option);

    Option awsAccessKeyIdOption = new Option("aws_id", "aws-id", true,
            "aws access key id; " + "this is required if the execution is on aws");
    awsAccessKeyIdOption.setRequired(false);
    awsAccessKeyIdOption.setArgName("AWS-ACCESS-KEY-ID");
    awsAccessKeyIdOption.setArgs(1);
    options.addOption(awsAccessKeyIdOption);

    Option awsSecretAccessKeyOption = new Option("aws_key", "aws-key", true,
            "aws secret access key; " + "this is required if the execution is on aws");
    awsSecretAccessKeyOption.setRequired(false);
    awsSecretAccessKeyOption.setArgName("AWS-SECRET-ACCESS-KEY");
    awsSecretAccessKeyOption.setArgs(1);
    options.addOption(awsSecretAccessKeyOption);

    Option bucketOption = new Option("b", "s3-bucket", true,
            "bucket on s3; " + "this is required if the execution is on aws");
    bucketOption.setRequired(false);
    bucketOption.setArgName("S3-BUCKET");
    bucketOption.setArgs(1);
    options.addOption(bucketOption);

    Option helpOption = new Option("h", "help", false, "display this message");
    helpOption.setRequired(false);
    options.addOption(helpOption);

    HelpFormatter formatter = new HelpFormatter();
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        formatter.printHelp(
                "hadoop jar data-polygamy.jar "
                        + "edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques",
                options, true);
        System.exit(0);
    }

    if (cmd.hasOption("h")) {
        formatter.printHelp(
                "hadoop jar data-polygamy.jar "
                        + "edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques",
                options, true);
        System.exit(0);
    }

    boolean s3 = cmd.hasOption("s3");
    String s3bucket = "";
    String awsAccessKeyId = "";
    String awsSecretAccessKey = "";

    if (s3) {
        if ((!cmd.hasOption("aws_id")) || (!cmd.hasOption("aws_key")) || (!cmd.hasOption("b"))) {
            System.out.println(
                    "Arguments 'aws_id', 'aws_key', and 'b'" + " are mandatory if execution is on AWS.");
            formatter.printHelp(
                    "hadoop jar data-polygamy.jar "
                            + "edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniques",
                    options, true);
            System.exit(0);
        }
        s3bucket = cmd.getOptionValue("b");
        awsAccessKeyId = cmd.getOptionValue("aws_id");
        awsSecretAccessKey = cmd.getOptionValue("aws_key");
    }

    boolean snappyCompression = false;
    boolean bzip2Compression = false;
    String machine = cmd.getOptionValue("m");
    int nbNodes = Integer.parseInt(cmd.getOptionValue("n"));

    Configuration s3conf = new Configuration();
    if (s3) {
        s3conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        s3conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        s3conf.set("bucket", s3bucket);
    }

    Path path = null;
    FileSystem fs = FileSystem.get(new Configuration());

    ArrayList<String> shortDataset = new ArrayList<String>();
    ArrayList<String> firstGroup = new ArrayList<String>();
    ArrayList<String> secondGroup = new ArrayList<String>();
    HashMap<String, String> datasetAgg = new HashMap<String, String>();

    boolean removeExistingFiles = cmd.hasOption("f");

    String[] firstGroupCmd = cmd.getOptionValues("g1");
    String[] secondGroupCmd = cmd.hasOption("g2") ? cmd.getOptionValues("g2") : new String[0];
    addDatasets(firstGroupCmd, firstGroup, shortDataset, datasetAgg, path, fs, s3conf, s3, s3bucket);
    addDatasets(secondGroupCmd, secondGroup, shortDataset, datasetAgg, path, fs, s3conf, s3, s3bucket);

    if (shortDataset.size() == 0) {
        System.out.println("No datasets to process.");
        System.exit(0);
    }

    if (firstGroup.isEmpty()) {
        System.out.println("First group of datasets (G1) is empty. " + "Doing G1 = G2.");
        firstGroup.addAll(secondGroup);
    }

    if (secondGroup.isEmpty()) {
        System.out.println("Second group of datasets (G2) is empty. " + "Doing G2 = G1.");
        secondGroup.addAll(firstGroup);
    }

    // getting dataset ids

    String datasetNames = "";
    String datasetIds = "";
    HashMap<String, String> datasetId = new HashMap<String, String>();
    Iterator<String> it = shortDataset.iterator();
    while (it.hasNext()) {
        datasetId.put(it.next(), null);
    }

    if (s3) {
        path = new Path(s3bucket + FrameworkUtils.datasetsIndexDir);
        fs = FileSystem.get(path.toUri(), s3conf);
    } else {
        path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.datasetsIndexDir);
    }
    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)));
    String line = br.readLine();
    while (line != null) {
        String[] dt = line.split("\t");
        if (datasetId.containsKey(dt[0])) {
            datasetId.put(dt[0], dt[1]);
            datasetNames += dt[0] + ",";
            datasetIds += dt[1] + ",";
        }
        line = br.readLine();
    }
    br.close();
    if (s3)
        fs.close();

    datasetNames = datasetNames.substring(0, datasetNames.length() - 1);
    datasetIds = datasetIds.substring(0, datasetIds.length() - 1);
    it = shortDataset.iterator();
    while (it.hasNext()) {
        String dataset = it.next();
        if (datasetId.get(dataset) == null) {
            System.out.println("No dataset id for " + dataset);
            System.exit(0);
        }
    }

    String firstGroupStr = "";
    String secondGroupStr = "";
    for (String dataset : firstGroup) {
        firstGroupStr += datasetId.get(dataset) + ",";
    }
    for (String dataset : secondGroup) {
        secondGroupStr += datasetId.get(dataset) + ",";
    }
    firstGroupStr = firstGroupStr.substring(0, firstGroupStr.length() - 1);
    secondGroupStr = secondGroupStr.substring(0, secondGroupStr.length() - 1);

    FrameworkUtils.createDir(s3bucket + FrameworkUtils.correlationTechniquesDir, s3conf, s3);

    String dataAttributesInputDirs = "";
    String noRelationship = "";

    HashSet<String> dirs = new HashSet<String>();

    String dataset1;
    String dataset2;
    String datasetId1;
    String datasetId2;
    for (int i = 0; i < firstGroup.size(); i++) {
        for (int j = 0; j < secondGroup.size(); j++) {

            if (Integer.parseInt(datasetId.get(firstGroup.get(i))) < Integer
                    .parseInt(datasetId.get(secondGroup.get(j)))) {
                dataset1 = firstGroup.get(i);
                dataset2 = secondGroup.get(j);
            } else {
                dataset1 = secondGroup.get(j);
                dataset2 = firstGroup.get(i);
            }

            datasetId1 = datasetId.get(dataset1);
            datasetId2 = datasetId.get(dataset2);

            if (dataset1.equals(dataset2))
                continue;
            String correlationOutputFileName = s3bucket + FrameworkUtils.correlationTechniquesDir + "/"
                    + dataset1 + "-" + dataset2 + "/";

            if (removeExistingFiles) {
                FrameworkUtils.removeFile(correlationOutputFileName, s3conf, s3);
            }
            if (!FrameworkUtils.fileExists(correlationOutputFileName, s3conf, s3)) {
                dirs.add(s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset1);
                dirs.add(s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset2);
            } else {
                noRelationship += datasetId1 + "-" + datasetId2 + ",";
            }
        }
    }

    if (dirs.isEmpty()) {
        System.out.println("All the relationships were already computed.");
        System.out.println("Use -f in the beginning of the command line to force the computation.");
        System.exit(0);
    }

    for (String dir : dirs) {
        dataAttributesInputDirs += dir + ",";
    }

    Configuration conf = new Configuration();
    Machine machineConf = new Machine(machine, nbNodes);

    String jobName = "correlation";
    String correlationOutputDir = s3bucket + FrameworkUtils.correlationTechniquesDir + "/tmp/";

    FrameworkUtils.removeFile(correlationOutputDir, s3conf, s3);

    for (int i = 0; i < shortDataset.size(); i++) {
        String dataset = shortDataset.get(i);
        conf.set("dataset-" + datasetId.get(dataset) + "-agg", datasetAgg.get(dataset));
        conf.set("dataset-" + datasetId.get(dataset) + "-agg-size",
                Integer.toString(datasetAgg.get(dataset).split(",").length));
    }
    conf.set("dataset-keys", datasetIds);
    conf.set("dataset-names", datasetNames);
    conf.set("first-group", firstGroupStr);
    conf.set("second-group", secondGroupStr);
    conf.set("main-dataset-id", datasetId.get(shortDataset.get(0)));
    if (noRelationship.length() > 0) {
        conf.set("no-relationship", noRelationship.substring(0, noRelationship.length() - 1));
    }

    conf.set("mapreduce.tasktracker.map.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    conf.set("mapreduce.tasktracker.reduce.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    conf.set("mapreduce.jobtracker.maxtasks.perjob", "-1");
    conf.set("mapreduce.reduce.shuffle.parallelcopies", "20");
    conf.set("mapreduce.input.fileinputformat.split.minsize", "0");
    conf.set("mapreduce.task.io.sort.mb", "200");
    conf.set("mapreduce.task.io.sort.factor", "100");
    conf.set("mapreduce.task.timeout", "2400000");

    if (s3) {
        machineConf.setMachineConfiguration(conf);
        conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        conf.set("bucket", s3bucket);
    }

    if (snappyCompression) {
        conf.set("mapreduce.map.output.compress", "true");
        conf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
        //conf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    }
    if (bzip2Compression) {
        conf.set("mapreduce.map.output.compress", "true");
        conf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
        //conf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
    }

    Job job = new Job(conf);
    job.setJobName(jobName);

    job.setMapOutputKeyClass(PairAttributeWritable.class);
    job.setMapOutputValueClass(SpatioTemporalValueWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    job.setMapperClass(CorrelationTechniquesMapper.class);
    job.setReducerClass(CorrelationTechniquesReducer.class);
    job.setNumReduceTasks(machineConf.getNumberReduces());

    job.setInputFormatClass(SequenceFileInputFormat.class);
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

    FileInputFormat.setInputDirRecursive(job, true);
    FileInputFormat.setInputPaths(job,
            dataAttributesInputDirs.substring(0, dataAttributesInputDirs.length() - 1));
    FileOutputFormat.setOutputPath(job, new Path(correlationOutputDir));

    job.setJarByClass(CorrelationTechniques.class);

    long start = System.currentTimeMillis();
    job.submit();
    job.waitForCompletion(true);
    System.out.println(jobName + "\t" + (System.currentTimeMillis() - start));

    // move each pair's output from the tmp directory to its final location
    for (int i = 0; i < firstGroup.size(); i++) {
        for (int j = 0; j < secondGroup.size(); j++) {

            if (Integer.parseInt(datasetId.get(firstGroup.get(i))) < Integer
                    .parseInt(datasetId.get(secondGroup.get(j)))) {
                dataset1 = firstGroup.get(i);
                dataset2 = secondGroup.get(j);
            } else {
                dataset1 = secondGroup.get(j);
                dataset2 = firstGroup.get(i);
            }

            if (dataset1.equals(dataset2))
                continue;

            String from = s3bucket + FrameworkUtils.correlationTechniquesDir + "/tmp/" + dataset1 + "-"
                    + dataset2 + "/";
            String to = s3bucket + FrameworkUtils.correlationTechniquesDir + "/" + dataset1 + "-" + dataset2
                    + "/";
            FrameworkUtils.renameFile(from, to, s3conf, s3);
        }
    }
}
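
The readLine() calls in this example drive the index-file loop near the top: each line of the datasets index is read, split on tabs, and used to map dataset names to ids. A minimal standalone sketch of that pattern, assuming a hypothetical local tab-separated file datasets.index (name<TAB>id per line) in place of the framework's HDFS/S3 paths:

public static void main(String[] args) throws IOException {
    // Hypothetical local stand-in for the framework's datasets index.
    BufferedReader br = new BufferedReader(new FileReader("datasets.index"));
    try {
        String line;
        // readLine() returns null at end of file, ending the loop.
        while ((line = br.readLine()) != null) {
            String[] dt = line.split("\t");
            System.out.println("dataset " + dt[0] + " has id " + dt[1]);
        }
    } finally {
        br.close();
    }
}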

From source file:Who.java

public static void main(String[] v) {
    Socket s = null;
    PrintWriter out = null;
    BufferedReader in = null;
    try {
        // Connect to port 79 (the standard finger port) on the host.
        String hostname = "www.java2s.com";
        s = new Socket(hostname, 79);
        // Set up the streams
        out = new PrintWriter(new OutputStreamWriter(s.getOutputStream()));
        in = new BufferedReader(new InputStreamReader(s.getInputStream()));

        // Send a blank line to the finger server, telling it that we want
        // a listing of everyone logged on instead of information about an
        // individual user.
        out.print("\n");
        out.flush(); // Send it out

        // Now read the server's response
        // The server should send lines terminated with \n or \r.
        String line;
        while ((line = in.readLine()) != null) {
            System.out.println(line);
        }
        System.out.println("Who's Logged On: " + hostname);
    } catch (IOException e) {
        System.out.println("Who's Logged On: Error");
    }
    // Always close the streams and the socket, even if an error occurred.
    finally {
        try {
            in.close();
            out.close();
            s.close();
        } catch (Exception e) {
            // Ignore close errors; the streams may be null if the
            // connection was never established.
        }
    }
}
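
On Java 7 and later, the same readLine() loop can use try-with-resources so the socket and both streams close automatically; a minimal sketch under the same hostname and port assumptions as above:

public static void main(String[] v) {
    String hostname = "www.java2s.com";
    try (Socket s = new Socket(hostname, 79);
            PrintWriter out = new PrintWriter(new OutputStreamWriter(s.getOutputStream()));
            BufferedReader in = new BufferedReader(new InputStreamReader(s.getInputStream()))) {
        out.print("\n"); // a blank line asks the finger server for all users
        out.flush();
        String line;
        while ((line = in.readLine()) != null)
            System.out.println(line);
        System.out.println("Who's Logged On: " + hostname);
    } catch (IOException e) {
        System.out.println("Who's Logged On: Error");
    }
}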

From source file:edu.oregonstate.eecs.mcplan.domains.toy.SavingProblem.java

public static void main(final String[] argv) throws NumberFormatException, IOException {
    final RandomGenerator rng = new MersenneTwister(42);
    final int T = 30;
    //      final int price_min = -8;
    //      final int price_max = 4;
    //      final int invest_period = 6;
    //      final int loan_period = 4;

    final int price_min = -4;
    final int price_max = 4;
    final int maturity_period = 3;
    final int invest_period = 4;
    final int loan_period = 4;

    final Parameters params = new Parameters(T, price_min, price_max, maturity_period, invest_period,
            loan_period);
    final Actions actions = new Actions(params);
    final FsssModel model = new FsssModel(rng, params);

    // Create the console reader once, outside the loop; re-wrapping System.in
    // on every iteration can silently discard buffered input.
    final BufferedReader cin = new BufferedReader(new InputStreamReader(System.in));

    State s = model.initialState();
    while (!s.isTerminal()) {
        System.out.println(s);
        System.out.println("R(s): " + model.reward(s));
        actions.setState(s, 0);
        final ArrayList<Action> action_list = Fn.takeAll(actions);
        for (int i = 0; i < action_list.size(); ++i) {
            System.out.println(i + ": " + action_list.get(i));
        }
        System.out.print(">>> ");
        final int choice = Integer.parseInt(cin.readLine());
        final Action a = action_list.get(choice);
        System.out.println("R(s, a): " + model.reward(s, a));
        s = model.sampleTransition(s, a);
    }

    // Estimate the value of a "good" policy.
    // Note: The "good" policy is to Invest when you can, and Sell if the
    // price is >= 2. This is not necessarily optimal because:
    //    1. You should Borrow once the episode will end before the loan
    //       must be repaid.
    //    2. For some values of invest_period, you should pass on a low price
    //       early in the period to try to get a better one later.
    //      final int Ngames = 10000;
    //      double V = 0;
    //      int Ninvest = 0;
    //      for( int i = 0; i < Ngames; ++i ) {
    //         State s = model.initialState();
    //         double Vi = model.reward( s );
    //         while( !s.isTerminal() ) {
    //            final Action a;
    //
    //            // "Good" policy
    //            if( s.investment == 0 ) {
    //               a = new InvestAction();
    //               Ninvest += 1;
    //            }
    //            else if( s.investment > 0 && s.price >= 2 ) {
    //               if( s.invest_t < (params.invest_period - 1) || s.price > 2 ) {
    //                  a = new SellAction();
    //               }
    //               else {
    //                  a = new SaveAction();
    //               }
    ////               a = new SellAction();
    //            }
    //            else {
    //               a = new SaveAction();
    //            }
    //
    //            // "Borrow" policy
    ////            if( s.loan == 0 ) {
    ////               a = new BorrowAction();
    ////            }
    ////            else {
    ////               a = new SaveAction();
    ////            }
    //
    //            final double ra = model.reward( s, a );
    //            s = model.sampleTransition( s, a );
    //            Vi += ra + model.reward( s );
    //         }
    //         V += Vi;
    //      }
    //
    //      final double Vavg = V / Ngames;
    //      final double Navg = (Ninvest / ((double) Ngames));
    //      System.out.println( "Avg. value: " + Vavg );
    //      System.out.println( "Avg. Invest actions: " + Navg );
    //      System.out.println( "V(Invest) ~= " + ( 1 + (Vavg - params.T)/Navg ) );
}
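
Integer.parseInt(cin.readLine()) above throws NumberFormatException both on non-numeric input and on the null that readLine() returns at end of input. A minimal sketch of a more defensive prompt loop, under the same System.in assumption:

public static void main(String[] argv) throws IOException {
    BufferedReader cin = new BufferedReader(new InputStreamReader(System.in));
    Integer choice = null;
    while (choice == null) {
        System.out.print(">>> ");
        String input = cin.readLine();
        if (input == null)
            return; // EOF: no more input to read
        try {
            choice = Integer.valueOf(input.trim());
        } catch (NumberFormatException ex) {
            System.out.println("Please enter a number.");
        }
    }
    System.out.println("You chose action " + choice);
}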

From source file:ExecDemoPartial.java

public static void main(String argv[]) throws IOException {

    BufferedReader is; // reader for the child process's standard output
    String line;

    // PROGRAM (the command to run) and done (a flag shared with the waiter
    // thread, ideally volatile) are static fields defined elsewhere in this
    // class; the listing shows only main().
    final Process p = Runtime.getRuntime().exec(PROGRAM);

    Thread waiter = new Thread() {
        public void run() {
            try {
                p.waitFor();
            } catch (InterruptedException ex) {
                // OK, just quit this thread.
                return;
            }
            System.out.println("Program terminated!");
            done = true;
        }
    };
    waiter.start();

    // getInputStream gives an Input stream connected to
    // the process p's standard output (and vice versa). We use
    // that to construct a BufferedReader so we can readLine() it.
    is = new BufferedReader(new InputStreamReader(p.getInputStream()));

    while (!done && ((line = is.readLine()) != null))
        System.out.println(line);

    return;
}
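
On Java 7 and later, ProcessBuilder gives the same read-the-output pattern with less plumbing; a minimal sketch, using a hypothetical "ls" command in place of PROGRAM and a simple waitFor() instead of the waiter thread:

public static void main(String[] argv) throws IOException, InterruptedException {
    ProcessBuilder pb = new ProcessBuilder("ls");
    pb.redirectErrorStream(true); // merge stderr into stdout so one reader drains both
    Process p = pb.start();
    BufferedReader is = new BufferedReader(new InputStreamReader(p.getInputStream()));
    String line;
    while ((line = is.readLine()) != null)
        System.out.println(line);
    System.out.println("Program terminated with exit code " + p.waitFor());
}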