List of usage examples for java.lang.Double.parseDouble
public static double parseDouble(String s) throws NumberFormatException
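Before the examples, a quick sketch of the method's contract as documented for java.lang.Double: it accepts Java floating-point literal syntax (including "NaN", "Infinity", hex significands, and an optional 'd'/'D'/'f'/'F' suffix), ignores leading and trailing whitespace, is not locale-aware, and throws NumberFormatException for anything else. A minimal demo:

public class ParseDoubleDemo {
    public static void main(String[] args) {
        System.out.println(Double.parseDouble("3.14"));     // 3.14
        System.out.println(Double.parseDouble(" -1e3\t"));  // surrounding whitespace is ignored: -1000.0
        System.out.println(Double.parseDouble("Infinity")); // Infinity
        try {
            Double.parseDouble("1,5"); // a comma is never a decimal separator here
        } catch (NumberFormatException e) {
            System.out.println("not parseable: 1,5");
        }
    }
}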
From source file:com.cloudplugs.util.Json.java
public static Object cast(String str) throws JSONException, NumberFormatException {
    if (str == null || str.length() == 0)
        return null;
    // dispatch on the first character to guess the JSON type of the raw string
    switch (str.charAt(0)) {
    case '{':
    case '[':
    case '"':
        return new JSONTokener(str).nextValue();
    case 'n':
        if ("null".equals(str))
            return null;
        return str;
    case 't':
        if ("true".equals(str))
            return true;
        return str;
    case 'f':
        if ("false".equals(str))
            return false;
        return str;
    case '0':
    case '1':
    case '2':
    case '3':
    case '4':
    case '5':
    case '6':
    case '7':
    case '8':
    case '9':
    case '-':
        // numeric-looking input: delegate to Double.parseDouble
        return Double.parseDouble(str);
    default:
        return str;
    }
}
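Note a caveat in the digit branches above: the whole string is handed to Double.parseDouble, so input that merely starts with a digit or '-' propagates NumberFormatException instead of falling back to the raw string. A hedged usage sketch (inputs illustrative):

Object d = Json.cast("42.5");  // Double 42.5
Object b = Json.cast("true");  // Boolean.TRUE
Object s = Json.cast("hello"); // the String itself
Object x = Json.cast("3rd");   // throws NumberFormatException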
From source file:com.intel.cosbench.driver.generator.BimodalIntGenerator.java
private static BimodalIntGenerator tryParse(String pattern) {
    // extract the five comma-separated parameters between the parentheses
    pattern = StringUtils.substringBetween(pattern, "(", ")");
    String[] args = StringUtils.split(pattern, ',');
    double mean1 = Double.parseDouble(args[0]);
    double stdDeviation1 = Double.parseDouble(args[1]);
    double mean2 = Double.parseDouble(args[2]);
    double stdDeviation2 = Double.parseDouble(args[3]);
    double coinFlip = Double.parseDouble(args[4]);
    return new BimodalIntGenerator(mean1, stdDeviation1, mean2, stdDeviation2, coinFlip);
}
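A hedged sketch of the pattern shape this parser expects, as it would be called from inside the class (the generator name and the numbers are illustrative; only the five values inside the parentheses matter to tryParse):

// two normal modes: mean 16 (std dev 4) and mean 64 (std dev 8), chosen 50/50
BimodalIntGenerator g = tryParse("bimodal(16,4,64,8,0.5)");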
From source file:Main.java
/**
 * @param rawValue the raw encoded value to decode
 * @return the numeric payload as a double
 * @throws Exception if the type marker is neither 'D' (double) nor 'U' (units)
 */
public static double decodeRawValueToSingleDouble(String rawValue) throws Exception {
    String[] values = getSingleValue(rawValue);
    if ("D".equals(values[0]) || "U".equals(values[0]))
        return Double.parseDouble(values[1]);
    throw new Exception(
            "Data type is not 'D' (double) or 'U' (units [points]), found '" + values[0] + "'");
}
From source file:com.clican.pluto.fsm.engine.vote.PercentVoteTask.java
protected void handleTask(Task task, Event event) {
    double percent = 0.5d; // agreement threshold, default 50%
    if (StringUtils.isNotEmpty(params.get("percent"))) {
        percent = Double.parseDouble(params.get("percent"));
    }
    Calendar current = Calendar.getInstance();
    EventType eventType = EventType.convert(event.getEventType());
    float totalTasks = event.getState().getTaskSet().size();
    Integer votedAgreeNumber = (Integer) this.getVariableValue(Parameters.VOTED_AGREE_NUMBER.getParameter(),
            event.getState(), false);
    if (votedAgreeNumber == null) {
        votedAgreeNumber = 0;
    }
    VoteResult voteResult = null;
    if (eventType == EventType.TASK) {
        voteResult = VoteResult
                .convert((String) this.getVariableValue(Parameters.VOTE_RESULT.getParameter(), event, false));
        if (log.isDebugEnabled()) {
            log.debug("PercentVoteTask, get a voteResult[" + voteResult + "]");
        }
        taskDao.setVariable(task, Parameters.VOTE_RESULT.getParameter(), voteResult.getVoteResult());
    } else {
        if (log.isDebugEnabled()) {
            log.debug("PercentVoteTask, get a timeout event");
        }
        voteResult = VoteResult.DISCLAIM; // a timeout counts as an abstention
        taskDao.setVariable(task, Parameters.VOTE_RESULT.getParameter(), VoteResult.DISCLAIM.getVoteResult());
    }
    if (voteResult == VoteResult.AGREE) {
        votedAgreeNumber++;
        stateDao.setVariable(event.getState(), Parameters.VOTED_AGREE_NUMBER.getParameter(), votedAgreeNumber);
    }
    List<Task> notCompletedTaskList = taskDao.findActiveTasksByState(event.getState());
    if (votedAgreeNumber / totalTasks >= percent) {
        // enough agreement already: close out the remaining open tasks
        voteResult = VoteResult.AGREE;
        for (Task notCompletedTask : notCompletedTaskList) {
            if (notCompletedTask.getId().equals(task.getId())) {
                continue;
            }
            notCompletedTask.setCompleteTime(current.getTime());
            taskDao.setVariable(notCompletedTask, Parameters.VOTE_RESULT.getParameter(),
                    VoteResult.DISCLAIM.getVoteResult());
            taskDao.save(notCompletedTask);
        }
    } else if ((votedAgreeNumber + notCompletedTaskList.size() - 1) / totalTasks < percent) {
        // even if every remaining voter agreed, the threshold is unreachable
        voteResult = VoteResult.DISAGREE;
        for (Task notCompletedTask : notCompletedTaskList) {
            if (notCompletedTask.getId().equals(task.getId())) {
                continue;
            }
            notCompletedTask.setCompleteTime(current.getTime());
            taskDao.setVariable(notCompletedTask, Parameters.VOTE_RESULT.getParameter(),
                    VoteResult.DISCLAIM.getVoteResult());
            taskDao.save(notCompletedTask);
        }
    }
    stateDao.setVariable(event.getState(), Parameters.VOTE_RESULT.getParameter(), voteResult.getVoteResult());
    recordLastVoteResultAndSuggestion(task.getState().getSession().getId(), voteResult.getVoteResult(),
            (String) this.getVariableValue(Parameters.AUDIT_SUGGESTION.getParameter(), event, false),
            task.getAssignee());
    task.setCompleteTime(current.getTime());
    taskDao.save(task);
}
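A worked example of the early-decision arithmetic (numbers illustrative): with totalTasks = 4 and percent = 0.5, two AGREE votes settle the outcome immediately (2 / 4 >= 0.5). Conversely, with no AGREE votes yet and two tasks still open (including the one being handled), the best reachable ratio is (0 + 2 - 1) / 4 = 0.25 < 0.5, so the second branch closes the vote early as DISAGREE.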
From source file:Data.Utilities.java
public static Double distDiff(GPSRec rec1, AccessPoint rec2) {
    return haversine(Double.parseDouble(rec1.getLatitude()), Double.parseDouble(rec1.getLongitude()),
            Double.parseDouble(rec2.getLatitude()), Double.parseDouble(rec2.getLongitude()));
}
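The haversine helper is not shown in this snippet; a minimal sketch of the standard great-circle implementation it presumably corresponds to (the 6371 km mean Earth radius and the km unit are assumptions):

static double haversine(double lat1, double lon1, double lat2, double lon2) {
    final double R = 6371.0; // mean Earth radius in km (assumed)
    double dLat = Math.toRadians(lat2 - lat1);
    double dLon = Math.toRadians(lon2 - lon1);
    double a = Math.sin(dLat / 2) * Math.sin(dLat / 2)
            + Math.cos(Math.toRadians(lat1)) * Math.cos(Math.toRadians(lat2))
            * Math.sin(dLon / 2) * Math.sin(dLon / 2);
    return 2 * R * Math.asin(Math.sqrt(a));
}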
From source file:com.sk89q.craftbook.sponge.util.ParsingUtil.java
public static Vector3d parseUnsafeBlockLocation(String line)
        throws NumberFormatException, ArrayIndexOutOfBoundsException {
    // strip the modifier characters before parsing the offsets
    line = StringUtils.replace(StringUtils.replace(StringUtils.replace(line, "!", ""), "^", ""), "&", "");
    double offsetX = 0, offsetY, offsetZ = 0;
    if (line.contains("="))
        line = RegexUtil.EQUALS_PATTERN.split(line)[1];
    String[] split = RegexUtil.COLON_PATTERN.split(line);
    if (split.length > 1) {
        // "x:y:z" form
        offsetX = Double.parseDouble(split[0]);
        offsetY = Double.parseDouble(split[1]);
        offsetZ = Double.parseDouble(split[2]);
    } else {
        // a single number is treated as a Y offset
        offsetY = Double.parseDouble(line);
    }
    return new Vector3d(offsetX, offsetY, offsetZ);
}
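A hedged usage sketch of the two accepted offset forms, derived from the branches above (input strings illustrative):

Vector3d a = ParsingUtil.parseUnsafeBlockLocation("!1:2.5:3"); // -> (1.0, 2.5, 3.0)
Vector3d b = ParsingUtil.parseUnsafeBlockLocation("4");        // -> (0.0, 4.0, 0.0)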
From source file:edu.nyu.vida.data_polygamy.feature_identification.IndexCreation.java
/**
 * @param args command-line options; run with -h for the full usage message
 */
@SuppressWarnings({ "deprecation" })
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

    Options options = new Options();

    Option forceOption = new Option("f", "force", false,
            "force the computation of the index and events even if files already exist");
    forceOption.setRequired(false);
    options.addOption(forceOption);

    Option thresholdOption = new Option("t", "use-custom-thresholds", false,
            "use custom thresholds for regular and rare events, defined in HDFS_HOME/"
                    + FrameworkUtils.thresholdDir + " file");
    thresholdOption.setRequired(false);
    options.addOption(thresholdOption);

    Option gOption = new Option("g", "group", true,
            "set group of datasets for which the indices and events will be computed");
    gOption.setRequired(true);
    gOption.setArgName("GROUP");
    gOption.setArgs(Option.UNLIMITED_VALUES);
    options.addOption(gOption);

    Option machineOption = new Option("m", "machine", true, "machine identifier");
    machineOption.setRequired(true);
    machineOption.setArgName("MACHINE");
    machineOption.setArgs(1);
    options.addOption(machineOption);

    Option nodesOption = new Option("n", "nodes", true, "number of nodes");
    nodesOption.setRequired(true);
    nodesOption.setArgName("NODES");
    nodesOption.setArgs(1);
    options.addOption(nodesOption);

    Option s3Option = new Option("s3", "s3", false, "data on Amazon S3");
    s3Option.setRequired(false);
    options.addOption(s3Option);

    Option awsAccessKeyIdOption = new Option("aws_id", "aws-id", true,
            "aws access key id; this is required if the execution is on aws");
    awsAccessKeyIdOption.setRequired(false);
    awsAccessKeyIdOption.setArgName("AWS-ACCESS-KEY-ID");
    awsAccessKeyIdOption.setArgs(1);
    options.addOption(awsAccessKeyIdOption);

    // long name fixed from the original's copy-paste duplicate "aws-id"
    Option awsSecretAccessKeyOption = new Option("aws_key", "aws-key", true,
            "aws secret access key; this is required if the execution is on aws");
    awsSecretAccessKeyOption.setRequired(false);
    awsSecretAccessKeyOption.setArgName("AWS-SECRET-ACCESS-KEY");
    awsSecretAccessKeyOption.setArgs(1);
    options.addOption(awsSecretAccessKeyOption);

    Option bucketOption = new Option("b", "s3-bucket", true,
            "bucket on s3; this is required if the execution is on aws");
    bucketOption.setRequired(false);
    bucketOption.setArgName("S3-BUCKET");
    bucketOption.setArgs(1);
    options.addOption(bucketOption);

    Option helpOption = new Option("h", "help", false, "display this message");
    helpOption.setRequired(false);
    options.addOption(helpOption);

    HelpFormatter formatter = new HelpFormatter();
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        formatter.printHelp("hadoop jar data-polygamy.jar "
                + "edu.nyu.vida.data_polygamy.feature_identification.IndexCreation", options, true);
        System.exit(0);
    }

    if (cmd.hasOption("h")) {
        formatter.printHelp("hadoop jar data-polygamy.jar "
                + "edu.nyu.vida.data_polygamy.feature_identification.IndexCreation", options, true);
        System.exit(0);
    }

    boolean s3 = cmd.hasOption("s3");
    String s3bucket = "";
    String awsAccessKeyId = "";
    String awsSecretAccessKey = "";

    if (s3) {
        if ((!cmd.hasOption("aws_id")) || (!cmd.hasOption("aws_key")) || (!cmd.hasOption("b"))) {
            System.out.println("Arguments 'aws_id', 'aws_key', and 'b' are mandatory if execution is on AWS.");
            formatter.printHelp("hadoop jar data-polygamy.jar "
                    + "edu.nyu.vida.data_polygamy.feature_identification.IndexCreation", options, true);
            System.exit(0);
        }
        s3bucket = cmd.getOptionValue("b");
        awsAccessKeyId = cmd.getOptionValue("aws_id");
        awsSecretAccessKey = cmd.getOptionValue("aws_key");
    }

    boolean snappyCompression = false;
    boolean bzip2Compression = false;
    String machine = cmd.getOptionValue("m");
    int nbNodes = Integer.parseInt(cmd.getOptionValue("n"));

    Configuration s3conf = new Configuration();
    if (s3) {
        s3conf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        s3conf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        s3conf.set("bucket", s3bucket);
    }

    String datasetNames = "";
    String datasetIds = "";
    ArrayList<String> shortDataset = new ArrayList<String>();
    ArrayList<String> shortDatasetIndex = new ArrayList<String>();
    HashMap<String, String> datasetAgg = new HashMap<String, String>();
    HashMap<String, String> datasetId = new HashMap<String, String>();
    HashMap<String, HashMap<Integer, Double>> datasetRegThreshold = new HashMap<String, HashMap<Integer, Double>>();
    HashMap<String, HashMap<Integer, Double>> datasetRareThreshold = new HashMap<String, HashMap<Integer, Double>>();

    Path path = null;
    FileSystem fs = FileSystem.get(new Configuration());
    BufferedReader br;

    boolean removeExistingFiles = cmd.hasOption("f");
    boolean isThresholdUserDefined = cmd.hasOption("t");

    for (String dataset : cmd.getOptionValues("g")) {

        // getting aggregates
        String[] aggregate = FrameworkUtils.searchAggregates(dataset, s3conf, s3);
        if (aggregate.length == 0) {
            System.out.println("No aggregates found for " + dataset + ".");
            continue;
        }

        // getting aggregates header
        String aggregatesHeaderFileName = FrameworkUtils.searchAggregatesHeader(dataset, s3conf, s3);
        if (aggregatesHeaderFileName == null) {
            System.out.println("No aggregate header for " + dataset);
            continue;
        }
        String aggregatesHeader = s3bucket + FrameworkUtils.preProcessingDir + "/" + aggregatesHeaderFileName;

        shortDataset.add(dataset);
        datasetId.put(dataset, null);

        if (s3) {
            path = new Path(aggregatesHeader);
            fs = FileSystem.get(path.toUri(), s3conf);
        } else {
            path = new Path(fs.getHomeDirectory() + "/" + aggregatesHeader);
        }

        br = new BufferedReader(new InputStreamReader(fs.open(path)));
        datasetAgg.put(dataset, br.readLine().split("\t")[1]);
        br.close();
        if (s3)
            fs.close();
    }

    if (shortDataset.size() == 0) {
        System.out.println("No datasets to process.");
        System.exit(0);
    }

    // getting dataset id
    if (s3) {
        path = new Path(s3bucket + FrameworkUtils.datasetsIndexDir);
        fs = FileSystem.get(path.toUri(), s3conf);
    } else {
        path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.datasetsIndexDir);
    }
    br = new BufferedReader(new InputStreamReader(fs.open(path)));
    String line = br.readLine();
    while (line != null) {
        String[] dt = line.split("\t");
        if (datasetId.containsKey(dt[0])) {
            datasetId.put(dt[0], dt[1]);
            datasetNames += dt[0] + ",";
            datasetIds += dt[1] + ",";
        }
        line = br.readLine();
    }
    br.close();

    datasetNames = datasetNames.substring(0, datasetNames.length() - 1);
    datasetIds = datasetIds.substring(0, datasetIds.length() - 1);

    Iterator<String> it = shortDataset.iterator();
    while (it.hasNext()) {
        String dataset = it.next();
        if (datasetId.get(dataset) == null) {
            System.out.println("No dataset id for " + dataset);
            System.exit(0);
        }
    }

    // getting user defined thresholds
    if (isThresholdUserDefined) {
        if (s3) {
            path = new Path(s3bucket + FrameworkUtils.thresholdDir);
            fs = FileSystem.get(path.toUri(), s3conf);
        } else {
            path = new Path(fs.getHomeDirectory() + "/" + FrameworkUtils.thresholdDir);
        }
        br = new BufferedReader(new InputStreamReader(fs.open(path)));
        line = br.readLine();
        while (line != null) {
            // getting dataset name
            String dataset = line.trim();
            HashMap<Integer, Double> regThresholds = new HashMap<Integer, Double>();
            HashMap<Integer, Double> rareThresholds = new HashMap<Integer, Double>();
            line = br.readLine();
            while ((line != null) && (line.split("\t").length > 1)) {
                // getting attribute ids and thresholds
                String[] keyVals = line.trim().split("\t");
                int att = Integer.parseInt(keyVals[0].trim());
                regThresholds.put(att, Double.parseDouble(keyVals[1].trim()));
                rareThresholds.put(att, Double.parseDouble(keyVals[2].trim()));
                line = br.readLine();
            }
            datasetRegThreshold.put(dataset, regThresholds);
            datasetRareThreshold.put(dataset, rareThresholds);
        }
        br.close();
    }
    if (s3)
        fs.close();

    // datasets that will use existing merge tree
    ArrayList<String> useMergeTree = new ArrayList<String>();

    // creating index for each spatio-temporal resolution
    FrameworkUtils.createDir(s3bucket + FrameworkUtils.indexDir, s3conf, s3);

    HashSet<String> input = new HashSet<String>();

    for (String dataset : shortDataset) {

        String indexCreationOutputFileName = s3bucket + FrameworkUtils.indexDir + "/" + dataset + "/";
        String mergeTreeFileName = s3bucket + FrameworkUtils.mergeTreeDir + "/" + dataset + "/";

        if (removeExistingFiles) {
            FrameworkUtils.removeFile(indexCreationOutputFileName, s3conf, s3);
            FrameworkUtils.removeFile(mergeTreeFileName, s3conf, s3);
            FrameworkUtils.createDir(mergeTreeFileName, s3conf, s3);
        } else if (datasetRegThreshold.containsKey(dataset)) {
            FrameworkUtils.removeFile(indexCreationOutputFileName, s3conf, s3);
            if (FrameworkUtils.fileExists(mergeTreeFileName, s3conf, s3)) {
                useMergeTree.add(dataset);
            }
        }

        if (!FrameworkUtils.fileExists(indexCreationOutputFileName, s3conf, s3)) {
            input.add(s3bucket + FrameworkUtils.aggregatesDir + "/" + dataset);
            shortDatasetIndex.add(dataset);
        }
    }

    if (input.isEmpty()) {
        System.out.println("All the input datasets have indices.");
        System.out.println("Use -f in the beginning of the command line to force the computation.");
        System.exit(0);
    }

    String aggregateDatasets = "";
    it = input.iterator();
    while (it.hasNext()) {
        aggregateDatasets += it.next() + ",";
    }

    Job icJob = null;
    Configuration icConf = new Configuration();
    Machine machineConf = new Machine(machine, nbNodes);

    String jobName = "index";
    String indexOutputDir = s3bucket + FrameworkUtils.indexDir + "/tmp/";

    FrameworkUtils.removeFile(indexOutputDir, s3conf, s3);

    icConf.set("dataset-name", datasetNames);
    icConf.set("dataset-id", datasetIds);

    if (!useMergeTree.isEmpty()) {
        String useMergeTreeStr = "";
        for (String dt : useMergeTree) {
            useMergeTreeStr += dt + ",";
        }
        icConf.set("use-merge-tree", useMergeTreeStr.substring(0, useMergeTreeStr.length() - 1));
    }

    for (int i = 0; i < shortDataset.size(); i++) {
        String dataset = shortDataset.get(i);
        String id = datasetId.get(dataset);
        icConf.set("dataset-" + id + "-aggregates", datasetAgg.get(dataset));
        if (datasetRegThreshold.containsKey(dataset)) {
            HashMap<Integer, Double> regThresholds = datasetRegThreshold.get(dataset);
            String thresholds = "";
            for (int att : regThresholds.keySet()) {
                thresholds += String.valueOf(att) + "-" + String.valueOf(regThresholds.get(att)) + ",";
            }
            icConf.set("regular-" + id, thresholds.substring(0, thresholds.length() - 1));
        }
        if (datasetRareThreshold.containsKey(dataset)) {
            HashMap<Integer, Double> rareThresholds = datasetRareThreshold.get(dataset);
            String thresholds = "";
            for (int att : rareThresholds.keySet()) {
                thresholds += String.valueOf(att) + "-" + String.valueOf(rareThresholds.get(att)) + ",";
            }
            icConf.set("rare-" + id, thresholds.substring(0, thresholds.length() - 1));
        }
    }

    icConf.set("mapreduce.tasktracker.map.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    icConf.set("mapreduce.tasktracker.reduce.tasks.maximum", String.valueOf(machineConf.getMaximumTasks()));
    icConf.set("mapreduce.jobtracker.maxtasks.perjob", "-1");
    icConf.set("mapreduce.reduce.shuffle.parallelcopies", "20");
    icConf.set("mapreduce.input.fileinputformat.split.minsize", "0");
    icConf.set("mapreduce.task.io.sort.mb", "200");
    icConf.set("mapreduce.task.io.sort.factor", "100");
    //icConf.set("mapreduce.task.timeout", "1800000");

    machineConf.setMachineConfiguration(icConf);

    if (s3) {
        icConf.set("fs.s3.awsAccessKeyId", awsAccessKeyId);
        icConf.set("fs.s3.awsSecretAccessKey", awsSecretAccessKey);
        icConf.set("bucket", s3bucket);
    }

    if (snappyCompression) {
        icConf.set("mapreduce.map.output.compress", "true");
        icConf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
        //icConf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    }
    if (bzip2Compression) {
        icConf.set("mapreduce.map.output.compress", "true");
        icConf.set("mapreduce.map.output.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
        //icConf.set("mapreduce.output.fileoutputformat.compress.codec", "org.apache.hadoop.io.compress.BZip2Codec");
    }

    icJob = new Job(icConf);
    icJob.setJobName(jobName);

    icJob.setMapOutputKeyClass(AttributeResolutionWritable.class);
    icJob.setMapOutputValueClass(SpatioTemporalFloatWritable.class);
    icJob.setOutputKeyClass(AttributeResolutionWritable.class);
    icJob.setOutputValueClass(TopologyTimeSeriesWritable.class);
    //icJob.setOutputKeyClass(Text.class);
    //icJob.setOutputValueClass(Text.class);

    icJob.setMapperClass(IndexCreationMapper.class);
    icJob.setReducerClass(IndexCreationReducer.class);
    icJob.setNumReduceTasks(machineConf.getNumberReduces());

    icJob.setInputFormatClass(SequenceFileInputFormat.class);
    //icJob.setOutputFormatClass(SequenceFileOutputFormat.class);
    LazyOutputFormat.setOutputFormatClass(icJob, SequenceFileOutputFormat.class);
    //LazyOutputFormat.setOutputFormatClass(icJob, TextOutputFormat.class);
    SequenceFileOutputFormat.setCompressOutput(icJob, true);
    SequenceFileOutputFormat.setOutputCompressionType(icJob, CompressionType.BLOCK);

    FileInputFormat.setInputDirRecursive(icJob, true);
    FileInputFormat.setInputPaths(icJob, aggregateDatasets.substring(0, aggregateDatasets.length() - 1));
    FileOutputFormat.setOutputPath(icJob, new Path(indexOutputDir));

    icJob.setJarByClass(IndexCreation.class);

    long start = System.currentTimeMillis();
    icJob.submit();
    icJob.waitForCompletion(true);
    System.out.println(jobName + "\t" + (System.currentTimeMillis() - start));

    // moving files to right place
    for (String dataset : shortDatasetIndex) {
        String from = s3bucket + FrameworkUtils.indexDir + "/tmp/" + dataset + "/";
        String to = s3bucket + FrameworkUtils.indexDir + "/" + dataset + "/";
        FrameworkUtils.renameFile(from, to, s3conf, s3);
    }
}
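A hedged invocation sketch assembled from the options defined above (the jar and class names come from the program's own help text; the group, machine, and node values are placeholders):

hadoop jar data-polygamy.jar edu.nyu.vida.data_polygamy.feature_identification.IndexCreation \
    -g taxi weather -m cluster-machine -n 8 -t -f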
From source file:ml.shifu.core.util.CommonUtils.java
public static double getDoubleOrElse(Object o, Double defaultValue) {
    try {
        return Double.parseDouble(o.toString());
    } catch (Exception e) {
        // also covers a null 'o', via the NullPointerException from o.toString()
        return defaultValue;
    }
}
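A hedged usage sketch (inputs illustrative):

double a = CommonUtils.getDoubleOrElse("42.5", 0.0); // 42.5
double b = CommonUtils.getDoubleOrElse("oops", 0.0); // 0.0, parse failed
double c = CommonUtils.getDoubleOrElse(null, -1.0);  // -1.0, NPE swallowed

Note that passing a null defaultValue would itself throw a NullPointerException when the fallback path unboxes it into the primitive return type.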
From source file:ch.unibas.fittingwizard.application.tools.FitOutputParser.java
public double parseRmseValue(File outputFile) {
    List<String> lines;
    try {
        lines = FileUtils.readLines(outputFile);
    } catch (IOException e) {
        throw new RuntimeException("Could not read output file.", e);
    }
    boolean lineWithRmseContained = false;
    Double rmse = null;
    for (String line : lines) {
        lineWithRmseContained = line.contains("RMSE:");
        if (lineWithRmseContained) {
            rmse = Double.parseDouble(getRmseString(line));
            break;
        }
    }
    if (!lineWithRmseContained) {
        throw new RuntimeException(
                String.format("The output file %s did not contain a RMSE line.", outputFile.getAbsoluteFile()));
    }
    return rmse;
}
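getRmseString is not included in this snippet; a minimal sketch of what such an extractor might look like, assuming the numeric value follows the "RMSE:" marker on the same line:

private static String getRmseString(String line) {
    // e.g. "RMSE:   0.01234" -> "0.01234" (line format assumed)
    return line.substring(line.indexOf("RMSE:") + "RMSE:".length()).trim();
}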
From source file:eu.planets_project.pp.plato.xml.plato.NumericTransformerThresholdFactory.java
@Override
public Object createObject(Attributes arg0) throws Exception {
    return Double.parseDouble(arg0.getValue("value"));
}
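A closing note on this last example: parseDouble returns a primitive double that is autoboxed to a Double for the Object return type, so the factory effectively behaves like Double.valueOf(arg0.getValue("value")). A minimal equivalent sketch (attribute value illustrative):

Object threshold = Double.parseDouble("0.75");   // autoboxed to Double 0.75
System.out.println(threshold instanceof Double); // true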