Usage examples for java.lang.Float.isNaN
public static boolean isNaN(float v)
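Float.isNaN(v) returns true if v is a Not-a-Number (NaN) value. It is the only reliable way to test for NaN, because NaN compares unequal to every float, including itself. A minimal standalone sketch before the real-world examples below:

public class IsNaNDemo {
    public static void main(String[] args) {
        float nan = 0.0f / 0.0f;                       // produces NaN
        System.out.println(nan == Float.NaN);          // false: NaN is not equal to anything, even itself
        System.out.println(Float.isNaN(nan));          // true
        System.out.println(Float.isNaN(1.0f / 0.0f));  // false: infinity is not NaN
    }
}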
From source file:FloatSpring.java
/**
 * Update the position of the spring. This updates the "position" as if there
 * were a damped spring stretched between the current position and the target
 * position. That is, the spring will tend to pull the position towards the
 * target, and if the spring is damped the position will eventually settle
 * onto the target.
 *
 * @param target
 *            The target towards which the spring is pulling the position
 * @param time
 *            The elapsed time in seconds
 */
public void update(float target, float time) {
    // Set v to position - target, the displacement from the target
    float v = position - target;
    // Multiply displacement by spring constant to get spring force,
    // then subtract damping force
    v = v * -springK - velocity * dampingK;
    // v is now a force, so assuming unit mass it is also acceleration.
    // Multiply by elapsed time to get velocity change
    velocity += v * time;
    // If velocity isn't valid, zero it
    if (Float.isNaN(velocity) || Float.isInfinite(velocity)) {
        velocity = 0;
    }
    // Change the position at the new velocity, for elapsed time
    position += velocity * time;
}
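Here isNaN acts as a stability guard: a zero or near-zero time step combined with large spring constants can blow the velocity up to infinity or NaN, and the check resets it before the corruption reaches the position. A minimal usage sketch; the no-arg constructor and position accessors shown are assumptions for illustration, not part of the excerpt above:

// Assumed API: FloatSpring(), setPosition(float), getPosition()
FloatSpring spring = new FloatSpring();
spring.setPosition(0f);
float target = 100f;
for (int frame = 0; frame < 60; frame++) {
    spring.update(target, 1f / 60f);           // step at 60 updates per second
    System.out.println(spring.getPosition());  // eases towards 100
}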
From source file:org.broad.igv.tools.ListAccumulator.java
public void finish() {
    if (isFinished) {
        return;
    }
    mean = Float.isNaN(sum) ? Float.NaN : sum / basesCovered;
    if (values != null) {
        if (nPts == 1) {
            for (WindowFunction wf : quantileFunctions) {
                setValue(wf, mean);
            }
        } else {
            if (values.size() > 1) {
                computePercentiles();
            }
            for (WindowFunction wf : quantileFunctions) {
                List<PercentileValue> pList = percentiles.get(wf);
                float v = Float.NaN; // <= default
                if (pList != null && pList.size() > 0) {
                    double weightedSum = 0;
                    double sumOfWeights = 0;
                    for (PercentileValue pv : pList) {
                        double weight = (double) pv.nPoints / nPts;
                        sumOfWeights += weight;
                        weightedSum += weight * pv.value;
                    }
                    v = (float) (weightedSum / sumOfWeights);
                }
                setValue(wf, v);
            }
        }
    }
    values = null;
    isFinished = true;
}
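The accumulator guards the mean with Float.isNaN(sum): once a NaN enters a running sum it propagates through every subsequent addition, so a single check on the total detects any NaN among the inputs. A minimal sketch of that propagation:

public class NaNPropagationDemo {
    public static void main(String[] args) {
        float[] samples = {1.0f, 2.5f, Float.NaN, 4.0f};
        float sum = 0f;
        for (float s : samples) {
            sum += s; // one NaN poisons the whole sum
        }
        float mean = Float.isNaN(sum) ? Float.NaN : sum / samples.length;
        System.out.println(mean); // NaN
    }
}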
From source file:com.cloudera.oryx.als.computation.local.ReadInputs.java
private void readInput() throws IOException {
    File[] inputFiles = inputDir.listFiles(IOUtils.CSV_COMPRESSED_FILTER);
    if (inputFiles == null || inputFiles.length == 0) {
        log.info("No input files in {}", inputDir);
        return;
    }
    Arrays.sort(inputFiles, ByLastModifiedComparator.INSTANCE);
    for (File inputFile : inputFiles) {
        log.info("Reading {}", inputFile);
        for (CharSequence line : new FileLineIterable(inputFile)) {
            String[] columns = DelimitedDataUtils.decode(line);
            String userIDString = columns[0];
            long userID = isInbound ? idMapping.add(userIDString) : Long.parseLong(userIDString);
            String itemIDString = columns[1];
            long itemID = isInbound ? idMapping.add(itemIDString) : Long.parseLong(itemIDString);
            float value;
            if (columns.length > 2) {
                String valueToken = columns[2];
                value = valueToken.isEmpty() ? Float.NaN : LangUtils.parseFloat(valueToken);
            } else {
                value = 1.0f;
            }
            if (Float.isNaN(value)) {
                // Remove, not set
                MatrixUtils.remove(userID, itemID, RbyRow, RbyColumn);
            } else {
                MatrixUtils.addTo(userID, itemID, value, RbyRow, RbyColumn);
            }
            if (knownItemIDs != null) {
                LongSet itemIDs = knownItemIDs.get(userID);
                if (Float.isNaN(value)) {
                    // Remove, not set
                    if (itemIDs != null) {
                        itemIDs.remove(itemID);
                        if (itemIDs.isEmpty()) {
                            knownItemIDs.remove(userID);
                        }
                    }
                } else {
                    if (itemIDs == null) {
                        itemIDs = new LongSet();
                        knownItemIDs.put(userID, itemIDs);
                    }
                    itemIDs.add(itemID);
                }
            }
        }
    }
}
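Here NaN doubles as a deletion marker: an empty value column parses to Float.NaN, which means "remove this entry" rather than "set it to some value". A minimal self-contained sketch of the same sentinel pattern, with a hypothetical ratings map standing in for the Oryx matrix types:

import java.util.HashMap;
import java.util.Map;

public class NaNSentinelDemo {
    public static void main(String[] args) {
        Map<String, Float> ratings = new HashMap<>();
        apply(ratings, "user1:item9", "4.5"); // set
        apply(ratings, "user1:item9", "");    // empty token => NaN => remove
        System.out.println(ratings);          // {}
    }

    static void apply(Map<String, Float> ratings, String key, String token) {
        float value = token.isEmpty() ? Float.NaN : Float.parseFloat(token);
        if (Float.isNaN(value)) {
            ratings.remove(key); // NaN is the "remove, not set" sentinel
        } else {
            ratings.put(key, value);
        }
    }
}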
From source file:juicebox.matrix.SymmetricMatrix.java
private void computePercentiles() {
    // Statistics, other attributes
    DoubleArrayList flattenedDataList = new DoubleArrayList(data.length);
    for (float value : data) {
        if (!Float.isNaN(value) && value != 1) {
            flattenedDataList.add(value);
        }
    }
    // Stats
    double[] flattenedData = flattenedDataList.toArray();
    lowerValue = (float) StatUtils.percentile(flattenedData, 5);
    upperValue = (float) StatUtils.percentile(flattenedData, 95);
}
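Filtering NaN before calling StatUtils.percentile matters because NaN entries corrupt rank-based statistics. A self-contained sketch of the same filter, assuming Apache Commons Math 3 on the classpath:

import java.util.Arrays;
import org.apache.commons.math3.stat.StatUtils;

public class PercentileDemo {
    public static void main(String[] args) {
        float[] data = {0.2f, Float.NaN, 0.9f, 1f, 0.5f, Float.NaN, 0.7f};
        // Keep only non-NaN, non-diagonal (value != 1) entries, as above
        double[] buffer = new double[data.length];
        int n = 0;
        for (float v : data) {
            if (!Float.isNaN(v) && v != 1) {
                buffer[n++] = v;
            }
        }
        double[] flattened = Arrays.copyOf(buffer, n);
        System.out.println(StatUtils.percentile(flattened, 5));  // lower bound
        System.out.println(StatUtils.percentile(flattened, 95)); // upper bound
    }
}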
From source file:org.broad.igv.Accumulator.java
public void finish() {
    if (isFinished) {
        return;
    }
    mean = Float.isNaN(sum) ? Float.NaN : sum / nPts;
    if (values != null) {
        if (nPts == 1) {
            for (WindowFunction wf : quantileFunctions) {
                setValue(wf, mean);
            }
        } else {
            if (values.size() > 1) {
                computePercentiles();
            }
            for (WindowFunction wf : quantileFunctions) {
                List<PercentileValue> pList = percentiles.get(wf);
                float v = Float.NaN; // <= default
                if (pList != null && pList.size() > 0) {
                    double weightedSum = 0;
                    double sumOfWeights = 0;
                    for (PercentileValue pv : pList) {
                        double weight = (double) pv.nPoints / nPts;
                        sumOfWeights += weight;
                        weightedSum += weight * pv.value;
                    }
                    v = (float) (weightedSum / sumOfWeights);
                }
                setValue(wf, v);
            }
        }
    }
    values = null;
    isFinished = true;
}
From source file:ml.shifu.shifu.core.dtrain.nn.NNWorker.java
@Override
public void load(GuaguaWritableAdapter<LongWritable> currentKey, GuaguaWritableAdapter<Text> currentValue,
        WorkerContext<NNParams, NNParams> workerContext) {
    super.count += 1;
    if ((super.count) % 5000 == 0) {
        LOG.info("Read {} records.", super.count);
    }
    float[] inputs = new float[super.featureInputsCnt];
    float[] ideal = new float[super.outputNodeCount];
    if (super.isDry) {
        // dry train, use empty data.
        addDataPairToDataSet(0,
                new BasicFloatMLDataPair(new BasicFloatMLData(inputs), new BasicFloatMLData(ideal)));
        return;
    }
    long hashcode = 0;
    float significance = 1f;
    // use guava Splitter to iterate only once
    // use NNConstants.NN_DEFAULT_COLUMN_SEPARATOR to replace getModelConfig().getDataSetDelimiter();
    // super follows the function in akka mode.
    int index = 0, inputsIndex = 0, outputIndex = 0;
    String[] fields = Lists.newArrayList(this.splitter.split(currentValue.getWritable().toString()))
            .toArray(new String[0]);
    int pos = 0;
    for (pos = 0; pos < fields.length;) {
        String input = fields[pos];
        // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 0f)
        float floatValue = input.length() == 0 ? 0f : NumberFormatUtils.getFloat(input, 0f);
        // NaN sometimes appears in input data; treat it as a missing value.
        // TODO: handle according to norm type
        floatValue = (Float.isNaN(floatValue) || Double.isNaN(floatValue)) ? 0f : floatValue;
        if (pos == fields.length - 1) {
            // if not weighted, set significance directly to 1f.
            // TODO: how to process data that is non-weighted at first and weighted later?
            if (StringUtils.isBlank(modelConfig.getWeightColumnName())) {
                significance = 1f;
                // break here if we reach the weight column, which is the last column
                break;
            }
            // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 1f)
            significance = input.length() == 0 ? 1f : NumberFormatUtils.getFloat(input, 1f);
            // if invalid weight, set it to 1f and warn in log
            if (Float.compare(significance, 0f) < 0) {
                LOG.warn("The {} record in current worker weight {} is less than 0f, it is invalid, set it to 1.",
                        count, significance);
                significance = 1f;
            }
            // the last field is significance, break here
            break;
        } else {
            ColumnConfig columnConfig = super.columnConfigList.get(index);
            if (columnConfig != null && columnConfig.isTarget()) {
                if (isLinearTarget || modelConfig.isRegression()) {
                    ideal[outputIndex++] = floatValue;
                } else {
                    if (modelConfig.getTrain().isOneVsAll()) {
                        // if one-vs-all, set the correlated ideal value according to trainerId: in the trainer
                        // with id 0, target 0 is treated as 1 and others as 0. Target values are indices of
                        // tags, e.g. [0, 1, 2, 3] for ["a", "b", "c", "d"]
                        ideal[outputIndex++] = Float.compare(floatValue, trainerId) == 0 ? 1f : 0f;
                    } else {
                        if (modelConfig.getTags().size() == 2) {
                            // if only 2 classes, the output is 1 node. target = 0 means 0 is the index for
                            // positive prediction: set positive to 1 and negative to 0
                            int ideaIndex = (int) floatValue;
                            ideal[0] = ideaIndex == 0 ? 1f : 0f;
                        } else {
                            // for multiple classification
                            int ideaIndex = (int) floatValue;
                            ideal[ideaIndex] = 1f;
                        }
                    }
                }
                pos++;
            } else {
                if (subFeatureSet.contains(index)) {
                    if (columnConfig.isMeta() || columnConfig.isForceRemove()) {
                        // it shouldn't happen here
                        pos += 1;
                    } else if (columnConfig != null && columnConfig.isNumerical()
                            && modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT)) {
                        for (int k = 0; k < columnConfig.getBinBoundary().size() + 1; k++) {
                            String tval = fields[pos];
                            // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 0f)
                            float fval = tval.length() == 0 ? 0f : NumberFormatUtils.getFloat(tval, 0f);
                            // NaN sometimes appears in input data; treat it as a missing value.
                            // TODO: handle according to norm type
                            fval = (Float.isNaN(fval) || Double.isNaN(fval)) ? 0f : fval;
                            inputs[inputsIndex++] = fval;
                            pos++;
                        }
                    } else if (columnConfig != null && columnConfig.isCategorical()
                            && (modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ZSCALE_ONEHOT)
                                    || modelConfig.getNormalizeType()
                                            .equals(ModelNormalizeConf.NormType.ONEHOT))) {
                        for (int k = 0; k < columnConfig.getBinCategory().size() + 1; k++) {
                            String tval = fields[pos];
                            // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 0f)
                            float fval = tval.length() == 0 ? 0f : NumberFormatUtils.getFloat(tval, 0f);
                            // NaN sometimes appears in input data; treat it as a missing value.
                            // TODO: handle according to norm type
                            fval = (Float.isNaN(fval) || Double.isNaN(fval)) ? 0f : fval;
                            inputs[inputsIndex++] = fval;
                            pos++;
                        }
                    } else {
                        inputs[inputsIndex++] = floatValue;
                        pos++;
                    }
                    hashcode = hashcode * 31 + Double.valueOf(floatValue).hashCode();
                } else {
                    if (!CommonUtils.isToNormVariable(columnConfig, hasCandidates, modelConfig.isRegression())) {
                        pos += 1;
                    } else if (columnConfig.isNumerical()
                            && modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT)
                            && columnConfig.getBinBoundary() != null && columnConfig.getBinBoundary().size() > 0) {
                        pos += (columnConfig.getBinBoundary().size() + 1);
                    } else if (columnConfig.isCategorical()
                            && (modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ZSCALE_ONEHOT)
                                    || modelConfig.getNormalizeType()
                                            .equals(ModelNormalizeConf.NormType.ONEHOT))
                            && columnConfig.getBinCategory().size() > 0) {
                        pos += (columnConfig.getBinCategory().size() + 1);
                    } else {
                        pos += 1;
                    }
                }
            }
        }
        index += 1;
    }
    if (index != this.columnConfigList.size() || pos != fields.length - 1) {
        throw new RuntimeException("Wrong data indexing. ColumnConfig index = " + index
                + ", while it should be " + columnConfigList.size() + ". " + "Data Pos = " + pos
                + ", while it should be " + (fields.length - 1));
    }
    // the output delimiter in norm can now be set by the user; if a special delimiter was set and later
    // changed, this exception helps to quickly find such an issue.
    if (inputsIndex != inputs.length) {
        String delimiter = workerContext.getProps().getProperty(Constants.SHIFU_OUTPUT_DATA_DELIMITER,
                Constants.DEFAULT_DELIMITER);
        throw new RuntimeException("Input length is inconsistent with parsing size. Input original size: "
                + inputs.length + ", parsing size:" + inputsIndex + ", delimiter:" + delimiter + ".");
    }
    // sample-negative-only logic here
    if (modelConfig.getTrain().getSampleNegOnly()) {
        if (this.modelConfig.isFixInitialInput()) {
            // if fixInitialInput, sample out negative records whose hashcode falls in the (1 - sampleRate) range
            int startHashCode = (100 / this.modelConfig.getBaggingNum()) * this.trainerId;
            // here BaggingSampleRate means how much data will be used in training and validation; if it is
            // 0.8, we should take 1 - 0.8 to compute endHashCode
            int endHashCode = startHashCode
                    + Double.valueOf((1d - this.modelConfig.getBaggingSampleRate()) * 100).intValue();
            if ((modelConfig.isRegression()
                    || (modelConfig.isClassification() && modelConfig.getTrain().isOneVsAll())) // regression or onevsall
                    && (int) (ideal[0] + 0.01d) == 0 // negative record
                    && isInRange(hashcode, startHashCode, endHashCode)) {
                return;
            }
        } else {
            // if not fixed initial input, for regression or onevsall multiple classification:
            // if negative record
            if ((modelConfig.isRegression()
                    || (modelConfig.isClassification() && modelConfig.getTrain().isOneVsAll())) // regression or onevsall
                    && (int) (ideal[0] + 0.01d) == 0 // negative record
                    && Double.compare(super.sampelNegOnlyRandom.nextDouble(),
                            this.modelConfig.getBaggingSampleRate()) >= 0) {
                return;
            }
        }
    }
    FloatMLDataPair pair = new BasicFloatMLDataPair(new BasicFloatMLData(inputs), new BasicFloatMLData(ideal));
    // up-sampling logic: just add more weight while the bagging sampling rate stays unchanged
    if (modelConfig.isRegression() && isUpSampleEnabled() && Double.compare(ideal[0], 1d) == 0) {
        // Double.compare(ideal[0], 1d) == 0 means positive tags; sample() + 1 to avoid a sample count of 0
        pair.setSignificance(significance * (super.upSampleRng.sample() + 1));
    } else {
        pair.setSignificance(significance);
    }
    boolean isValidation = false;
    if (workerContext.getAttachment() != null && workerContext.getAttachment() instanceof Boolean) {
        isValidation = (Boolean) workerContext.getAttachment();
    }
    boolean isInTraining = addDataPairToDataSet(hashcode, pair, isValidation);
    // do bagging sampling only for training data
    if (isInTraining) {
        float subsampleWeights = sampleWeights(pair.getIdealArray()[0]);
        if (isPositive(pair.getIdealArray()[0])) {
            this.positiveSelectedTrainCount += subsampleWeights * 1L;
        } else {
            this.negativeSelectedTrainCount += subsampleWeights * 1L;
        }
        // set weights to significance; if 0, significance will be 0, that is bagging sampling
        pair.setSignificance(pair.getSignificance() * subsampleWeights);
    } else {
        // for validation data, the bagging sampling logic might also apply; but since the validation set is
        // only used to compute validation error, skipping real sampling is ok.
    }
}
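The recurring pattern above, replacing NaN with a default before a value is fed to the network, can be isolated into a small helper. A minimal sketch; the helper name is illustrative, not from Shifu:

public class MissingValueDemo {
    // Treat NaN (and empty or unparsable tokens) as missing and substitute a
    // default, mirroring the floatValue/fval handling in the worker above.
    static float parseOrDefault(String token, float dflt) {
        if (token == null || token.length() == 0) {
            return dflt;
        }
        try {
            float v = Float.parseFloat(token);
            return Float.isNaN(v) ? dflt : v;
        } catch (NumberFormatException e) {
            return dflt;
        }
    }

    public static void main(String[] args) {
        System.out.println(parseOrDefault("3.14", 0f)); // 3.14
        System.out.println(parseOrDefault("NaN", 0f));  // 0.0
        System.out.println(parseOrDefault("", 0f));     // 0.0
    }
}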
From source file:com.facebook.react.views.toolbar.ReactToolbarManager.java
@ReactProp(name = "contentInsetEnd", defaultFloat = Float.NaN)
public void setContentInsetEnd(ReactToolbar view, float insetEnd) {
    int inset = Float.isNaN(insetEnd) ? getDefaultContentInsets(view.getContext())[1]
            : Math.round(PixelUtil.toPixelFromDIP(insetEnd));
    view.setContentInsetsRelative(view.getContentInsetStart(), inset);
}
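Because every real float, including 0, is a potentially valid inset, NaN is the natural "prop not set" default: Float.isNaN then distinguishes "use the platform default" from "use the caller's value". A minimal standalone sketch of the same pattern:

public class UnsetDefaultDemo {
    static final float UNSET = Float.NaN; // no real value can collide with this

    static float resolve(float requested, float platformDefault) {
        // NaN means "not supplied"; any finite value (including 0) is honored
        return Float.isNaN(requested) ? platformDefault : requested;
    }

    public static void main(String[] args) {
        System.out.println(resolve(UNSET, 16f)); // 16.0 (falls back)
        System.out.println(resolve(0f, 16f));    // 0.0 (explicit zero is kept)
    }
}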
From source file:com.androzic.GPSInfo.java
private void updateGPSInfo() {
    switch (application.gpsStatus) {
    case LocationService.GPS_OK:
        satsValue.setText(String.valueOf(application.gpsFSats) + "/" + String.valueOf(application.gpsTSats));
        break;
    case LocationService.GPS_OFF:
        satsValue.setText(R.string.sat_stop);
        break;
    case LocationService.GPS_SEARCHING:
        satsValue.setText(String.valueOf(application.gpsFSats) + "/" + String.valueOf(application.gpsTSats));
        satsValue.startAnimation(shake);
        break;
    }
    float hdop = application.getHDOP();
    if (!Float.isNaN(hdop))
        hdopValue.setText(String.format("%.1f", hdop));
    float vdop = application.getVDOP();
    if (!Float.isNaN(vdop))
        vdopValue.setText(String.format("%.1f", vdop));
    if (application.lastKnownLocation != null) {
        Date date = new Date(application.lastKnownLocation.getTime());
        lastfixValue.setText(DateFormat.getDateFormat(application).format(date) + " "
                + DateFormat.getTimeFormat(application).format(date));
        accuracyValue.setText(application.lastKnownLocation.hasAccuracy()
                ? StringFormatter.distanceH(application.lastKnownLocation.getAccuracy(), "%.1f", 1000)
                : "N/A");
    }
}
From source file:com.choicemaker.cm.modelmaker.gui.panels.HoldVsAccuracyPlotPanel.java
private void buildPanel() {
    XYSeriesCollection dataset = new XYSeriesCollection();
    String title = ChoiceMakerCoreMessages.m.formatMessage("train.gui.modelmaker.panel.holdvsacc.title");
    data = new XYSeries(title);
    dataset.addSeries(data);
    final PlotOrientation orientation = PlotOrientation.VERTICAL;
    chart = ChartFactory.createXYLineChart(title,
            ChoiceMakerCoreMessages.m.formatMessage("train.gui.modelmaker.panel.holdvsacc.cm.accuracy"),
            ChoiceMakerCoreMessages.m.formatMessage("train.gui.modelmaker.panel.holdvsacc.holdpercentage"),
            dataset, orientation, true, true, true);
    MouseListener tableMouseListener = new MouseAdapter() {
        public void mousePressed(MouseEvent e) {
            Point origin = e.getPoint();
            JTable src = (JTable) e.getSource();
            int row = src.rowAtPoint(origin);
            int col = src.columnAtPoint(origin);
            ModelMaker mm = parent.getModelMaker();
            if (src == accuracyTable) {
                if (col < 2) {
                    if (!Float.isNaN(accuracyData[row][2]) && !Float.isNaN(accuracyData[row][3]))
                        mm.setThresholds(new Thresholds(accuracyData[row][2], accuracyData[row][3]));
                } else if (col == 2) {
                    if (!Float.isNaN(accuracyData[row][2]))
                        mm.setDifferThreshold(accuracyData[row][2]);
                } else {
                    if (!Float.isNaN(accuracyData[row][3]))
                        mm.setMatchThreshold(accuracyData[row][3]);
                }
            } else {
                if (col < 2) {
                    if (!Float.isNaN(hrData[row][2]) && !Float.isNaN(hrData[row][3]))
                        mm.setThresholds(new Thresholds(hrData[row][2], hrData[row][3]));
                } else if (col == 2) {
                    if (!Float.isNaN(hrData[row][2]))
                        mm.setDifferThreshold(hrData[row][2]);
                } else {
                    if (!Float.isNaN(hrData[row][3]))
                        mm.setMatchThreshold(hrData[row][3]);
                }
            }
        }
    };
    chart.setBackgroundPaint(getBackground());
    accuracyTable = new AccuracyTable(true, accuracies);
    accuracyTable.addMouseListener(tableMouseListener);
    accuracyPanel = getPanel(accuracyTable,
            ChoiceMakerCoreMessages.m.formatMessage("train.gui.modelmaker.panel.holdvsacc.table.hrvsacc"));
    hrTable = new AccuracyTable(false, hrs);
    hrTable.addMouseListener(tableMouseListener);
    hrPanel = getPanel(hrTable,
            ChoiceMakerCoreMessages.m.formatMessage("train.gui.modelmaker.panel.holdvsacc.table.accvshr"));
    accuracyTable.setEnabled(false);
    hrTable.setEnabled(false);
}
From source file:com.pivotal.gfxd.demo.controller.DemoController.java
@RequestMapping(value = "/predicted-load", produces = "application/json", method = RequestMethod.GET)
@ResponseBody
public ResponseEntity<TimestampValue> getPrediction() {
    long start = System.currentTimeMillis();
    long startSeconds = start / 1000;
    float predictedLoad = predictionSvc.predictedLoad(startSeconds, TimeSlice.Interval.FIVE_MINUTE);
    LOG.debug("Prediction took " + (System.currentTimeMillis() - start) + "ms + " + predictedLoad
            + " for time " + startSeconds);
    start = System.currentTimeMillis();
    float currentLoad = predictionSvc.currentLoad(startSeconds, TimeSlice.Interval.FIVE_MINUTE);
    LOG.debug("Current load query took " + (System.currentTimeMillis() - start) + "ms + " + currentLoad
            + " for time " + startSeconds);
    /*
     * This is a hack, as sometimes our query will exceed what we have in memory.
     * This can easily happen during the few seconds as we transition between
     * time slices.
     */
    if (Float.isNaN(currentLoad)) {
        currentLoad = lastCurrentLoad;
    } else {
        lastCurrentLoad = currentLoad;
    }
    /*
     * Respond with the current timestamp even though the data is calculated
     * from past data. Seeing that the client sent the delta, it can always
     * re-adjust if it wants to.
     */
    TimestampValue tv = new TimestampValue(startSeconds);
    tv.add("predict", predictedLoad);
    tv.add("current", currentLoad);
    return new ResponseEntity(tv, HttpStatus.OK);
}
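The isNaN check above implements a last-good-value fallback: when a query returns NaN, the handler substitutes the previous valid reading instead of sending NaN to the client. A minimal standalone sketch of the pattern:

public class LastGoodValueDemo {
    private float lastCurrentLoad = 0f;

    // Return the fresh reading when valid; otherwise fall back to the
    // most recent valid one, as the controller above does.
    float smooth(float reading) {
        if (Float.isNaN(reading)) {
            return lastCurrentLoad;
        }
        lastCurrentLoad = reading;
        return reading;
    }

    public static void main(String[] args) {
        LastGoodValueDemo demo = new LastGoodValueDemo();
        System.out.println(demo.smooth(42.5f));     // 42.5
        System.out.println(demo.smooth(Float.NaN)); // 42.5 (falls back)
    }
}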