List of usage examples for java.lang.Double.MAX_VALUE

public static final double MAX_VALUE — the largest positive finite value of type double, 1.7976931348623157E308.
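Before the collected examples, here is a minimal self-contained sketch of the idiom that recurs in most of them: seeding a running minimum with Double.MAX_VALUE so the first real candidate is guaranteed to replace it. The class name and sample data are invented purely for illustration.

public class MaxValueDemo {
    public static void main(String[] args) {
        double[] distances = { 3.7, 0.4, 12.9, 5.1 };

        // Double.MAX_VALUE is larger than any other finite double,
        // so the first element always passes the comparison.
        double minDist = Double.MAX_VALUE;
        int minIndex = -1;
        for (int i = 0; i < distances.length; i++) {
            if (distances[i] < minDist) {
                minDist = distances[i];
                minIndex = i;
            }
        }
        System.out.println("min = " + minDist + " at index " + minIndex); // min = 0.4 at index 1
    }
}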
From source file:com.aestel.chemistry.openEye.fp.apps.SDFFPNNFinder.java
/**
 * For each compound in inFile compare to each in refFile.
 */
private static void performReferenceSearch(String inFile, String refFile, String outFile, String tabOutput,
        SimComparatorFactory<OEMolBase, FPComparator, FPComparator> compFact, double minSim, int maxNeighbors,
        String idTag, int nCpu, String countAboveSimilarityStr, boolean outputDuplicates, boolean printAll)
        throws IOException {
    // Double.MAX_VALUE acts as a sentinel threshold that no similarity can exceed
    double countAboveSimilarity = Double.MAX_VALUE;
    if (countAboveSimilarityStr != null)
        countAboveSimilarity = Double.parseDouble(countAboveSimilarityStr);

    MultiThreadAlgortihm nnAlg;
    if (maxNeighbors > 1 || minSim > 0) {
        NNMultiFinderConsumerInterface c;
        if ("vTab".equalsIgnoreCase(tabOutput))
            c = new NNMultiFinderVTConsumer(outFile, idTag != null);
        else if (outputDuplicates)
            c = new NNMultiFinderDuplConsumer(outFile, idTag != null, countAboveSimilarityStr);
        else
            c = new NNMultiFinderConsumer(outFile, idTag != null, countAboveSimilarityStr);

        nnAlg = new MultiNNFinder<FPComparator, FPComparator>(inFile, c, compFact, refFile, idTag,
                maxNeighbors, minSim, printAll, countAboveSimilarity);
    } else {
        NNFinderConsumerInterface c;
        if ("vTab".equalsIgnoreCase(tabOutput))
            c = new NNFinderVTConsumer(outFile, idTag);
        else
            c = new NNFinderConsumer(outFile, idTag, countAboveSimilarityStr);

        nnAlg = new NNFinder<FPComparator, FPComparator>(inFile, c, compFact, refFile, idTag,
                countAboveSimilarity);
    }

    MultiThreadRunner runner = new MultiThreadRunner(nnAlg, nCpu);
    runner.run();
    runner.close();
}
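Note the sentinel pattern here: when countAboveSimilarityStr is null, countAboveSimilarity stays at Double.MAX_VALUE, and since no similarity score can exceed it, the "count above similarity" feature is effectively disabled without a separate boolean flag.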
From source file:org.uma.jmetal.util.point.impl.ArrayPointTest.java
@Test
public void shouldHashCodeReturnTheCorrectValue() {
    int dimension = 5;
    Point point = new ArrayPoint(dimension);
    point.setDimensionValue(0, 1.0);
    point.setDimensionValue(1, -2.0);
    point.setDimensionValue(2, 45.5);
    point.setDimensionValue(3, -323.234);
    point.setDimensionValue(4, Double.MAX_VALUE);

    double[] array = { 1.0, -2.0, 45.5, -323.234, Double.MAX_VALUE };

    assertEquals(Arrays.hashCode(array), point.hashCode());
}
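This test pins ArrayPoint.hashCode to Arrays.hashCode over the same values. Arrays.hashCode(double[]) hashes each element via Double.doubleToLongBits, which represents Double.MAX_VALUE exactly, so including it alongside ordinary values checks consistency at the extreme end of the finite double range.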
From source file:dm_p2.KMEANS.java
public static void runKMeans(Map<Integer, List<Double>> linkedHashMap, int[] initialCentroidsRows,
        int numOfClusters, int numOfIterations) {
    int clusterCount = 0;
    // pick the user-supplied rows as the initial centroids
    for (int k = 0; k < numOfClusters; k++) {
        Integer geneId = initialCentroidsRows[clusterCount++];
        Gene gene = new Gene(geneId, linkedHashMap.get(geneId));
        centroids.add(gene);
    }

    for (int num = 0; num < numOfIterations; num++) {
        Iterator itr = linkedHashMap.keySet().iterator();
        clusterCount = 0;
        List<Gene> geneData1;
        for (int i = 0; i < numOfClusters; i++) {
            geneData1 = new ArrayList<Gene>();
            clusters.add(geneData1);
        }
        while (itr.hasNext()) {
            // Double.MAX_VALUE guarantees the first computed distance becomes the minimum
            double minDist = Double.MAX_VALUE;
            int minDistIndex = 0;
            Integer geneId = (Integer) itr.next();
            Gene gene = new Gene(geneId, linkedHashMap.get(geneId));
            List<Double> geneExpressionValues = linkedHashMap.get(geneId);
            for (int j = 0; j < centroids.size(); j++) {
                double eucDistance = 0.0;
                List<Double> centroidExpressionValues = centroids.get(j).getExpression();
                for (int i = 0; i < centroidExpressionValues.size(); i++) {
                    double dist = geneExpressionValues.get(i) - centroidExpressionValues.get(i);
                    eucDistance += Math.pow(dist, 2);
                }
                double dist = Math.sqrt(eucDistance);
                if (dist < minDist) {
                    minDist = dist;
                    minDistIndex = j;
                }
            }
            clusters.get(minDistIndex).add(gene);
            // Store the result of each iteration in a TreeMap needed for validation.
            // This TreeMap records the cluster to which each gene gets allocated.
            geneToCluster.put(geneId, minDistIndex);
        }
        // recompute the centroid of each cluster from its current members
        centroids.clear();
        for (int i = 0; i < numOfClusters; i++) {
            ArrayList<Double> expValuesNew = calculateCentroids(clusters.get(i));
            Gene gene = new Gene(i, expValuesNew);
            centroids.add(i, gene);
        }
        if (num < numOfIterations - 1)
            clusters.clear();
    }
}
From source file:edu.stanford.cfuller.imageanalysistools.clustering.ObjectClustering.java
/**
 * Sets up a set of ClusterObjects and a set of Clusters from an Image mask with each object
 * labeled with a unique greylevel.
 *
 * @param im             The Image mask with each cluster object labeled with a unique greylevel.
 *                       These must start at 1 and be consecutive.
 * @param clusterObjects A Vector of ClusterObjects that will contain the initialized ClusterObjects
 *                       on return; this may be empty, and any contents will be erased.
 * @param clusters       A Vector of Clusters that will contain the initialized Clusters on return;
 *                       this may be empty, and any contents will be erased.
 * @param k              The number of Clusters to generate.
 * @return The number of ClusterObjects in the Image.
 */
public static int initializeObjectsAndClustersFromImage(Image im, Vector<ClusterObject> clusterObjects,
        Vector<Cluster> clusters, int k) {
    int n = 0;
    clusters.clear();
    for (int j = 0; j < k; j++) {
        clusters.add(new Cluster());
        clusters.get(j).setID(j + 1);
    }

    Histogram h = new Histogram(im);
    n = h.getMaxValue();

    clusterObjects.clear();
    for (int j = 0; j < n; j++) {
        clusterObjects.add(new ClusterObject());
        clusterObjects.get(j).setCentroidComponents(0, 0, 0);
        clusterObjects.get(j).setnPixels(0);
    }

    // accumulate pixel coordinates per labeled object
    for (ImageCoordinate i : im) {
        if (im.getValue(i) > 0) {
            ClusterObject current = clusterObjects.get((int) im.getValue(i) - 1);
            current.incrementnPixels();
            current.setCentroid(current.getCentroid().add(new Vector3D(i.get(ImageCoordinate.X),
                    i.get(ImageCoordinate.Y), i.get(ImageCoordinate.Z))));
        }
    }

    // normalize the accumulated sums to get each object's centroid
    for (int j = 0; j < n; j++) {
        ClusterObject current = clusterObjects.get(j);
        current.setCentroid(current.getCentroid().scalarMultiply(1.0 / current.getnPixels()));
    }

    // initialize clusters using the kmeans++ strategy
    double[] probs = new double[n];
    double[] cumulativeProbs = new double[n];
    java.util.Arrays.fill(probs, 0);
    java.util.Arrays.fill(cumulativeProbs, 0);

    // choose the initial cluster
    int initialClusterObject = (int) Math.floor(n * RandomGenerator.rand());
    clusters.get(0).setCentroid(clusterObjects.get(initialClusterObject).getCentroid());
    clusters.get(0).getObjectSet().add(clusterObjects.get(initialClusterObject));

    for (int j = 0; j < n; j++) {
        clusterObjects.get(j).setCurrentCluster(clusters.get(0));
    }

    // assign the remainder of the clusters
    for (int j = 1; j < k; j++) {
        double probSum = 0;
        for (int m = 0; m < n; m++) {
            double minDist = Double.MAX_VALUE;
            Cluster bestCluster = null;
            for (int p = 0; p < j; p++) {
                double tempDist = clusterObjects.get(m).distanceTo(clusters.get(p));
                if (tempDist < minDist) {
                    minDist = tempDist;
                    bestCluster = clusters.get(p);
                }
            }
            probs[m] = minDist;
            probSum += minDist;
            clusterObjects.get(m).setCurrentCluster(bestCluster);
        }

        for (int m = 0; m < n; m++) {
            probs[m] = probs[m] / probSum;
            if (m == 0) {
                cumulativeProbs[m] = probs[m];
            } else {
                cumulativeProbs[m] = cumulativeProbs[m - 1] + probs[m];
            }
        }

        // sample the next center proportionally to squared distance (kmeans++)
        double randNum = RandomGenerator.rand();
        int nextCenter = 0;
        for (int m = 0; m < n; m++) {
            if (randNum < cumulativeProbs[m]) {
                nextCenter = m;
                break;
            }
        }
        clusters.get(j).setCentroid(clusterObjects.get(nextCenter).getCentroid());
    }

    // final pass: assign every object to its nearest cluster
    for (int m = 0; m < n; m++) {
        double minDist = Double.MAX_VALUE;
        Cluster bestCluster = null;
        for (int p = 0; p < k; p++) {
            double tempDist = clusterObjects.get(m).distanceTo(clusters.get(p));
            if (tempDist < minDist) {
                minDist = tempDist;
                bestCluster = clusters.get(p);
            }
        }
        clusterObjects.get(m).setCurrentCluster(bestCluster);
        bestCluster.getObjectSet().add(clusterObjects.get(m));
    }

    return n;
}
From source file:com.joptimizer.algebra.Matrix1NormRescaler.java
/**
 * Computes the infinity norm (the largest absolute value) of row r of a symmetric
 * matrix that is stored only in its subdiagonal elements.
 *
 * @param ASymm symmetric matrix filled in its subdiagonal elements
 * @param r     the index of the row
 * @return the infinity norm of row r
 */
public double getRowInfinityNorm(final DoubleMatrix2D ASymm, final int r) {
    // -Double.MAX_VALUE (not Double.MIN_VALUE) is the correct seed for a running maximum
    final double[] maxValueHolder = new double[] { -Double.MAX_VALUE };

    IntIntDoubleFunction myFunct = new IntIntDoubleFunction() {
        public double apply(int i, int j, double pij) {
            maxValueHolder[0] = Math.max(maxValueHolder[0], Math.abs(pij));
            return pij;
        }
    };

    // view A row from starting element to diagonal
    DoubleMatrix2D AR = ASymm.viewPart(r, 0, 1, r + 1);
    AR.forEachNonZero(myFunct);

    // view A col from diagonal to final element
    DoubleMatrix2D AC = ASymm.viewPart(r, r, ASymm.rows() - r, 1);
    AC.forEachNonZero(myFunct);

    return maxValueHolder[0];
}
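The -Double.MAX_VALUE seed above is deliberate: Double.MIN_VALUE is the smallest positive double (4.9E-324), not the most negative one, so it cannot seed a running maximum over values that may stay negative. A tiny standalone check (class name is ours, for illustration only):

public class MinValuePitfall {
    public static void main(String[] args) {
        System.out.println(Double.MIN_VALUE);  // 4.9E-324: tiny, but positive
        System.out.println(-Double.MAX_VALUE); // -1.7976931348623157E308: the true minimum

        // Seeding a running maximum with Double.MIN_VALUE silently masks negative inputs:
        System.out.println(Math.max(Double.MIN_VALUE, -5.0));  // 4.9E-324 (wrong seed)
        System.out.println(Math.max(-Double.MAX_VALUE, -5.0)); // -5.0 (correct seed)
    }
}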
From source file:com.mycompany.trafficimportfileconverter2.Main2Controller.java
/**
 * Initializes the controller class.
 */
@Override
public void initialize(URL url, ResourceBundle rb) {
    prefs = Preferences.userNodeForPackage(this.getClass());
    initDateArrays();
    dirchooser = new DirectoryChooser();
    outputDir.addListener(new ChangeListener<File>() {
        @Override
        public void changed(ObservableValue<? extends File> observable, File oldValue, File newValue) {
            lblOutputLoc.setText(newValue.getAbsolutePath());
            prefs.put(OUTPUT_DIR_LOC, newValue.getAbsolutePath());
        }
    });
    setOutputDir(safeFileSet(new File(prefs.get(OUTPUT_DIR_LOC, "./"))));
    try {
        dirchooser.setInitialDirectory(getOutputDir());
    } catch (Exception e) {
        System.out.println("Error setting init dir: " + e);
        e.printStackTrace();
    }
    filechooser = new FileChooser();
    try {
        filechooser.setInitialDirectory(new File(prefs.get(INPUT_DIR_LOC, "./")));
    } catch (Exception e) {
        System.out.println("Error setting init directory of file chooser: " + e);
        e.printStackTrace();
    }
    setExtension(prefs.get(EXTENSION, ".gen"));

    // Save the input file's directory in preferences every time the input file is reselected.
    inputFile.addListener(new ChangeListener<File>() {
        @Override
        public void changed(ObservableValue<? extends File> observable, File oldValue, File newValue) {
            lblInputFile.setText(newValue.getAbsolutePath());
            prefs.put(INPUT_DIR_LOC, newValue.getParent());
            reloadFileData(newValue);
        }
    });
    txtAreaOutput.textProperty().addListener(new ChangeListener<Object>() {
        @Override
        public void changed(ObservableValue<?> observable, Object oldValue, Object newValue) {
            // Double.MAX_VALUE scrolls to the bottom; Double.MIN_VALUE would scroll to the top.
            txtAreaOutput.setScrollTop(Double.MAX_VALUE);
        }
    });
    filechooser.getExtensionFilters().add(new ExtensionFilter("Tab Separated Values", "*.tsv", "*.TSV"));
    autoSearch();
    log("Help document located:");
    log(helpurl);
}
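The scrollTop trick relies on JavaFX clamping the requested offset to the scrollable range, so Double.MAX_VALUE reliably pins the TextArea to its bottom after every append, while (as the original comment notes) Double.MIN_VALUE, being effectively zero, leaves it at the top.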
From source file:com.ibm.bi.dml.hops.rewrite.RewriteMatrixMultChainOptimization.java
/**
 * mmChainDP(): Core method to perform dynamic programming on a given array
 * of matrix dimensions.
 *
 * Thomas H. Cormen, Charles E. Leiserson, Ronald L. Rivest, Clifford Stein:
 * Introduction to Algorithms, Third Edition, MIT Press, page 395.
 */
private int[][] mmChainDP(double[] dimArray, int size) {
    double[][] dpMatrix = new double[size][size]; // min cost table
    int[][] split = new int[size][size]; // min cost index table

    // init minimum costs for chains of length 1
    for (int i = 0; i < size; i++) {
        Arrays.fill(dpMatrix[i], 0);
        Arrays.fill(split[i], -1);
    }

    // compute cost-optimal chains for increasing chain sizes
    for (int l = 2; l <= size; l++) { // chain length
        for (int i = 0; i < size - l + 1; i++) {
            int j = i + l - 1;

            // find cost of (i,j); Double.MAX_VALUE ensures any real cost is smaller
            dpMatrix[i][j] = Double.MAX_VALUE;
            for (int k = i; k <= j - 1; k++) {
                // recursive cost computation
                double cost = dpMatrix[i][k] + dpMatrix[k + 1][j]
                        + (dimArray[i] * dimArray[k + 1] * dimArray[j + 1]);

                // prune suboptimal
                if (cost < dpMatrix[i][j]) {
                    dpMatrix[i][j] = cost;
                    split[i][j] = k;
                }
            }

            if (LOG.isTraceEnabled()) {
                LOG.trace("mmchainopt [i=" + (i + 1) + ",j=" + (j + 1) + "]: costs = " + dpMatrix[i][j]
                        + ", split = " + (split[i][j] + 1));
            }
        }
    }

    return split;
}
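The Double.MAX_VALUE assignment to dpMatrix[i][j] plays the role of +infinity in the CLRS recurrence: every real chain cost is finite, so the first candidate split always wins the comparison. Below is a standalone sketch of the same recurrence, written here as an independent harness (the class and method names are ours, not SystemML's), that can be used to sanity-check a cost table:

public class MatrixChainSketch {

    // dims[i] and dims[i + 1] are the row/column counts of matrix i
    static double mmChainCost(double[] dims) {
        int n = dims.length - 1; // number of matrices in the chain
        double[][] cost = new double[n][n]; // cost[i][j] = min cost of multiplying i..j
        for (int len = 2; len <= n; len++) {
            for (int i = 0; i + len - 1 < n; i++) {
                int j = i + len - 1;
                cost[i][j] = Double.MAX_VALUE; // sentinel: any real cost is smaller
                for (int k = i; k < j; k++) {
                    double c = cost[i][k] + cost[k + 1][j] + dims[i] * dims[k + 1] * dims[j + 1];
                    if (c < cost[i][j])
                        cost[i][j] = c;
                }
            }
        }
        return cost[0][n - 1];
    }

    public static void main(String[] args) {
        // (10x30)(30x5)(5x60): the best order is (A1*A2)*A3
        System.out.println(mmChainCost(new double[] { 10, 30, 5, 60 })); // 4500.0
    }
}

For dims {10, 30, 5, 60} the optimal order costs 1500 + 3000 = 4500 scalar multiplications, which the sketch reproduces; Double.POSITIVE_INFINITY would serve equally well as the seed.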
From source file:hu.unimiskolc.iit.distsys.MultiCloudUser.java
@Override
public void tick(long fires) {
    for (int i = minindex; i < jobs.size(); i++) {
        final Job toprocess = jobs.get(i);
        long submittime = toprocess.getSubmittimeSecs() * 1000;
        if (minindex == jobs.size() - 3) {
            minindex += 0; // no-op; presumably a leftover debugging anchor
        }
        if (fires == submittime) {
            minindex++;
            ConstantConstraints cc = new ConstantConstraints(toprocess.nprocs, ExercisesBase.minProcessingCap,
                    ExercisesBase.minMem / toprocess.nprocs);
            VirtualMachine vm = null;
            for (ProviderRecord pr : records) {
                vm = pr.getFreeVM(cc);
                if (vm != null)
                    break;
            }
            if (vm == null) {
                // pick the provider with the best price/success-ratio combination;
                // Double.MAX_VALUE makes the first candidate always win the initial comparison
                ProviderRecord prBest = null;
                double priceSuccessCombined = Double.MAX_VALUE;
                for (ProviderRecord pr : records) {
                    double currPriceCombo = ((IaaSForwarder) pr.service).getResourceQuote(cc)
                            / pr.getSuccessRatio();
                    if (currPriceCombo < priceSuccessCombined) {
                        priceSuccessCombined = currPriceCombo;
                        prBest = pr;
                    }
                }
                try {
                    final ProviderRecord theProvider = prBest;
                    final VirtualMachine theNewVM = theProvider.getNewVM(cc, 1)[0];
                    theNewVM.subscribeStateChange(new VirtualMachine.StateChange() {
                        @Override
                        public void stateChanged(VirtualMachine vm, State oldState, State newState) {
                            switch (newState) {
                            case RUNNING:
                                // our VM is working great! we dispatch the job to it,
                                // then fall through to cancel our listener
                                allocateVMforJob(theNewVM, toprocess);
                            case DESTROYED:
                            case SHUTDOWN:
                            case NONSERVABLE:
                                // cancel our listener
                                vm.unsubscribeStateChange(this);
                            default:
                                // do nothing
                            }
                        }
                    });
                    // do not queue the VM forever: if it is not scheduled within
                    // 20 minutes it is cancelled and the job is forgotten about
                    new DeferredEvent(1200000) {
                        @Override
                        protected void eventAction() {
                            if (theNewVM.getState().equals(VirtualMachine.State.DESTROYED)
                                    && theNewVM.getTotalProcessed() == 0) {
                                try {
                                    theProvider.service.terminateVM(theNewVM, false);
                                } catch (Exception e) {
                                    throw new RuntimeException(e);
                                }
                            }
                        }
                    };
                } catch (VMManagementException e) {
                    // The job has failed prematurely; we ignore it, but the getNewVM call
                    // above reports the failure and reduces the chances of choosing this
                    // particular provider for a while.
                } catch (Exception e) {
                    // This is unexpected behavior!
                    throw new RuntimeException(e);
                }
            } else {
                // there was a VM already that is good for this job
                allocateVMforJob(vm, toprocess);
            }
        } else if (fires < submittime) {
            updateFrequency(submittime - fires);
            break;
        } else {
            minindex += 0; // no-op (fires > submittime); the loop simply moves on
        }
    }
    if (minindex == jobs.size()) {
        unsubscribe();
        prepareForCompletion = true;
    }
}
From source file:classif.gmm.DTWGMMSymbolicSequence.java
private void delcluster(int k) {
    centroidsPerCluster[k] = null;
    sigmasPerCluster[k] = Double.NaN;
    Sequence[] newcenter = new Sequence[nbClusters - 1];
    double[] newsigma = new double[nbClusters - 1];
    nbClusters = nbClusters - 1;

    // compact the centroid and sigma arrays, skipping the deleted entry
    int flag = 0;
    for (int i = 0; i < centroidsPerCluster.length; i++) {
        if (centroidsPerCluster[i] != null) {
            newcenter[i - flag] = centroidsPerCluster[i];
            newsigma[i - flag] = sigmasPerCluster[i];
        } else
            flag++;
    }
    centroidsPerCluster = newcenter;
    sigmasPerCluster = newsigma;

    ArrayList<Sequence>[] affectation = new ArrayList[nbClusters];
    int[] clusterMap = new int[data.size()];
    for (int i = 0; i < affectation.length; i++) {
        affectation[i] = new ArrayList<Sequence>();
    }

    for (int j = 0; j < data.size(); j++) {
        double minDist = Double.MAX_VALUE;
        // for each remaining cluster, measure the distance to data point j
        for (int i = 0; i < centroidsPerCluster.length; i++) {
            if (centroidsPerCluster[i] != null) {
                double currentDist = centroidsPerCluster[i].distance(data.get(j));
                if (currentDist < minDist) {
                    clusterMap[j] = i;
                    minDist = currentDist;
                }
            }
        }
        // assign data point j to its nearest cluster
        affectation[clusterMap[j]].add(data.get(j));
    }

    for (int i = 0; i < nbClusters; i++) {
        if (centroidsPerCluster[i] != null) { // guard against a deleted/empty cluster
            // find the center
            nck[i] = affectation[i].size();
            // compute sigma
            double sumOfSquares = centroidsPerCluster[i].sumOfSquares(affectation[i]);
            sigmasPerCluster[i] = Math.sqrt(sumOfSquares / nck[i]);
        } else
            System.err.println("ERROR");
    }
}
From source file:main.java.edu.mit.compbio.qrf.QRF_spark.java
public void doMain(String[] args) throws Exception {
    CmdLineParser parser = new CmdLineParser(this);
    //parser.setUsageWidth(80);
    try {
        if (help || args.length < 2)
            throw new CmdLineException(USAGE);
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        System.err.println(e.getMessage());
        // print the list of available options
        parser.printUsage(System.err);
        System.err.println();
        return;
    }

    // read the input bed file; for each row, build a labeled feature vector
    String modelFile = arguments.get(0);
    String inputFile = arguments.get(1);
    initiate();

    SparkConf sparkConf = new SparkConf().setAppName("QRF_spark");
    JavaSparkContext sc = new JavaSparkContext(sparkConf);

    JavaRDD<LabeledPoint> inputData = sc.textFile(inputFile).map(new Function<String, LabeledPoint>() {
        @Override
        public LabeledPoint call(String line) throws Exception {
            String[] tmp = line.split(sep);
            double[] ds = new double[featureCols.size()];
            for (int i = 0; i < featureCols.size(); i++) {
                ds[i] = Double.parseDouble(tmp[featureCols.get(i) - 1]);
            }
            return new LabeledPoint(Double.parseDouble(tmp[labelCol - 1]), Vectors.dense(ds));
        }
    });

    if (train) {
        // k-fold cross validation over random splits of the labeled data
        JavaRDD<LabeledPoint>[] splits = inputData.randomSplit(folds, seed);
        Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
        RandomForestModel bestModel = null;
        double bestR2 = Double.NEGATIVE_INFINITY;
        // Double.MAX_VALUE sentinel: any real MSE will be smaller
        double bestMse = Double.MAX_VALUE;
        double mseSum = 0.0;
        RegressionMetrics rmBest = null;

        for (int i = 0; i < kFold; i++) {
            JavaRDD<LabeledPoint> testData = splits[i];
            JavaRDD<LabeledPoint> trainingData = null;
            for (int j = 0; j < kFold; j++) {
                if (j == i)
                    continue;
                if (trainingData != null) {
                    // union() returns a new RDD; the result must be reassigned
                    // (the original discarded it, so only one split was ever used)
                    trainingData = trainingData.union(splits[j]);
                } else {
                    trainingData = splits[j];
                }
            }

            final RandomForestModel model = RandomForest.trainRegressor(trainingData, categoricalFeaturesInfo,
                    numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed);

            // evaluate the model on the held-out fold and compute test error
            RDD<Tuple2<Object, Object>> predictionAndLabel = testData
                    .map(new Function<LabeledPoint, Tuple2<Object, Object>>() {
                        @Override
                        public Tuple2<Object, Object> call(LabeledPoint p) {
                            return new Tuple2<Object, Object>(model.predict(p.features()), p.label());
                        }
                    }).rdd();
            RegressionMetrics rm = new RegressionMetrics(predictionAndLabel);
            double r2 = rm.r2();
            mseSum += rm.meanSquaredError();
            if (r2 > bestR2) {
                bestModel = model;
                rmBest = rm;
                bestR2 = r2;
                bestMse = rm.meanSquaredError();
            }
        }

        log.info("After cross validation, best model's MSE is: " + bestMse + "\tMean MSE is: " + mseSum / kFold
                + "\tVariance explained: " + rmBest.explainedVariance() + "\tR2: " + rmBest.r2());
        bestModel.save(sc.sc(), modelFile);
    } else {
        if (outputFile == null)
            throw new IllegalArgumentException(
                    "Need to provide output file name in -outputFile for non-training mode!");

        // load the trained model
        log.info("Loading model ...");
        final RandomForestModel model = RandomForestModel.load(sc.sc(), modelFile);

        inputData.map(new Function<LabeledPoint, String>() {
            @Override
            public String call(LabeledPoint p) {
                double predict = model.predict(p.features());
                String tmp = null;
                for (double s : p.features().toArray()) {
                    if (tmp == null) {
                        tmp = String.valueOf(s);
                    } else {
                        tmp = tmp + "\t" + String.valueOf(s);
                    }
                }
                return tmp + "\t" + p.label() + "\t" + predict;
            }
        }).saveAsTextFile(outputFile + ".tmp");

        // merge Spark's part files into a single output file
        log.info("Merging files ...");
        File[] listOfFiles = new File(outputFile + ".tmp").listFiles();
        OutputStream output = new BufferedOutputStream(new FileOutputStream(outputFile, true));
        for (File f : listOfFiles) {
            if (f.isFile() && f.getName().startsWith("part-")) {
                InputStream input = new BufferedInputStream(new FileInputStream(f));
                IOUtils.copy(input, output);
                IOUtils.closeQuietly(input);
            }
        }
        IOUtils.closeQuietly(output);
        FileUtils.deleteDirectory(new File(outputFile + ".tmp"));

        // evaluate the model on the full input and report test error
        RDD<Tuple2<Object, Object>> predictionAndLabel = inputData
                .map(new Function<LabeledPoint, Tuple2<Object, Object>>() {
                    @Override
                    public Tuple2<Object, Object> call(LabeledPoint p) {
                        return new Tuple2<Object, Object>(model.predict(p.features()), p.label());
                    }
                }).rdd();
        RegressionMetrics rm = new RegressionMetrics(predictionAndLabel);
        log.info("For the test dataset, MSE is: " + rm.meanSquaredError() + "\tVariance explained: "
                + rm.explainedVariance() + "\tR2: " + rm.r2());
    }
    finish();
}