List of usage examples for java.lang Double POSITIVE_INFINITY
Field declaration: public static final double POSITIVE_INFINITY
To view the source code for java.lang.Double.POSITIVE_INFINITY, click the Source Link below.
From source file:com.github.tteofili.looseen.yay.SGM.java
/**
 * Performs weight learning on the given training examples using (configurable)
 * mini-batch gradient descent, optionally with classical or Nesterov momentum.
 *
 * @param samples the training examples
 * @return the final cost reached with the updated weights
 * @throws Exception if gradient descent fails to converge or a numerical error happens
 */
private double learnWeights(Sample... samples) throws Exception {
    int iterations = 0;
    double cost = Double.MAX_VALUE;
    int j = 0; // index of the current mini batch

    // momentum velocity accumulators, one per bias/weight matrix, initialised to zero
    RealMatrix vb = MatrixUtils.createRealMatrix(biases[0].getRowDimension(), biases[0].getColumnDimension());
    RealMatrix vb2 = MatrixUtils.createRealMatrix(biases[1].getRowDimension(), biases[1].getColumnDimension());
    RealMatrix vw = MatrixUtils.createRealMatrix(weights[0].getRowDimension(), weights[0].getColumnDimension());
    RealMatrix vw2 = MatrixUtils.createRealMatrix(weights[1].getRowDimension(), weights[1].getColumnDimension());

    long start = System.currentTimeMillis();
    int c = 1; // elapsed-minute checkpoint counter (used by the now-disabled progress logging)

    // mini-batch input/target matrices, allocated once and refilled every iteration
    RealMatrix x = MatrixUtils.createRealMatrix(configuration.batchSize, samples[0].getInputs().length);
    RealMatrix y = MatrixUtils.createRealMatrix(configuration.batchSize, samples[0].getOutputs().length);
    while (true) {
        // fill the current mini batch, wrapping around the sample set via modulo
        int i = 0;
        for (int k = j * configuration.batchSize; k < j * configuration.batchSize
                + configuration.batchSize; k++) {
            Sample sample = samples[k % samples.length];
            x.setRow(i, sample.getInputs());
            y.setRow(i, sample.getOutputs());
            i++;
        }
        j++;

        long time = (System.currentTimeMillis() - start) / 1000;
        if (iterations % (1 + (configuration.maxIterations / 100)) == 0 && time > 60 * c) {
            c += 1; // periodic progress logging used to happen here (once per elapsed minute)
        }

        // forward pass: hidden = ReLU(x * W0^T) + b0, scores = hidden * W1^T + b1
        RealMatrix w0t = weights[0].transpose();
        RealMatrix w1t = weights[1].transpose();
        RealMatrix hidden = rectifierFunction.applyMatrix(x.multiply(w0t));
        hidden.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public double visit(int row, int column, double value) {
                // add the first-layer bias to every row
                return value + biases[0].getEntry(0, column);
            }

            @Override
            public double end() {
                return 0;
            }
        });
        RealMatrix scores = hidden.multiply(w1t);
        scores.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public double visit(int row, int column, double value) {
                // add the second-layer bias to every row
                return value + biases[1].getEntry(0, column);
            }

            @Override
            public double end() {
                return 0;
            }
        });

        // apply a row-wise softmax to each window slice of the score matrix
        RealMatrix probs = scores.copy();
        int len = scores.getColumnDimension() - 1;
        for (int d = 0; d < configuration.window - 1; d++) {
            int startColumn = d * len / (configuration.window - 1);
            RealMatrix subMatrix = scores.getSubMatrix(0, scores.getRowDimension() - 1, startColumn,
                    startColumn + x.getColumnDimension());
            for (int sm = 0; sm < subMatrix.getRowDimension(); sm++) {
                probs.setSubMatrix(softmaxActivationFunction.applyMatrix(subMatrix.getRowMatrix(sm)).getData(),
                        sm, startColumn);
            }
        }

        // cross-entropy: -log of the probability assigned to each expected output column
        RealMatrix correctLogProbs = MatrixUtils.createRealMatrix(x.getRowDimension(), 1);
        correctLogProbs.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public double visit(int row, int column, double value) {
                return -Math.log(probs.getEntry(row, getMaxIndex(y.getRow(row))));
            }

            @Override
            public double end() {
                return 0;
            }
        });
        // NOTE(review): the loss is averaged over samples.length even though only
        // batchSize rows contribute to the batch — confirm this normalisation is intended.
        double dataLoss = correctLogProbs.walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            private double d = 0;

            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public void visit(int row, int column, double value) {
                d += value;
            }

            @Override
            public double end() {
                return d;
            }
        }) / samples.length;

        // L2 regularisation term: sum of squares over both weight matrices
        double reg = 0d;
        reg += weights[0].walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            private double d = 0d;

            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public void visit(int row, int column, double value) {
                d += Math.pow(value, 2);
            }

            @Override
            public double end() {
                return d;
            }
        });
        reg += weights[1].walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            private double d = 0d;

            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public void visit(int row, int column, double value) {
                d += Math.pow(value, 2);
            }

            @Override
            public double end() {
                return d;
            }
        });
        double regLoss = 0.5 * configuration.regularizationLambda * reg;
        double newCost = dataLoss + regLoss;
        if (iterations == 0) {
            // initial-cost logging used to happen here
        }
        // NOTE(review): this catches only +Infinity (not -Infinity), and a NaN cost can
        // still satisfy the convergence branch below (via iterations > maxIterations)
        // before the isNaN check runs — consider Double.isInfinite and checking NaN first.
        if (Double.POSITIVE_INFINITY == newCost) {
            throw new Exception("failed to converge at iteration " + iterations + " with alpha "
                    + configuration.alpha + " : cost going from " + cost + " to " + newCost);
        } else if (iterations > 1
                && (newCost < configuration.threshold || iterations > configuration.maxIterations)) {
            // converged (or hit the iteration budget): record the final cost and stop
            cost = newCost;
            break;
        } else if (Double.isNaN(newCost)) {
            throw new Exception("failed to converge at iteration " + iterations + " with alpha "
                    + configuration.alpha + " : cost calculation underflow");
        }

        // update registered cost
        cost = newCost;

        // backward pass: gradient of the loss w.r.t. the scores
        // (probability minus one-hot target, averaged)
        RealMatrix dscores = probs.copy();
        dscores.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public double visit(int row, int column, double value) {
                return (y.getEntry(row, column) == 1 ? (value - 1) : value) / samples.length;
            }

            @Override
            public double end() {
                return 0;
            }
        });

        // gradient on the second layer weights
        RealMatrix dW2 = hidden.transpose().multiply(dscores);
        // add the regularisation gradient to dW2
        dW2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public double visit(int row, int column, double value) {
                return value + configuration.regularizationLambda * w1t.getEntry(row, column);
            }

            @Override
            public double end() {
                return 0;
            }
        });
        // second-layer bias gradient: column sums of dscores
        RealMatrix db2 = MatrixUtils.createRealMatrix(biases[1].getRowDimension(),
                biases[1].getColumnDimension());
        dscores.walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public void visit(int row, int column, double value) {
                db2.setEntry(0, column, db2.getEntry(0, column) + value);
            }

            @Override
            public double end() {
                return 0;
            }
        });

        // back-propagate through the ReLU: clamp negative entries to zero
        RealMatrix dhidden = dscores.multiply(weights[1]);
        dhidden.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public double visit(int row, int column, double value) {
                return value < 0 ? 0 : value;
            }

            @Override
            public double end() {
                return 0;
            }
        });
        // first-layer bias gradient: column sums of dhidden
        RealMatrix db = MatrixUtils.createRealMatrix(biases[0].getRowDimension(),
                biases[0].getColumnDimension());
        dhidden.walkInOptimizedOrder(new RealMatrixPreservingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public void visit(int row, int column, double value) {
                db.setEntry(0, column, db.getEntry(0, column) + value);
            }

            @Override
            public double end() {
                return 0;
            }
        });

        // gradient on the first layer weights
        RealMatrix dW = x.transpose().multiply(dhidden);
        // add the regularisation gradient to dW
        dW.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
            @Override
            public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
            }

            @Override
            public double visit(int row, int column, double value) {
                return value + configuration.regularizationLambda * w0t.getEntry(row, column);
            }

            @Override
            public double end() {
                return 0;
            }
        });
        RealMatrix dWt = dW.transpose();
        RealMatrix dWt2 = dW2.transpose();
        if (configuration.useNesterovMomentum) {
            // Nesterov momentum: remember the previous velocities, update each velocity
            // as mu*v - alpha*grad, then apply -mu*vPrev + (1+mu)*v to each parameter
            final RealMatrix vbPrev = vb.copy();
            final RealMatrix vb2Prev = vb2.copy();
            final RealMatrix vwPrev = vw.copy();
            final RealMatrix vw2Prev = vw2.copy();
            vb.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * db.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            vb2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * db2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            vw.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * dWt.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            vw2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * dWt2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            // update bias
            biases[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.mu * vbPrev.getEntry(row, column)
                            + (1 + configuration.mu) * vb.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            biases[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.mu * vb2Prev.getEntry(row, column)
                            + (1 + configuration.mu) * vb2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            // update the weights
            weights[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.mu * vwPrev.getEntry(row, column)
                            + (1 + configuration.mu) * vw.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            weights[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.mu * vw2Prev.getEntry(row, column)
                            + (1 + configuration.mu) * vw2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
        } else if (configuration.useMomentum) {
            // classical momentum: v = mu*v - alpha*grad, then parameter += v
            vb.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * db.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            vb2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * db2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            vw.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * dWt.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            vw2.walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return configuration.mu * value - configuration.alpha * dWt2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            // update bias
            biases[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value + vb.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            biases[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value + vb2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            // update the weights
            weights[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value + vw.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            weights[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value + vw2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
        } else {
            // standard parameter update: plain gradient step, no momentum
            // update bias
            biases[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.alpha * db.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            biases[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.alpha * db2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            // update the weights
            weights[0].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.alpha * dWt.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
            weights[1].walkInOptimizedOrder(new RealMatrixChangingVisitor() {
                @Override
                public void start(int rows, int columns, int startRow, int endRow, int startColumn,
                        int endColumn) {
                }

                @Override
                public double visit(int row, int column, double value) {
                    return value - configuration.alpha * dWt2.getEntry(row, column);
                }

                @Override
                public double end() {
                    return 0;
                }
            });
        }
        iterations++;
    }
    return cost;
}
From source file:com.cognitect.transit.TransitTest.java
/**
 * Verifies that the special IEEE-754 values NaN, +Infinity and -Infinity are
 * encoded as the transit scalars "~zNaN", "~zINF" and "~z-INF" by both the
 * compact and the verbose JSON writer, for double as well as float inputs.
 */
public void testSpecialNumbers() throws Exception {
    // double specials — compact and verbose writers checked side by side
    assertEquals(scalar("\"~zNaN\""), writeJson(Double.NaN));
    assertEquals(scalarVerbose("\"~zNaN\""), writeJsonVerbose(Double.NaN));
    assertEquals(scalar("\"~zINF\""), writeJson(Double.POSITIVE_INFINITY));
    assertEquals(scalarVerbose("\"~zINF\""), writeJsonVerbose(Double.POSITIVE_INFINITY));
    assertEquals(scalar("\"~z-INF\""), writeJson(Double.NEGATIVE_INFINITY));
    assertEquals(scalarVerbose("\"~z-INF\""), writeJsonVerbose(Double.NEGATIVE_INFINITY));
    // float specials — must produce the same encodings as their double counterparts
    assertEquals(scalar("\"~zNaN\""), writeJson(Float.NaN));
    assertEquals(scalarVerbose("\"~zNaN\""), writeJsonVerbose(Float.NaN));
    assertEquals(scalar("\"~zINF\""), writeJson(Float.POSITIVE_INFINITY));
    assertEquals(scalarVerbose("\"~zINF\""), writeJsonVerbose(Float.POSITIVE_INFINITY));
    assertEquals(scalar("\"~z-INF\""), writeJson(Float.NEGATIVE_INFINITY));
    assertEquals(scalarVerbose("\"~z-INF\""), writeJsonVerbose(Float.NEGATIVE_INFINITY));
}
From source file:edu.harvard.iq.dataverse.dataaccess.TabularSubsetGenerator.java
public static Double[] subsetDoubleVector(InputStream in, int column, int numCases) { Double[] retVector = new Double[numCases]; Scanner scanner = new Scanner(in); scanner.useDelimiter("\\n"); for (int caseIndex = 0; caseIndex < numCases; caseIndex++) { if (scanner.hasNext()) { String[] line = (scanner.next()).split("\t", -1); // Verified: new Double("nan") works correctly, // resulting in Double.NaN; // Double("[+-]Inf") doesn't work however; // (the constructor appears to be expecting it // to be spelled as "Infinity", "-Infinity", etc. if ("inf".equalsIgnoreCase(line[column]) || "+inf".equalsIgnoreCase(line[column])) { retVector[caseIndex] = java.lang.Double.POSITIVE_INFINITY; } else if ("-inf".equalsIgnoreCase(line[column])) { retVector[caseIndex] = java.lang.Double.NEGATIVE_INFINITY; } else if (line[column] == null || line[column].equals("")) { // missing value: retVector[caseIndex] = null; } else { try { retVector[caseIndex] = new Double(line[column]); } catch (NumberFormatException ex) { retVector[caseIndex] = null; // missing value }//from ww w. ja va2s. com } } else { scanner.close(); throw new RuntimeException("Tab file has fewer rows than the stored number of cases!"); } } int tailIndex = numCases; while (scanner.hasNext()) { String nextLine = scanner.next(); if (!"".equals(nextLine)) { scanner.close(); throw new RuntimeException( "Column " + column + ": tab file has more nonempty rows than the stored number of cases (" + numCases + ")! current index: " + tailIndex + ", line: " + nextLine); } tailIndex++; } scanner.close(); return retVector; }
From source file:edu.ucla.stat.SOCR.analyses.gui.NormalPower.java
/**This method defines the specific statistical Analysis to be carried our on the user specified data. ANOVA is done in this case. */ public void doAnalysis() { ////////////System.out.println("doAnalysis"); if (dataTable.isEditing()) dataTable.getCellEditor().stopCellEditing(); Data data = new Data(); /****************************************************************** From this point, the code has been modified to work with input cells that are empty. ******************************************************************/ int yLength = 0; String cellValue = null;/*from w w w.j a v a 2 s.co m*/ ArrayList<String> yList = new ArrayList<String>(); resultPanelTextArea.append("\n\tNormal Distribution Power Analysis Results:\n"); try { for (int k = 0; k < dataTable.getRowCount(); k++) { try { cellValue = ((String) dataTable.getValueAt(k, dependentIndex)).trim(); if (cellValue != null && !cellValue.equals("")) { yList.add(yLength, cellValue); yLength++; } else { continue; // to the next for } } catch (Exception e) { // do nothing? } } } catch (Exception e) { //////System.out.println("dataTable Exception = " +e); } yData = new double[yLength]; for (int i = 0; i < yLength; i++) { yData[i] = Double.parseDouble((String) yList.get(i)); } // But the Analyses tools package takes the data in X HashMap. 
result = null; if (yLength > 0) { randomDataStep = true; } if (randomDataStep) { data.appendY("Y", yData, DataType.QUANTITATIVE); try { result = (NormalPowerResult) data.getAnalysis(analysisType); df = 0; sampleMean = 0; sampleVar = 0; try { sampleMean = result.getSampleMean(); } catch (Exception e) { } try { sampleVar = result.getSampleVariance(); } catch (Exception e) { } try { sampleSize = result.getSampleSize(); } catch (Exception e) { } sampleSE = Math.sqrt(sampleVar); sampleSizeText.setText(sampleSize + ""); if (power == Double.NaN) { power = .95; } powerText.setText(power + ""); sigmaText.setText(sampleSE + ""); mu0Text.setText(sampleMean + ""); resultPanelTextArea.append("\n\tSample Mean = " + sampleMean); resultPanelTextArea.append("\n\tSample variance = " + sampleVar); resultPanelTextArea.append("\n\tStandard Deviation = " + sampleSE); } catch (Exception e) { } } try { alpha = Double.parseDouble((String) (alphaCombo.getSelectedItem())); } catch (Exception e) { alpha = 0.05; } try { sampleSize = Integer.parseInt((String) (sampleSizeText.getText())); } catch (Exception e) { sampleSize = 0; } if (useCV) { try { sigma = Double.parseDouble((String) (sigmaZTestText.getText())); } catch (Exception e) { sigma = 0; } } else { try { sigma = Double.parseDouble((String) (sigmaText.getText())); } catch (Exception e) { sigma = 0; } } try { mu0 = Double.parseDouble((String) (mu0Text.getText())); } catch (Exception e) { mu0 = Double.POSITIVE_INFINITY; } try { muA = Double.parseDouble((String) (muAText.getText())); } catch (Exception e) { muA = Double.POSITIVE_INFINITY; } try { power = Double.parseDouble((String) (powerText.getText())); } catch (Exception e) { power = 0; } try { xValue = Double.parseDouble((String) (xValueText.getText())); } catch (Exception e) { xValue = Double.POSITIVE_INFINITY; } try { sigmaZTest = Double.parseDouble((String) (sigmaZTestText.getText())); } catch (Exception e) { sigmaZTest = Double.POSITIVE_INFINITY; } try { mu0ZTest = 
Double.parseDouble((String) (mu0ZTestText.getText())); } catch (Exception e) { mu0ZTest = Double.POSITIVE_INFINITY; } if (alpha == 0) alpha = 0.05; // default. if (sampleSize > 0) { useSampleSize = true; usePower = false; useCV = false; power = 0; if (hypothesisType == null && !randomDataStep && power == 0) { JOptionPane.showMessageDialog(this, "Generate Random Data or Select Hypothesis Parameters First!"); return; } } else if (power > 0) { //////////////System.out.println("Has power"); usePower = true; useSampleSize = false; useCV = false; sampleSize = 0; } if (useCV) { randomDataStep = false; if (mu0ZTest == Double.POSITIVE_INFINITY || xValue == Double.POSITIVE_INFINITY || sigmaZTest <= 0) { JOptionPane.showMessageDialog(this, "Enter Proper Parameters First!"); } try { result = (NormalPowerResult) data.getNormalAnalysis(mu0ZTest, xValue, sigmaZTest); } catch (Exception e) { } } if (useSampleSize) { randomDataStep = false; if (sampleSize == 0) { JOptionPane.showMessageDialog(this, "Select Parameters or Generante Random Data First!"); } else if (sampleSize != 0 && (mu0 == Double.POSITIVE_INFINITY || muA == Double.POSITIVE_INFINITY || sigma <= 0 || hypothesisType == null)) { JOptionPane.showMessageDialog(this, "You need to complete all fields in the SELECT PARAMETER seciont!"); } /* if (muA != Double.POSITIVE_INFINITY && hypothesisType == null) { JOptionPane.showMessageDialog(this, "Select Hypothesis Sign First!"); return; } */ else if (muA != Double.POSITIVE_INFINITY && muA > mu0 && hypothesisType.equals(NormalPowerResult.HYPOTHESIS_TYPE_LT)) { JOptionPane.showMessageDialog(this, "Are you sure mu_A is less than mu_0?"); return; } else if (muA != Double.POSITIVE_INFINITY && muA < mu0 && hypothesisType.equals(NormalPowerResult.HYPOTHESIS_TYPE_GT)) { JOptionPane.showMessageDialog(this, "Are you sure mu_A is greater than mu_0?"); return; } else if (muA == mu0) { JOptionPane.showMessageDialog(this, "mu_A is equal to mu_0."); return; } 
////////////////System.out.println("in gui do try "); try { result = (NormalPowerResult) data.getNormalPower(sampleSize, sigma, alpha, mu0, muA, hypothesisType); //result = (NormalPowerResult) data.getNormalPower(100, 3500, 0.05, 2500 , 2450, hypothesisType); } catch (Exception e) { //////////////////System.out.println("in gui useSampleSize " + e); } } else if (usePower) { randomDataStep = false; if (power == 0 || mu0 == Double.POSITIVE_INFINITY || muA == Double.POSITIVE_INFINITY || sigma <= 0 || hypothesisType == null) { JOptionPane.showMessageDialog(this, "Select Parameters First!"); return; } else if (muA != Double.POSITIVE_INFINITY && muA > mu0 && hypothesisType.equals(NormalPowerResult.HYPOTHESIS_TYPE_LT)) { JOptionPane.showMessageDialog(this, "Are you sure mu_A is less than mu_0?"); return; } else if (muA != Double.POSITIVE_INFINITY && muA < mu0 && hypothesisType.equals(NormalPowerResult.HYPOTHESIS_TYPE_GT)) { JOptionPane.showMessageDialog(this, "Are you sure mu_A is greater than mu_0?"); return; } else if (muA == mu0) { JOptionPane.showMessageDialog(this, "mu_A is equal to mu_0."); return; } try { result = (NormalPowerResult) data.getNormalPowerSampleSize(power, sigma, alpha, mu0, muA, hypothesisType); //result = (NormalPowerResult) data.getNormalPower(100, 3500, 0.05, 2500 , 2450, hypothesisType); ////////////System.out.println("in gui usePower try result = " + result); } catch (Exception e) { ////////////System.out.println("in gui usePower " + e); } } else if (useCV) { try { zScore = result.getZScore(); resultPanelTextArea.append("\n\tGiven:"); resultPanelTextArea.append("\n\tMean of the Normal Distribution (mu_0) = " + mu0ZTest); resultPanelTextArea.append("\n\tStandard Deviation of the Normal Distribution (sigma) = " + sigma); resultPanelTextArea.append("\n\tValue of the point (X) = " + xValue); resultPanelTextArea.append("\n\n\tZ-Score = " + zScore); } catch (Exception e) { } try { probGreater = result.getPValue(); resultPanelTextArea.append("\n\tP(X > 
mu_0) = " + probGreater); resultPanelTextArea.append("\n\tP(X < mu_0) = " + (1 - probGreater)); } catch (Exception e) { } } if (usePower || useSampleSize) { try { sampleSize = result.getSampleSize(); } catch (Exception e) { } try { power = result.getPower(); } catch (Exception e) { } try { meanPlotPoints = result.getMeanPlotPoints(); } catch (Exception e) { } try { powerPlotPoints = result.getPowerPlotPoints(); } catch (Exception e) { } try { multipleMeanPlotPoints = result.getMultipleMeanPlotPoints(); } catch (Exception e) { } try { multiplePowerPlotPoints = result.getMultiplePowerPlotPoints(); } catch (Exception e) { } try { plotDescription = result.getPlotDescription(); } catch (Exception e) { } try { resultHypothesisType = result.getResultHypothesisType(); //resultPanelTextArea.append("\n\tpowerPlotPoints = "+powerPlotPoints[0] ); } catch (Exception e) { } if (sampleSize > 0) resultPanelTextArea.append("\n\tSample Size = " + sampleSize); if (power > 0) { try { resultPanelTextArea.append("\n\tPower = " + (power + "").substring(0, 15)); } catch (Exception e) { resultPanelTextArea.append("\n\tPower = " + power); } } if (resultHypothesisType != null) { if (resultHypothesisType.equals(NormalPowerResult.HYPOTHESIS_TYPE_NE)) { resultPanelTextArea.append("\n\n\tAlternative Hypothesis: mu_A is not equal to mu_0."); } else if (resultHypothesisType.equals(NormalPowerResult.HYPOTHESIS_TYPE_LT)) { resultPanelTextArea.append("\n\n\tAlternative Hypothesis: mu_A is less than mu_0."); } else if (resultHypothesisType.equals(NormalPowerResult.HYPOTHESIS_TYPE_GT)) { resultPanelTextArea.append("\n\n\tAlternative Hypothesis: mu_A is greater than mu_0."); } } resultPanelTextArea.append("\n\n\tmu_0 = " + mu0); if (muA < Double.POSITIVE_INFINITY) { resultPanelTextArea.append("\n\tmu_A = " + muA); } resultPanelTextArea.append("\n\tsigma = " + sigma); } resultPanelTextArea.setForeground(Color.BLUE); updateResults(); try { doGraph(); } catch (Exception e) { } if (useSampleSize || 
usePower) { try { doRawDataNormalCurve(); } catch (Exception e) { } try { modelObject = new NormalFit_Modeler(); sampleSE = sigma / Math.sqrt(sampleSize); doSampleMeanNormalCurve(); } catch (Exception e) { } } else if (useCV) { try { doZTestNormalCurve(); } catch (Exception e) { } } }
From source file:fxts.stations.core.TradeDesk.java
/** * This method is used to format double values with pips precision * (price, rate, interest etc) to string representation * that will be shown in tables, dialogs and other UI controls. * * @param aCurrency currency pair/*from w w w .j ava2s . co m*/ * @param aPrice price to format * * @return formatted string of adPrice according to asCurrency */ public static String formatPrice(String aCurrency, double aPrice) { if (aCurrency == null) { return null; } try { TradingSessionStatus sessionStatus = TradingServerSession.getInstance().getTradingSessionStatus(); TradingSecurity security = sessionStatus.getSecurity(aCurrency); int precision = security.getFXCMSymPrecision(); return PRICE_FORMATS.get(precision).format(aPrice); } catch (Exception e) { //checks for infinity if (aPrice == Double.POSITIVE_INFINITY || aPrice == Double.NEGATIVE_INFINITY) { return "Infinity"; } if (isCurrencyInThePair("JPY", aCurrency)) { return PRICE_FORMATS.get(3).format(aPrice); } else { return PRICE_FORMATS.get(5).format(aPrice); } } }
From source file:com.opengamma.analytics.financial.model.volatility.BlackScholesFormulaRepository.java
/**
 * The spot gamma, the second-order sensitivity of the spot option value to the spot.
 * <p>
 * $\frac{\partial^2 FV}{\partial^2 f}$
 *
 * @param spot The spot value of the underlying
 * @param strike The Strike
 * @param timeToExpiry The time-to-expiry
 * @param lognormalVol The log-normal volatility
 * @param interestRate The interest rate
 * @param costOfCarry The cost-of-carry rate
 * @return The spot gamma
 */
@ExternalFunction
public static double gamma(final double spot, final double strike, final double timeToExpiry,
        final double lognormalVol, final double interestRate, final double costOfCarry) {
    ArgumentChecker.isTrue(spot >= 0.0, "negative/NaN spot; have {}", spot);
    ArgumentChecker.isTrue(strike >= 0.0, "negative/NaN strike; have {}", strike);
    ArgumentChecker.isTrue(timeToExpiry >= 0.0, "negative/NaN timeToExpiry; have {}", timeToExpiry);
    ArgumentChecker.isTrue(lognormalVol >= 0.0, "negative/NaN lognormalVol; have {}", lognormalVol);
    ArgumentChecker.isFalse(Double.isNaN(interestRate), "interestRate is NaN");
    ArgumentChecker.isFalse(Double.isNaN(costOfCarry), "costOfCarry is NaN");
    // discount coefficient exp((b - r) * T), with guards for extreme rate inputs
    double coef = 0.;
    if ((interestRate > LARGE && costOfCarry > LARGE) || (-interestRate > LARGE && -costOfCarry > LARGE)
            || Math.abs(costOfCarry - interestRate) < SMALL) {
        coef = 1.; //ref value is returned
    } else {
        final double rate = costOfCarry - interestRate;
        if (rate > LARGE) {
            return costOfCarry > LARGE ? 0. : Double.POSITIVE_INFINITY;
        }
        if (-rate > LARGE) {
            return 0.;
        }
        coef = Math.exp(rate * timeToExpiry);
    }
    final double rootT = Math.sqrt(timeToExpiry);
    double sigmaRootT = lognormalVol * rootT;
    if (Double.isNaN(sigmaRootT)) {
        sigmaRootT = 1.; //ref value is returned
    }
    // deep in/out of the money or extreme total volatility: gamma vanishes
    if (spot > LARGE * strike || spot < SMALL * strike || sigmaRootT > LARGE) {
        return 0.;
    }
    // NOTE(review): factor is computed but never used below — confirm whether it
    // was meant to enter the final expression or can be removed.
    double factor = Math.exp(costOfCarry * timeToExpiry);
    if (Double.isNaN(factor)) {
        factor = 1.; //ref value is returned
    }
    // Black-Scholes d1, with NaN/degenerate guards for each regime
    double d1 = 0.;
    if (Math.abs(spot - strike) < SMALL || (spot > LARGE && strike > LARGE)) {
        // at the money (or both spot and strike extreme): log(spot/strike) ~ 0
        final double coefD1 = (Math.abs(costOfCarry) < SMALL && lognormalVol < SMALL)
                ? Math.signum(costOfCarry) + 0.5 * lognormalVol
                : (costOfCarry / lognormalVol + 0.5 * lognormalVol);
        final double tmp = coefD1 * rootT;
        d1 = Double.isNaN(tmp) ? 0. : tmp;
    } else {
        if (sigmaRootT < SMALL) {
            // vanishing total volatility
            final double scnd = (Math.abs(costOfCarry) > LARGE && rootT < SMALL)
                    ? Math.signum(costOfCarry)
                    : costOfCarry * rootT;
            final double tmp = (Math.log(spot / strike) / rootT + scnd) / lognormalVol;
            d1 = Double.isNaN(tmp) ? 0. : tmp;
        } else {
            // generic case
            final double tmp = costOfCarry * rootT / lognormalVol;
            final double sig = (costOfCarry >= 0.) ? 1. : -1.;
            final double scnd = Double.isNaN(tmp)
                    ? ((lognormalVol < LARGE && lognormalVol > SMALL) ? sig / lognormalVol : sig * rootT)
                    : tmp;
            d1 = Math.log(spot / strike) / sigmaRootT + scnd + 0.5 * sigmaRootT;
        }
    }
    // gamma = coef * phi(d1) / (spot * sigma * sqrt(T)); tiny densities collapse to 0
    final double norm = NORMAL.getPDF(d1);
    final double res = norm < SMALL ? 0. : coef * norm / spot / sigmaRootT;
    return Double.isNaN(res) ? Double.POSITIVE_INFINITY : res;
}
From source file:gdsc.core.ij.Utils.java
/**
 * Calculates a histogram spanning the full range of the provided data.
 *
 * @param data the values to bin
 * @param numBins The number of histogram bins between min and max
 * @return The histogram as a pair of arrays: { value[], frequency[] }
 */
public static double[][] calcHistogram(double[] data, int numBins) {
    // Single pass to find the extremes. NaN values never satisfy either
    // comparison and are therefore ignored.
    double lo = Double.POSITIVE_INFINITY;
    double hi = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < data.length; i++) {
        final double v = data[i];
        if (v < lo)
            lo = v;
        if (v > hi)
            hi = v;
    }
    return calcHistogram(data, lo, hi, numBins);
}
From source file:com.rapidminer.operator.learner.functions.linear.LinearRegression.java
/**
 * Learns a (ridge) linear regression model on the given example set.
 * <p>
 * Steps performed:
 * <ol>
 * <li>Binominal labels are translated into a temporary 0/1 numerical
 * "regression_label" attribute so classification can be handled as regression
 * (the temporary attribute is removed again before returning).</li>
 * <li>Only numerical attributes with non-zero standard deviation are used.</li>
 * <li>Optionally, colinear attributes are eliminated iteratively via the
 * tolerance criterion.</li>
 * <li>The configured feature-selection method is instantiated and applied.</li>
 * <li>Standard errors, standardized coefficients, tolerances, t-statistics and
 * p-values are computed from (X'X)^-1; if that matrix cannot be inverted, an
 * approximation based on the overall correlation is used instead.</li>
 * </ol>
 *
 * @param exampleSet the training data; must not contain missing label values
 * @return the fitted {@code LinearRegressionModel}
 * @throws OperatorException if a parameter is invalid or the selected
 *                           feature-selection method cannot be instantiated
 */
@Override
public Model learn(ExampleSet exampleSet) throws OperatorException {
    // initializing data and parameter values.
    Attribute label = exampleSet.getAttributes().getLabel();
    Attribute workingLabel = label;
    boolean cleanUpLabel = false;
    String firstClassName = null;
    String secondClassName = null;
    com.rapidminer.example.Tools.onlyNonMissingValues(exampleSet, getOperatorClassName(), this,
            Attributes.LABEL_NAME);

    boolean useBias = getParameterAsBoolean(PARAMETER_USE_BIAS);
    boolean removeColinearAttributes = getParameterAsBoolean(PARAMETER_ELIMINATE_COLINEAR_FEATURES);
    double ridge = getParameterAsDouble(PARAMETER_RIDGE);
    double minTolerance = getParameterAsDouble(PARAMETER_MIN_TOLERANCE);

    // prepare for classification by translating into 0-1 coding.
    if (label.isNominal()) {
        if (label.getMapping().size() == 2) {
            firstClassName = label.getMapping().getNegativeString();
            secondClassName = label.getMapping().getPositiveString();
            int firstIndex = label.getMapping().getNegativeIndex();
            // temporary numerical label; removed again in the clean-up step below
            workingLabel = AttributeFactory.createAttribute("regression_label", Ontology.REAL);
            exampleSet.getExampleTable().addAttribute(workingLabel);
            for (Example example : exampleSet) {
                double index = example.getValue(label);
                if (index == firstIndex) {
                    example.setValue(workingLabel, 0.0d);
                } else {
                    example.setValue(workingLabel, 1.0d);
                }
            }
            exampleSet.getAttributes().setLabel(workingLabel);
            cleanUpLabel = true;
        }
    }

    // search all attributes and keep numerical
    int numberOfAttributes = exampleSet.getAttributes().size();
    boolean[] isUsedAttribute = new boolean[numberOfAttributes];
    int counter = 0;
    String[] attributeNames = new String[numberOfAttributes];
    for (Attribute attribute : exampleSet.getAttributes()) {
        isUsedAttribute[counter] = attribute.isNumerical();
        attributeNames[counter] = attribute.getName();
        counter++;
    }

    // compute and store statistics and turn off attributes with zero
    // standard deviation
    exampleSet.recalculateAllAttributeStatistics();
    double[] means = new double[numberOfAttributes];
    double[] standardDeviations = new double[numberOfAttributes];
    counter = 0;
    Attribute[] allAttributes = new Attribute[exampleSet.getAttributes().size()];
    for (Attribute attribute : exampleSet.getAttributes()) {
        allAttributes[counter] = attribute;
        if (isUsedAttribute[counter]) {
            means[counter] = exampleSet.getStatistics(attribute, Statistics.AVERAGE_WEIGHTED);
            standardDeviations[counter] = Math
                    .sqrt(exampleSet.getStatistics(attribute, Statistics.VARIANCE_WEIGHTED));
            // constant attributes carry no information and would break standardization
            if (standardDeviations[counter] == 0) {
                isUsedAttribute[counter] = false;
            }
        }
        counter++;
    }

    double labelMean = exampleSet.getStatistics(workingLabel, Statistics.AVERAGE_WEIGHTED);
    double labelStandardDeviation = Math
            .sqrt(exampleSet.getStatistics(workingLabel, Statistics.VARIANCE_WEIGHTED));

    int numberOfExamples = exampleSet.size();

    // determine the number of used attributes + 1
    int numberOfUsedAttributes = 1;
    for (int i = 0; i < isUsedAttribute.length; i++) {
        if (isUsedAttribute[i]) {
            numberOfUsedAttributes++;
        }
    }

    // remove colinear attributes: repeatedly drop the attribute with the
    // smallest tolerance below the threshold and refit, until none remains
    double[] coefficientsOnFullData = performRegression(exampleSet, isUsedAttribute, means, labelMean, ridge);
    if (removeColinearAttributes) {
        boolean eliminateMore = true;
        while (eliminateMore) {
            int maxIndex = -1;
            double maxTolerance = 1;
            boolean found = false;
            for (int i = 0; i < isUsedAttribute.length; i++) {
                if (isUsedAttribute[i]) {
                    double tolerance = getTolerance(exampleSet, isUsedAttribute, i, ridge, useBias);
                    if (tolerance < minTolerance) {
                        if (tolerance <= maxTolerance) {
                            maxTolerance = tolerance;
                            maxIndex = i;
                            found = true;
                        }
                    }
                }
            }
            if (found) {
                isUsedAttribute[maxIndex] = false;
            } else {
                eliminateMore = false;
            }
            coefficientsOnFullData = performRegression(exampleSet, isUsedAttribute, means, labelMean, ridge);
        }
    } else {
        coefficientsOnFullData = performRegression(exampleSet, isUsedAttribute, means, labelMean, ridge);
    }

    // calculate error on full data
    double errorOnFullData = getSquaredError(exampleSet, isUsedAttribute, coefficientsOnFullData, useBias);

    // apply attribute selection method
    int selectionMethodIndex = getParameterAsInt(PARAMETER_FEATURE_SELECTION);
    String[] selectionMethodNames = SELECTION_METHODS.keySet().toArray(new String[SELECTION_METHODS.size()]);
    String selectedMethod = selectionMethodNames[selectionMethodIndex];
    Class<? extends LinearRegressionMethod> methodClass = SELECTION_METHODS.get(selectedMethod);
    if (methodClass == null) {
        throw new UserError(this, 904, PARAMETER_FEATURE_SELECTION, "unknown method");
    }
    LinearRegressionMethod method;
    try {
        method = methodClass.newInstance();
    } catch (InstantiationException e) {
        throw new UserError(this, 904, PARAMETER_FEATURE_SELECTION, e.getMessage());
    } catch (IllegalAccessException e) {
        throw new UserError(this, 904, PARAMETER_FEATURE_SELECTION, e.getMessage());
    }

    // apply feature selection technique
    LinearRegressionResult result = method.applyMethod(this, useBias, ridge, exampleSet, isUsedAttribute,
            numberOfExamples, numberOfUsedAttributes, means, labelMean, standardDeviations,
            labelStandardDeviation, coefficientsOnFullData, errorOnFullData);

    // clean up eventually if was classification
    if (cleanUpLabel) {
        exampleSet.getAttributes().remove(workingLabel);
        exampleSet.getExampleTable().removeAttribute(workingLabel);
        exampleSet.getAttributes().setLabel(label);
    }

    // +++++++++++++++++++++++++++++++++++++++++++++
    // calculating statistics of the resulting model
    // +++++++++++++++++++++++++++++++++++++++++++++
    FDistribution fdistribution = new FDistribution(1, exampleSet.size() - result.coefficients.length);
    int length = result.coefficients.length;
    double[] standardErrors = new double[length];
    double[] standardizedCoefficients = new double[length];
    double[] tolerances = new double[length];
    double[] tStatistics = new double[length];
    double[] pValues = new double[length];

    // calculating standard error matrix, (containing the error of
    // intercept)
    double mse = result.error / (exampleSet.size() - 1);
    int finalNumberOfAttributes = 0;
    for (boolean b : result.isUsedAttribute) {
        if (b) {
            finalNumberOfAttributes++;
        }
    }

    // design matrix with a leading all-ones column for the intercept;
    // NOTE(review): the matrix has size()+1 rows and row 0 is set entirely to 1,
    // while examples are filled starting at row 1 — presumably intentional, but
    // worth confirming against the standard-error derivation
    double[][] data = new double[exampleSet.size() + 1][finalNumberOfAttributes + 1];
    for (int i = 0; i < data[0].length; i++) {
        data[0][i] = 1;
    }
    for (int i = 0; i < exampleSet.size() + 1; i++) {
        data[i][0] = 1;
    }
    int eIndex = 1;
    for (Example e : exampleSet) {
        int aIndex = 0;
        int aCounter = 1;
        for (Attribute a : exampleSet.getAttributes()) {
            if (result.isUsedAttribute[aIndex]) {
                data[eIndex][aCounter] = e.getValue(a);
                aCounter++;
            }
            aIndex++;
        }
        eIndex++;
    }

    RealMatrix matrix = MatrixUtils.createRealMatrix(data);
    RealMatrix matrixT = matrix.transpose();
    RealMatrix productMatrix = matrixT.multiply(matrix);
    RealMatrix invertedMatrix = null;
    try {
        // try to invert matrix
        invertedMatrix = new LUDecomposition(productMatrix).getSolver().getInverse();
        int index = 0;
        for (int i = 0; i < result.isUsedAttribute.length; i++) {
            if (result.isUsedAttribute[i]) {
                tolerances[index] = getTolerance(exampleSet, result.isUsedAttribute, i, ridge, useBias);
                standardErrors[index] = Math.sqrt(mse * invertedMatrix.getEntry(index + 1, index + 1));
                // calculate standardized Coefficients
                //
                // Be careful, use in the calculation of standardizedCoefficients the i instead
                // of index for standardDeviations, because all other arrays refer to the
                // selected attributes, whereas standardDeviations refers to all attributes
                //
                standardizedCoefficients[index] = result.coefficients[index]
                        * (standardDeviations[i] / labelStandardDeviation);
                if (!Tools.isZero(standardErrors[index])) {
                    tStatistics[index] = result.coefficients[index] / standardErrors[index];
                    // t^2 with 1 numerator df follows an F distribution
                    double probability = fdistribution
                            .getProbabilityForValue(tStatistics[index] * tStatistics[index]);
                    pValues[index] = probability < 0 ? 1.0d : Math.max(0.0d, 1.0d - probability);
                } else {
                    // zero standard error: p-value degenerates to 1 or 0
                    if (Tools.isZero(result.coefficients[index])) {
                        tStatistics[index] = 0.0d;
                        pValues[index] = 1.0d;
                    } else {
                        tStatistics[index] = Double.POSITIVE_INFINITY;
                        pValues[index] = 0.0d;
                    }
                }
                index++;
            }
        }
    } catch (Throwable e) {
        // calculate approximate value if matrix can not be inverted
        // (NOTE(review): catching Throwable is very broad — it also swallows
        // programming errors, not only the singular-matrix case)
        double generalCorrelation = getCorrelation(exampleSet, isUsedAttribute, coefficientsOnFullData,
                useBias);
        generalCorrelation = Math.min(generalCorrelation * generalCorrelation, 1.0d);
        int index = 0;
        for (int i = 0; i < result.isUsedAttribute.length; i++) {
            if (result.isUsedAttribute[i]) {
                // calculating standard error and tolerance
                double tolerance = getTolerance(exampleSet, result.isUsedAttribute, i, ridge, useBias);
                standardErrors[index] = Math
                        .sqrt((1.0d - generalCorrelation) / (tolerance
                                * (exampleSet.size() - exampleSet.getAttributes().size() - 1.0d)))
                        * labelStandardDeviation / standardDeviations[i];
                tolerances[index] = tolerance;
                // calculating beta and test statistics
                // calculate standardized coefficients
                //
                // Be careful, use in the calculation of standardizedCoefficients the i instead
                // of index for standardDeviations, because all other arrays refer to the
                // selected attributes, whereas standardDeviations refers to all attributes
                //
                standardizedCoefficients[index] = result.coefficients[index]
                        * (standardDeviations[i] / labelStandardDeviation);
                if (!Tools.isZero(standardErrors[index])) {
                    tStatistics[index] = result.coefficients[index] / standardErrors[index];
                    double probability = fdistribution
                            .getProbabilityForValue(tStatistics[index] * tStatistics[index]);
                    pValues[index] = probability < 0 ? 1.0d : Math.max(0.0d, 1.0d - probability);
                } else {
                    if (Tools.isZero(result.coefficients[index])) {
                        tStatistics[index] = 0.0d;
                        pValues[index] = 1.0d;
                    } else {
                        tStatistics[index] = Double.POSITIVE_INFINITY;
                        pValues[index] = 0.0d;
                    }
                }
                index++;
            }
        }
    }

    // Set all values for intercept (stored in the last array slot)
    if (invertedMatrix == null) {
        standardErrors[standardErrors.length - 1] = Double.POSITIVE_INFINITY;
    } else {
        standardErrors[standardErrors.length - 1] = Math.sqrt(mse * invertedMatrix.getEntry(0, 0));
    }
    tolerances[tolerances.length - 1] = Double.NaN;
    standardizedCoefficients[standardizedCoefficients.length - 1] = Double.NaN;
    if (!Tools.isZero(standardErrors[standardErrors.length - 1])) {
        tStatistics[tStatistics.length - 1] = result.coefficients[result.coefficients.length - 1]
                / standardErrors[standardErrors.length - 1];
        double probability = fdistribution.getProbabilityForValue(
                tStatistics[tStatistics.length - 1] * tStatistics[tStatistics.length - 1]);
        pValues[pValues.length - 1] = probability < 0 ? 1.0d : Math.max(0.0d, 1.0d - probability);
    } else {
        if (Tools.isZero(result.coefficients[result.coefficients.length - 1])) {
            tStatistics[tStatistics.length - 1] = 0.0d;
            pValues[pValues.length - 1] = 1.0d;
        } else {
            tStatistics[tStatistics.length - 1] = Double.POSITIVE_INFINITY;
            pValues[pValues.length - 1] = 0.0d;
        }
    }

    // delivering weights
    if (weightOutput.isConnected()) {
        AttributeWeights weights = new AttributeWeights(exampleSet);
        int selectedAttributes = 0;
        for (int i = 0; i < attributeNames.length; i++) {
            if (isUsedAttribute[i]) {
                weights.setWeight(attributeNames[i], result.coefficients[selectedAttributes]);
                selectedAttributes++;
            } else {
                weights.setWeight(attributeNames[i], 0);
            }
        }
        weightOutput.deliver(weights);
    }

    return new LinearRegressionModel(exampleSet, result.isUsedAttribute, result.coefficients, standardErrors,
            standardizedCoefficients, tolerances, tStatistics, pValues, useBias, firstClassName,
            secondClassName);
}
From source file:com.rapidminer.operator.generator.ExampleSetGenerator.java
@Override public List<ParameterType> getParameterTypes() { List<ParameterType> types = super.getParameterTypes(); ParameterType type = new ParameterTypeStringCategory(PARAMETER_TARGET_FUNCTION, "Specifies the target function of this example set", KNOWN_FUNCTION_NAMES, KNOWN_FUNCTION_NAMES[0]); type.setExpert(false);/*from ww w . ja v a2 s. com*/ types.add(type); type = new ParameterTypeInt(PARAMETER_NUMBER_EXAMPLES, "The number of generated examples.", 1, Integer.MAX_VALUE, 100); type.setExpert(false); types.add(type); type = new ParameterTypeInt(PARAMETER_NUMBER_OF_ATTRIBUTES, "The number of attributes.", 1, Integer.MAX_VALUE, 5); type.setExpert(false); types.add(type); NonEqualStringCondition useTwoBounds = new NonEqualStringCondition(this, PARAMETER_TARGET_FUNCTION, false, (String[]) ArrayUtils.addAll(FUCTIONS_IGNORING_BOUND, FUNCTIONS_USING_SINGLE_BOUND)); type = new ParameterTypeDouble(PARAMETER_ATTRIBUTES_LOWER_BOUND, "The minimum value for the attributes. In case of target functions using Gaussian distribution, the attribute values may exceed this value.", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, -10); type.registerDependencyCondition(new OrParameterCondition(this, false, new BelowOrEqualOperatorVersionCondition(this, VERSION_TARGET_PARAMETERS_CHANGED), useTwoBounds)); types.add(type); type = new ParameterTypeDouble(PARAMETER_ATTRIBUTES_UPPER_BOUND, "The maximum value for the attributes. 
In case of target functions using Gaussian distribution, the attribute values may exceed this value.", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, 10); type.registerDependencyCondition(new OrParameterCondition(this, false, new BelowOrEqualOperatorVersionCondition(this, VERSION_TARGET_PARAMETERS_CHANGED), useTwoBounds)); types.add(type); type = new ParameterTypeDouble(PARAMETER_ATTRIBUTES_GAUSSIAN_STDDEV, "Standard deviation of the Gaussian distribution used for generating attributes.", Double.MIN_VALUE, Double.POSITIVE_INFINITY, 10); type.registerDependencyCondition( new AboveOperatorVersionCondition(this, VERSION_TARGET_PARAMETERS_CHANGED)); type.registerDependencyCondition( new EqualStringCondition(this, PARAMETER_TARGET_FUNCTION, false, FUNCTIONS_USING_GAUSSIAN_STDDEV)); types.add(type); type = new ParameterTypeDouble(PARAMETER_ATTRIBUTES_LARGEST_RADIUS, "The radius of the outermost ring cluster.", 10.0, Double.POSITIVE_INFINITY, 10); type.registerDependencyCondition( new AboveOperatorVersionCondition(this, VERSION_TARGET_PARAMETERS_CHANGED)); type.registerDependencyCondition( new EqualStringCondition(this, PARAMETER_TARGET_FUNCTION, false, FUNCTIONS_USING_LARGEST_RADIUS)); types.add(type); types.addAll(RandomGenerator.getRandomGeneratorParameters(this)); DataManagementParameterHelper.addParameterTypes(types, this); return types; }
From source file:ffx.xray.CrystalStats.java
/**
 * Prints HKL statistics/completeness info per resolution bin and updates the
 * summary fields of this object ({@code nobshkl}, {@code highnobshkl},
 * {@code nobsrfree}, {@code highnobsrfree}, {@code completeness},
 * {@code highcompleteness}).
 * <p>
 * For each reflection the bin counters are: [0] working-set reflections,
 * [1] cross-validation (R-free) reflections, [2] ignored/missing reflections.
 * The table is logged only when {@code print} is true, but the summary fields
 * are always updated.
 */
public void printHKLStats() {
    // res[b] = {lowest, highest} resolution seen in bin b
    double res[][] = new double[n][2];
    // nhkl[b] = {working, R-free, missing} counts for bin b
    int nhkl[][] = new int[n][3];
    for (int i = 0; i < n; i++) {
        res[i][0] = Double.NEGATIVE_INFINITY;
        res[i][1] = Double.POSITIVE_INFINITY;
    }

    for (HKL ih : reflectionlist.hkllist) {
        int i = ih.index();
        int b = ih.bin();

        // ignored cases: missing amplitude or non-positive sigma
        if (Double.isNaN(fo[i][0]) || fo[i][1] <= 0.0) {
            nhkl[b][2]++;
            continue;
        }

        // determine res limits of each bin
        double rh = Crystal.res(crystal, ih);
        if (rh > res[b][0]) {
            res[b][0] = rh;
        }
        if (rh < res[b][1]) {
            res[b][1] = rh;
        }

        // count the reflection: R-free flagged reflections go to column 1,
        // working-set reflections to column 0
        if (freer[i] == refinementdata.rfreeflag) {
            nhkl[b][1]++;
        } else {
            nhkl[b][0]++;
        }
    }

    // per-bin table rows
    StringBuilder sb = new StringBuilder(String.format("\n %15s | %8s|%9s| %7s | %7s | %s\n", "Res. Range",
            " HKL (R)", " HKL (cv)", " Bin", " Miss", "Complete (%)"));
    for (int i = 0; i < n; i++) {
        sb.append(String.format(" %7.3f %7.3f | ", res[i][0], res[i][1]));
        sb.append(String.format("%7d | %7d | %7d | %7d | ", nhkl[i][0], nhkl[i][1], nhkl[i][0] + nhkl[i][1],
                nhkl[i][2]));
        sb.append(String.format("%6.2f\n", (((double) nhkl[i][0] + nhkl[i][1])
                / (nhkl[i][0] + nhkl[i][1] + nhkl[i][2])) * 100.0));
    }

    // totals row over the full resolution range
    sb.append(String.format(" %7.3f %7.3f | ", res[0][0], res[n - 1][1]));
    int sum1 = 0;
    int sum2 = 0;
    int sum3 = 0;
    for (int i = 0; i < n; i++) {
        sum1 += nhkl[i][0];
        sum2 += nhkl[i][1];
        sum3 += nhkl[i][2];
    }
    sb.append(String.format("%7d | %7d | %7d | %7d | ", sum1, sum2, sum1 + sum2, sum3));
    sb.append(String.format("%6.2f\n", (((double) sum1 + sum2) / (sum1 + sum2 + sum3)) * 100.0));
    sb.append(String.format(" Number of reflections if complete: %10d", refinementdata.n));

    // update summary fields; the "high" variants refer to the highest-resolution bin (n - 1)
    nobshkl = sum1 + sum2;
    highnobshkl = nhkl[n - 1][0] + nhkl[n - 1][1];
    nobsrfree = sum2;
    highnobsrfree = nhkl[n - 1][1];
    completeness = (((double) sum1 + sum2) / (sum1 + sum2 + sum3)) * 100.0;
    highcompleteness = (((double) nhkl[n - 1][0] + nhkl[n - 1][1])
            / (nhkl[n - 1][0] + nhkl[n - 1][1] + nhkl[n - 1][2])) * 100.0;

    if (print) {
        logger.info(sb.toString());
    }
}