List of usage examples for java.lang.Double.SIZE

public static final int SIZE

The number of bits used to represent a double value: 64. Most of the examples below use the Double.SIZE / 8 (or Double.SIZE / Byte.SIZE) idiom to compute the byte width of a double.
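For orientation, a minimal sketch of what the constant evaluates to; the Double.BYTES comparison assumes Java 8 or later, and the class name is illustrative:

public class DoubleSizeDemo {
    public static void main(String[] args) {
        System.out.println(Double.SIZE);             // 64: bits in a double
        System.out.println(Double.SIZE / Byte.SIZE); // 8: the byte-count idiom used below
        System.out.println(Double.BYTES);            // 8: the same value, directly, since Java 8
    }
}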
From source file:Main.java
public static void main(String[] args) {
    System.out.println(Double.SIZE); // prints 64
}
From source file:com.linkedin.pinot.core.segment.memory.PinotDataBufferTest.java
private static void loadVerifyDouble(PinotDataBuffer buffer) {
    final int fieldSize = Double.SIZE / 8;
    int maxElementCount = (int) (buffer.size() / fieldSize);
    int elemCount = Math.min(100_000, maxElementCount);
    Map<Integer, Double> positionValues = new HashMap<>();
    for (int i = 0; i < elemCount; i++) {
        int pos = random.nextInt(elemCount);
        double val = random.nextDouble();
        positionValues.put(pos, val);
        buffer.putDouble(pos * fieldSize, val);
    }
    for (Map.Entry<Integer, Double> entry : positionValues.entrySet()) {
        Assert.assertEquals(buffer.getDouble(entry.getKey() * fieldSize), entry.getValue(),
                "Failure at index: " + entry.getKey());
    }
}
From source file:com.moscona.dataSpace.Numeric.java
@Override
public long sizeInBytes() {
    if (value == null) {
        return 2L;
    }
    if (value.getClass() == Double.class) {
        return 2L + Double.SIZE / 8;
    }
    if (value.getClass() == Float.class) {
        return 2L + Float.SIZE / 8;
    }
    if (value.getClass() == Long.class) {
        return 2L + Long.SIZE / 8;
    }
    if (value.getClass() == Integer.class) {
        return 2L + Integer.SIZE / 8;
    }
    if (value.getClass() == Short.class) {
        return 2L + Short.SIZE / 8;
    }
    if (value.getClass() == Byte.class) {
        return 2L + Byte.SIZE / 8;
    }
    return 2L;
}
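Since Java 8, each boxed numeric type also exposes a BYTES constant equal to SIZE / 8, so a hypothetical alternative to the dispatch above (the payloadBytes helper is illustrative, not part of com.moscona.dataSpace) could read:

// Hypothetical alternative, not from com.moscona.dataSpace: since Java 8,
// each boxed numeric type exposes a BYTES constant equal to SIZE / 8.
static long payloadBytes(Number value) {
    if (value instanceof Double)  return Double.BYTES;  // 8
    if (value instanceof Float)   return Float.BYTES;   // 4
    if (value instanceof Long)    return Long.BYTES;    // 8
    if (value instanceof Integer) return Integer.BYTES; // 4
    if (value instanceof Short)   return Short.BYTES;   // 2
    if (value instanceof Byte)    return Byte.BYTES;    // 1
    return 0;
}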
From source file:com.bah.culvert.util.Bytes.java
/**
 * Return the lexicographically sortable encoding of this double-precision
 * number. Please note that this encoding is NOT the same as the IEEE 754
 * encoding.
 *
 * @param doubleValue
 * @return the lexicographically sortable value of this double.
 */
public static byte[] toBytes(double doubleValue) {
    int bytes = Double.SIZE / 8;
    return ByteBuffer.allocate(bytes).putDouble(doubleValue).array();
}
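For reference, a minimal round-trip sketch; the fromBytes helper and the BytesRoundTrip class are hypothetical, not part of com.bah.culvert.util.Bytes:

import java.nio.ByteBuffer;

// Hypothetical companion to the toBytes method above, not part of
// com.bah.culvert.util.Bytes: decodes the 8-byte big-endian encoding.
public class BytesRoundTrip {
    public static double fromBytes(byte[] encoded) {
        return ByteBuffer.wrap(encoded).getDouble();
    }

    public static void main(String[] args) {
        byte[] encoded = ByteBuffer.allocate(Double.SIZE / 8).putDouble(42.5).array();
        System.out.println(fromBytes(encoded)); // 42.5
    }
}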
From source file:edu.cornell.med.icb.pca.RotationReaderWriter.java
public ObjectSet<CharSequence> getTableColumnIds(final CharSequence datasetEndpointName,
        final MutableString pathwayId) {
    final String cachedTableFile = getRotationFile(datasetEndpointName, pathwayId);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Attempting to read cached table from " + cachedTableFile);
    }
    final ObjectSet<CharSequence> result = new ObjectArraySet<CharSequence>();
    try {
        final CompoundDataInput dataInput = cfr.readFile(cachedTableFile);
        final int numberOfColumns = dataInput.readInt();
        LOG.trace("Reading cached table with " + numberOfColumns + " columns");
        for (int i = 0; i < numberOfColumns; i++) {
            final String colType = dataInput.readUTF();
            final String colId = dataInput.readUTF();
            result.add(colId);
            if (colType.equals("s")) {
                final int numStrings = dataInput.readInt();
                for (int j = 0; j < numStrings; j++) {
                    dataInput.readUTF();
                }
            } else if (colType.equals("d")) {
                final int numDoubles = dataInput.readInt();
                // We don't need to read these doubles, just skip them.
                dataInput.skipBytes(Double.SIZE * numDoubles / 8);
            } else {
                LOG.error("UNKNOWN COLUMN TYPE " + colType + " cannot read cached table from file "
                        + cachedTableFile);
                return null;
            }
        }
        return result;
    } catch (IOException e) {
        LOG.error(e);
        return null;
    }
}
From source file:de.betterform.xml.xforms.ui.AbstractFormControl.java
/**
 * Convert a localized value into its XML Schema datatype representation. If the value given cannot be parsed with
 * the locale in the betterForm context, the default locale (US) will be used as a fallback. This can be convenient
 * for user agents that do not pass a localized value back.
 *
 * @param value the value to convert
 * @return converted value that can be used to update instance data and match the Schema datatype lexical space
 * @throws java.text.ParseException in case the incoming string cannot be converted into a Schema datatype representation
 */
protected String delocaliseValue(String value) throws XFormsException, ParseException {
    if (value == null || value.equals("")) {
        return value;
    }
    if (Config.getInstance().getProperty(XFormsProcessorImpl.BETTERFORM_ENABLE_L10N).equals("true")) {
        Locale locale = (Locale) getModel().getContainer().getProcessor().getContext()
                .get(XFormsProcessorImpl.BETTERFORM_LOCALE);
        XFormsProcessorImpl processor = this.model.getContainer().getProcessor();
        if (processor.hasControlType(this.id, NamespaceConstants.XMLSCHEMA_PREFIX + ":float")
                || processor.hasControlType(this.id, NamespaceConstants.XMLSCHEMA_PREFIX + ":decimal")
                || processor.hasControlType(this.id, NamespaceConstants.XMLSCHEMA_PREFIX + ":double")) {
            NumberFormat formatter = NumberFormat.getNumberInstance(locale);
            formatter.setMaximumFractionDigits(Double.SIZE);
            BigDecimal number;
            try {
                number = strictParse(value, locale);
            } catch (ParseException e) {
                LOGGER.warn("value: '" + value + "' could not be parsed for locale: " + locale);
                return value;
            } catch (NumberFormatException nfe) {
                LOGGER.warn("value: '" + value + "' could not be parsed for locale: " + locale);
                return value;
            } catch (InputMismatchException ime) {
                LOGGER.warn("value: '" + value + "' could not be parsed for locale: " + locale);
                return value;
            }
            return number.toPlainString();
        } else if (processor.hasControlType(this.id, NamespaceConstants.XMLSCHEMA_PREFIX + ":date")) {
            DateFormat df = DateFormat.getDateInstance(DateFormat.DEFAULT, locale);
            Date d = null;
            try {
                d = df.parse(value);
            } catch (ParseException e) {
                // Try the default locale - else fail with ParseException.
                df = new SimpleDateFormat("yyyy-MM-dd");
                df.setLenient(false);
                d = df.parse(value);
            }
            df = new SimpleDateFormat("yyyy-MM-dd");
            return df.format(d);
        } else if (processor.hasControlType(this.id, NamespaceConstants.XMLSCHEMA_PREFIX + ":dateTime")) {
            String timezone = "";
            if (value.contains("GMT")) {
                timezone = value.substring(value.indexOf("GMT") + 3, value.length());
            } else if (value.contains("+")) {
                timezone = value.substring(value.indexOf("+"), value.length());
            } else if (value.contains("Z")) {
                timezone = "Z";
            }
            DateFormat sf = DateFormat.getDateTimeInstance(DateFormat.DEFAULT, DateFormat.DEFAULT, locale);
            Date d = null;
            try {
                d = sf.parse(value);
            } catch (ParseException e) {
                // Try the default locale - else fail with ParseException.
                sf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
                d = sf.parse(value);
            }
            sf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
            String converted = sf.format(d);
            if (!timezone.equals("")) {
                return converted + timezone;
            }
            return converted;
        }
    }
    return value;
}
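A standalone sketch of the locale-sensitive number handling above, using only the standard java.text API (not betterForm code); here Double.SIZE serves merely as a generous cap on fraction digits:

import java.text.NumberFormat;
import java.text.ParseException;
import java.util.Locale;

// Standalone illustration, not betterForm code: parse a German-localized
// decimal and re-emit it in a locale-neutral form.
public class DelocaliseSketch {
    public static void main(String[] args) throws ParseException {
        NumberFormat german = NumberFormat.getNumberInstance(Locale.GERMANY);
        // Double.SIZE (64) is used above as a generous cap on fraction digits.
        german.setMaximumFractionDigits(Double.SIZE);
        Number parsed = german.parse("1.234,56"); // German grouping/decimal separators
        System.out.println(parsed.doubleValue()); // 1234.56
    }
}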
From source file:hivemall.fm.FactorizationMachineUDTF.java
protected void recordTrain(@Nonnull final Feature[] x, final double y) throws HiveException {
    if (_iterations <= 1) {
        return;
    }
    ByteBuffer inputBuf = _inputBuf;
    NioStatefullSegment dst = _fileIO;
    if (inputBuf == null) {
        final File file;
        try {
            file = File.createTempFile("hivemall_fm", ".sgmt");
            file.deleteOnExit();
            if (!file.canWrite()) {
                throw new UDFArgumentException("Cannot write a temporary file: " + file.getAbsolutePath());
            }
            LOG.info("Record training examples to a file: " + file.getAbsolutePath());
        } catch (IOException ioe) {
            throw new UDFArgumentException(ioe);
        } catch (Throwable e) {
            throw new UDFArgumentException(e);
        }
        this._inputBuf = inputBuf = ByteBuffer.allocateDirect(1024 * 1024); // 1 MiB
        this._fileIO = dst = new NioStatefullSegment(file, false);
    }
    int xBytes = Feature.requiredBytes(x);
    int recordBytes = (Integer.SIZE + Double.SIZE) / 8 + xBytes;
    int requiredBytes = (Integer.SIZE / 8) + recordBytes;
    int remain = inputBuf.remaining();
    if (remain < requiredBytes) {
        writeBuffer(inputBuf, dst);
    }
    inputBuf.putInt(recordBytes);
    inputBuf.putInt(x.length);
    for (Feature f : x) {
        f.writeTo(inputBuf);
    }
    inputBuf.putDouble(y);
}
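The method above frames each record as a length prefix, an element count, and a payload. A self-contained sketch of the same framing idiom with plain java.nio (names illustrative, not Hivemall code):

import java.nio.ByteBuffer;

// Illustrative only, not Hivemall code: frames a record as
// [int recordBytes][int count][double payload...] and reads it back.
public class RecordFramingSketch {
    public static void main(String[] args) {
        double[] values = { 1.5, -2.25, 3.0 };
        int recordBytes = Integer.SIZE / 8 + values.length * Double.SIZE / 8;
        ByteBuffer buf = ByteBuffer.allocate(Integer.SIZE / 8 + recordBytes);
        buf.putInt(recordBytes);   // length prefix
        buf.putInt(values.length); // element count
        for (double v : values) {
            buf.putDouble(v);
        }
        buf.flip();
        int length = buf.getInt();
        int count = buf.getInt();
        System.out.println("record of " + length + " bytes, " + count + " doubles");
        for (int i = 0; i < count; i++) {
            System.out.println(buf.getDouble());
        }
    }
}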
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
/**
 * Compress a set of columns.
 *
 * The header contains a compressed array of data types.
 * The body contains compressed columns and their metadata.
 * The footer contains a compressed array of chunk sizes. The final four bytes of the footer encode
 * the byte size of that compressed array.
 *
 * @param colSet
 *
 * @return ByteBuffer representing the compressed set.
 */
@Override
public ByteBuffer compress(ColumnBuffer[] colSet) {

    // Many compression libraries allow you to avoid allocation of intermediate arrays.
    // To use these APIs, we need to preallocate the output container.

    // Reserve space for the header.
    int[] dataType = new int[colSet.length];
    int maxCompressedSize = Snappy.maxCompressedLength(4 * dataType.length);

    // Reserve space for the compressed nulls BitSet for each column.
    maxCompressedSize += colSet.length * Snappy.maxCompressedLength((colSet.length / 8) + 1);

    // Track the length of `List<Integer> compressedSize` which will be declared later.
    int uncompressedFooterLength = 1 + 2 * colSet.length;

    for (int colNum = 0; colNum < colSet.length; ++colNum) {
        // Reserve space for the compressed columns.
        dataType[colNum] = colSet[colNum].getType().toTType().getValue();
        switch (TTypeId.findByValue(dataType[colNum])) {
        case BOOLEAN_TYPE:
            maxCompressedSize += Integer.SIZE / Byte.SIZE; // This is for the encoded length.
            maxCompressedSize += Snappy.maxCompressedLength((colSet.length / 8) + 1);
            break;
        case TINYINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length);
            break;
        case SMALLINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Short.SIZE / Byte.SIZE);
            break;
        case INT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            break;
        case BIGINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Long.SIZE / Byte.SIZE);
            break;
        case DOUBLE_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Double.SIZE / Byte.SIZE);
            break;
        case BINARY_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            // Reserve space for the size of the compressed flattened bytes.
            for (ByteBuffer nextBuffer : colSet[colNum].toTColumn().getBinaryVal().getValues()) {
                maxCompressedSize += Snappy.maxCompressedLength(nextBuffer.limit());
            }
            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;
            break;
        case STRING_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            // Reserve space for the size of the compressed flattened bytes.
            for (String nextString : colSet[colNum].toTColumn().getStringVal().getValues()) {
                maxCompressedSize += Snappy
                        .maxCompressedLength(nextString.getBytes(StandardCharsets.UTF_8).length);
            }
            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;
            break;
        default:
            throw new IllegalStateException("Unrecognized column type");
        }
    }
    // Reserve space for the footer.
    maxCompressedSize += Snappy.maxCompressedLength(uncompressedFooterLength * Integer.SIZE / Byte.SIZE);

    // Allocate the output container.
    ByteBuffer output = ByteBuffer.allocate(maxCompressedSize);

    // Allocate the footer. It goes at the end because we don't know the chunk sizes until after
    // the columns have been compressed and written.
    ArrayList<Integer> compressedSize = new ArrayList<Integer>(uncompressedFooterLength);

    // Write to the output buffer.
    try {
        // Write the header.
        compressedSize.add(writePrimitives(dataType, output));

        // Write the compressed columns and metadata.
        for (int colNum = 0; colNum < colSet.length; colNum++) {
            switch (TTypeId.findByValue(dataType[colNum])) {
            case BOOLEAN_TYPE: {
                TBoolColumn column = colSet[colNum].toTColumn().getBoolVal();
                List<Boolean> bools = column.getValues();
                BitSet bsBools = new BitSet(bools.size());
                for (int rowNum = 0; rowNum < bools.size(); rowNum++) {
                    bsBools.set(rowNum, bools.get(rowNum));
                }
                compressedSize.add(writePrimitives(column.getNulls(), output));
                // BitSet won't write trailing zeroes, so we encode the length.
                output.putInt(column.getValuesSize());
                compressedSize.add(writePrimitives(bsBools.toByteArray(), output));
                break;
            }
            case TINYINT_TYPE: {
                TByteColumn column = colSet[colNum].toTColumn().getByteVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedBytes(column.getValues(), output));
                break;
            }
            case SMALLINT_TYPE: {
                TI16Column column = colSet[colNum].toTColumn().getI16Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedShorts(column.getValues(), output));
                break;
            }
            case INT_TYPE: {
                TI32Column column = colSet[colNum].toTColumn().getI32Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedIntegers(column.getValues(), output));
                break;
            }
            case BIGINT_TYPE: {
                TI64Column column = colSet[colNum].toTColumn().getI64Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedLongs(column.getValues(), output));
                break;
            }
            case DOUBLE_TYPE: {
                TDoubleColumn column = colSet[colNum].toTColumn().getDoubleVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedDoubles(column.getValues(), output));
                break;
            }
            case BINARY_TYPE: {
                TBinaryColumn column = colSet[colNum].toTColumn().getBinaryVal();
                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).limit();
                    totalSize += column.getValues().get(rowNum).limit();
                }
                // Flatten the data for Snappy for a better compression ratio.
                ByteBuffer flattenedData = ByteBuffer.allocate(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.put(column.getValues().get(rowNum));
                }
                // Write the nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));
                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));
                // Write the compressed, flattened data.
                compressedSize.add(writePrimitives(flattenedData.array(), output));
                break;
            }
            case STRING_TYPE: {
                TStringColumn column = colSet[colNum].toTColumn().getStringVal();
                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).length();
                    totalSize += column.getValues().get(rowNum).length();
                }
                // Flatten the data for Snappy for a better compression ratio.
                StringBuilder flattenedData = new StringBuilder(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.append(column.getValues().get(rowNum));
                }
                // Write the nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));
                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));
                // Write the flattened data.
                compressedSize.add(
                        writePrimitives(flattenedData.toString().getBytes(StandardCharsets.UTF_8), output));
                break;
            }
            default:
                throw new IllegalStateException("Unrecognized column type");
            }
        }

        // Write the footer.
        output.putInt(writeBoxedIntegers(compressedSize, output));
    } catch (IOException e) {
        e.printStackTrace();
    }
    output.flip();
    return output;
}
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(double[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Double.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}
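A minimal round trip using the same snappy-java raw API, assuming org.xerial.snappy on the classpath (illustrative, not part of SnappyCompDe):

import java.io.IOException;
import org.xerial.snappy.Snappy;

// Illustrative round trip with the snappy-java raw API: a double[] is
// compressed into a byte[] and restored into a second double[].
public class SnappyDoubleRoundTrip {
    public static void main(String[] args) throws IOException {
        double[] input = { 1.0, 2.5, Math.PI };
        int inputBytes = input.length * Double.SIZE / Byte.SIZE;

        byte[] compressed = new byte[Snappy.maxCompressedLength(inputBytes)];
        int compressedLen = Snappy.rawCompress(input, 0, inputBytes, compressed, 0);

        double[] restored = new double[input.length];
        Snappy.rawUncompress(compressed, 0, compressedLen, restored, 0);

        System.out.println(restored[2]); // 3.141592653589793
    }
}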
From source file:org.apache.pig.impl.util.avro.AvroTupleWrapper.java
@SuppressWarnings({ "rawtypes", "unchecked" }) private long getMemorySize(final IndexedRecord r) { int total = 0; final int bitsPerByte = 8; for (Field f : r.getSchema().getFields()) { switch (f.schema().getType()) { case BOOLEAN: case ENUM: case INT: total += Integer.SIZE << bitsPerByte; break; case DOUBLE: total += Double.SIZE << bitsPerByte; break; case FLOAT: total += Float.SIZE << bitsPerByte; break; case NULL: break; case STRING: total += ((String) r.get(f.pos())).length() * (Character.SIZE << bitsPerByte); break; case BYTES: total += ((Byte[]) r.get(f.pos())).length; break; case RECORD: total += new AvroTupleWrapper((IndexedRecord) r.get(f.pos())).getMemorySize(); break; case ARRAY: total += new AvroBagWrapper((GenericArray) r.get(f.pos())).getMemorySize(); break; }/*from w ww. j av a2 s .co m*/ } return total; }