List of usage examples for java.lang Byte SIZE
public static final int SIZE

The constant holds the number of bits used to represent a byte value in two's complement binary form: 8. In the examples below it is usually combined with the SIZE constants of the other primitive wrappers (for example Integer.SIZE / Byte.SIZE) to convert a width in bits into a width in bytes.
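As a quick orientation, here is a minimal, self-contained sketch (plain JDK, not taken from any of the source files below) of the arithmetic these snippets rely on:

public class ByteSizeDemo {
    public static void main(String[] args) {
        // Byte.SIZE is the width of a byte in bits.
        System.out.println(Byte.SIZE);                // 8

        // The recurring idiom Type.SIZE / Byte.SIZE converts bits to bytes.
        System.out.println(Integer.SIZE / Byte.SIZE); // 4
        System.out.println(Long.SIZE / Byte.SIZE);    // 8
        System.out.println(Double.SIZE / Byte.SIZE);  // 8

        // Since Java 8, the BYTES constants express the same quantity directly.
        System.out.println(Integer.BYTES);            // 4
    }
}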
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(double[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Double.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}
From source file:org.apache.hadoop.hdfs.server.namenode.TestFSEditLogLoader.java
@Test
public void testFSEditLogOpCodes() throws IOException {
    // Try all codes.
    for (FSEditLogOpCodes c : FSEditLogOpCodes.values()) {
        final byte code = c.getOpCode();
        assertEquals("c=" + c + ", code=" + code, c, FSEditLogOpCodes.fromByte(code));
    }
    // Try all byte values.
    for (int b = 0; b < (1 << Byte.SIZE); b++) {
        final byte code = (byte) b;
        assertEquals("b=" + b + ", code=" + code, fromByte(code), FSEditLogOpCodes.fromByte(code));
    }
}
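The loop bound 1 << Byte.SIZE is 256, the number of distinct byte values, so the cast (byte) b walks the entire two's complement range. A standalone sketch of the idiom (not part of the Hadoop test):

public class AllByteValues {
    public static void main(String[] args) {
        // 1 << Byte.SIZE == 256: the number of distinct byte values.
        System.out.println(1 << Byte.SIZE);   // 256
        // Casting walks the two's complement range: 0..127, then -128..-1.
        System.out.println((byte) 0);         // 0
        System.out.println((byte) 127);       // 127
        System.out.println((byte) 128);       // -128 (wraps)
        System.out.println((byte) 255);       // -1
    }
}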
From source file:org.kiji.schema.FormattedEntityId.java
/**
 * Decode a byte array containing an hbase row key into an ordered list corresponding to
 * the key format in the layout file.
 *
 * @param format The row key format as specified in the layout file.
 * @param hbaseRowKey A byte array containing the hbase row key.
 * @return An ordered list of component values in the key.
 */
private static List<Object> makeKijiRowKey(RowKeyFormat2 format, byte[] hbaseRowKey) {
    if (hbaseRowKey.length == 0) {
        throw new EntityIdException("Invalid hbase row key");
    }
    List<Object> kijiRowKey = new ArrayList<Object>();
    // Skip over the hash.
    int pos = format.getSalt().getHashSize();
    int kijiRowElem = 0;
    // If we are suppressing materialization, the components cannot be retrieved.
    if (format.getSalt().getSuppressKeyMaterialization()) {
        if (pos < hbaseRowKey.length) {
            throw new EntityIdException("Extra bytes in key after hash when materialization is suppressed");
        }
        return null;
    }
    ByteBuffer buf;
    while (kijiRowElem < format.getComponents().size() && pos < hbaseRowKey.length) {
        switch (format.getComponents().get(kijiRowElem).getType()) {
        case STRING:
            // Read the row key until we encounter a null (0) byte or the end.
            int endpos = pos;
            while (endpos < hbaseRowKey.length && (hbaseRowKey[endpos] != (byte) 0)) {
                endpos += 1;
            }
            String str = null;
            try {
                str = new String(hbaseRowKey, pos, endpos - pos, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                LOG.error(e.toString());
                throw new EntityIdException(String.format("UnsupportedEncoding for component %d", kijiRowElem));
            }
            kijiRowKey.add(str);
            pos = endpos + 1;
            break;
        case INTEGER:
            // Toggle the highest-order bit to return to the original two's complement.
            hbaseRowKey[pos] = (byte) ((int) hbaseRowKey[pos] ^ (int) Byte.MIN_VALUE);
            try {
                buf = ByteBuffer.wrap(hbaseRowKey, pos, Integer.SIZE / Byte.SIZE);
            } catch (IndexOutOfBoundsException e) {
                throw new EntityIdException("Malformed hbase Row Key");
            }
            kijiRowKey.add(Integer.valueOf(buf.getInt()));
            pos = pos + Integer.SIZE / Byte.SIZE;
            break;
        case LONG:
            // Toggle the highest-order bit to return to the original two's complement.
            hbaseRowKey[pos] = (byte) ((int) hbaseRowKey[pos] ^ (int) Byte.MIN_VALUE);
            try {
                buf = ByteBuffer.wrap(hbaseRowKey, pos, Long.SIZE / Byte.SIZE);
            } catch (IndexOutOfBoundsException e) {
                throw new EntityIdException("Malformed hbase Row Key");
            }
            kijiRowKey.add(Long.valueOf(buf.getLong()));
            pos = pos + Long.SIZE / Byte.SIZE;
            break;
        default:
            throw new RuntimeException("Invalid code path");
        }
        kijiRowElem += 1;
    }
    // Fail if there are extra bytes in the hbase row key.
    if (pos < hbaseRowKey.length) {
        throw new EntityIdException("Extra bytes in hbase row key cannot be mapped to any " + "component");
    }
    // Fail if we encounter nulls before it is legal to do so.
    if (kijiRowElem < format.getNullableStartIndex()) {
        throw new EntityIdException("Too few components decoded from hbase row key. Component "
                + "number " + kijiRowElem + " cannot be null");
    }
    // Finish up with nulls for everything that wasn't in the key.
    for (; kijiRowElem < format.getComponents().size(); kijiRowElem++) {
        kijiRowKey.add(null);
    }
    return kijiRowKey;
}
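The XOR with Byte.MIN_VALUE on the first byte undoes a sign-bit flip applied when the key was encoded; flipping the sign bit makes signed values sort correctly as unsigned byte strings. A minimal round-trip sketch of that trick (the encode/decode helpers are hypothetical, not part of FormattedEntityId):

import java.nio.ByteBuffer;

public class OrderPreservingInt {
    // Encode an int so the unsigned lexicographic order of the bytes
    // matches the signed numeric order: flip the sign bit of the first byte.
    static byte[] encode(int value) {
        byte[] bytes = ByteBuffer.allocate(Integer.SIZE / Byte.SIZE).putInt(value).array();
        bytes[0] = (byte) (bytes[0] ^ Byte.MIN_VALUE);
        return bytes;
    }

    // Decode by toggling the same bit back, exactly as makeKijiRowKey does.
    // (Mutates its argument, for the sake of a short sketch.)
    static int decode(byte[] bytes) {
        bytes[0] = (byte) (bytes[0] ^ Byte.MIN_VALUE);
        return ByteBuffer.wrap(bytes).getInt();
    }

    public static void main(String[] args) {
        System.out.println(decode(encode(-42)));                // -42
        System.out.println(decode(encode(Integer.MAX_VALUE)));  // 2147483647
    }
}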
From source file:com.moz.fiji.schema.FormattedEntityId.java
/**
 * Decode a byte array containing an hbase row key into an ordered list corresponding to
 * the key format in the layout file.
 *
 * @param format The row key format as specified in the layout file.
 * @param hbaseRowKey A byte array containing the hbase row key.
 * @return An ordered list of component values in the key.
 */
private static List<Object> makeFijiRowKey(RowKeyFormat2 format, byte[] hbaseRowKey) {
    if (hbaseRowKey.length == 0) {
        throw new EntityIdException("Invalid hbase row key");
    }
    List<Object> fijiRowKey = new ArrayList<Object>();
    // Skip over the hash.
    int pos = format.getSalt().getHashSize();
    int fijiRowElem = 0;
    // If we are suppressing materialization, the components cannot be retrieved.
    if (format.getSalt().getSuppressKeyMaterialization()) {
        if (pos < hbaseRowKey.length) {
            throw new EntityIdException("Extra bytes in key after hash when materialization is suppressed");
        }
        return null;
    }
    ByteBuffer buf;
    while (fijiRowElem < format.getComponents().size() && pos < hbaseRowKey.length) {
        switch (format.getComponents().get(fijiRowElem).getType()) {
        case STRING:
            // Read the row key until we encounter a null (0) byte or the end.
            int endpos = pos;
            while (endpos < hbaseRowKey.length && (hbaseRowKey[endpos] != (byte) 0)) {
                endpos += 1;
            }
            String str = null;
            try {
                str = new String(hbaseRowKey, pos, endpos - pos, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                LOG.error(e.toString());
                throw new EntityIdException(String.format("UnsupportedEncoding for component %d", fijiRowElem));
            }
            fijiRowKey.add(str);
            pos = endpos + 1;
            break;
        case INTEGER:
            // Toggle the highest-order bit to return to the original two's complement.
            hbaseRowKey[pos] = (byte) ((int) hbaseRowKey[pos] ^ (int) Byte.MIN_VALUE);
            try {
                buf = ByteBuffer.wrap(hbaseRowKey, pos, Integer.SIZE / Byte.SIZE);
            } catch (IndexOutOfBoundsException e) {
                throw new EntityIdException("Malformed hbase Row Key");
            }
            fijiRowKey.add(Integer.valueOf(buf.getInt()));
            pos = pos + Integer.SIZE / Byte.SIZE;
            break;
        case LONG:
            // Toggle the highest-order bit to return to the original two's complement.
            hbaseRowKey[pos] = (byte) ((int) hbaseRowKey[pos] ^ (int) Byte.MIN_VALUE);
            try {
                buf = ByteBuffer.wrap(hbaseRowKey, pos, Long.SIZE / Byte.SIZE);
            } catch (IndexOutOfBoundsException e) {
                throw new EntityIdException("Malformed hbase Row Key");
            }
            fijiRowKey.add(Long.valueOf(buf.getLong()));
            pos = pos + Long.SIZE / Byte.SIZE;
            break;
        default:
            throw new RuntimeException("Invalid code path");
        }
        fijiRowElem += 1;
    }
    // Fail if there are extra bytes in the hbase row key.
    if (pos < hbaseRowKey.length) {
        throw new EntityIdException("Extra bytes in hbase row key cannot be mapped to any " + "component");
    }
    // Fail if we encounter nulls before it is legal to do so.
    if (fijiRowElem < format.getNullableStartIndex()) {
        throw new EntityIdException("Too few components decoded from hbase row key. Component "
                + "number " + fijiRowElem + " cannot be null");
    }
    // Finish up with nulls for everything that wasn't in the key.
    for (; fijiRowElem < format.getComponents().size(); fijiRowElem++) {
        fijiRowKey.add(null);
    }
    return fijiRowKey;
}
From source file:org.cellprofiler.subimager.ImageWriterHandler.java
private byte[] convertImage(NDImage ndimage, PixelType pixelType, boolean toBigEndian) {
    double[] inputDouble = ndimage.getBuffer();
    switch (pixelType) {
    case INT8:
        return convertToIntegerType(inputDouble, Byte.MIN_VALUE, Byte.MAX_VALUE, Byte.SIZE / 8, toBigEndian);
    case UINT8:
        return convertToIntegerType(inputDouble, 0, (1L << Byte.SIZE) - 1, Byte.SIZE / 8, toBigEndian);
    case INT16:
        return convertToIntegerType(inputDouble, Short.MIN_VALUE, Short.MAX_VALUE, Short.SIZE / 8, toBigEndian);
    case UINT16:
        return convertToIntegerType(inputDouble, 0, (1L << Short.SIZE) - 1, Short.SIZE / 8, toBigEndian);
    case INT32:
        return convertToIntegerType(inputDouble, Integer.MIN_VALUE, Integer.MAX_VALUE, Integer.SIZE / 8, toBigEndian);
    case UINT32:
        return convertToIntegerType(inputDouble, 0, (1L << Integer.SIZE) - 1, Integer.SIZE / 8, toBigEndian);
    case FLOAT: {
        int bpp = Float.SIZE / 8;
        byte[] buffer = new byte[inputDouble.length * bpp];
        for (int i = 0; i < inputDouble.length; i++) {
            DataTools.unpackBytes(Float.floatToIntBits((float) inputDouble[i]), buffer, i * bpp, bpp, !toBigEndian);
        }
        return buffer;
    }
    case DOUBLE: {
        int bpp = Double.SIZE / 8;
        byte[] buffer = new byte[inputDouble.length * bpp];
        for (int i = 0; i < inputDouble.length; i++) {
            DataTools.unpackBytes(Double.doubleToLongBits(inputDouble[i]), buffer, i * bpp, bpp, !toBigEndian);
        }
        return buffer;
    }
    default:
        throw new UnsupportedOperationException("The pixel type, " + pixelType.getValue()
                + ", should have been explicitly handled by the caller and an error should have been reported to the web client.");
    }
}
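The (1L << Byte.SIZE) - 1 pattern above computes the maximum unsigned value for a given bit width; the long literal matters, because int shift counts are taken mod 32. A standalone sketch of the pattern:

public class UnsignedMax {
    public static void main(String[] args) {
        // Maximum unsigned value for an n-bit field: (1L << n) - 1.
        System.out.println((1L << Byte.SIZE) - 1);     // 255
        System.out.println((1L << Short.SIZE) - 1);    // 65535
        System.out.println((1L << Integer.SIZE) - 1);  // 4294967295
        // The long literal is essential: int shifts are taken mod 32,
        // so (1 << Integer.SIZE) == 1, not 2^32.
        System.out.println((1 << Integer.SIZE) - 1);   // 0
    }
}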
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
/**
 * Decompress a set of columns from a ByteBuffer and update the position of the buffer.
 *
 * @param input A ByteBuffer with `position` indicating the starting point of the compressed chunk.
 * @param chunkSize The length of the compressed chunk to be decompressed from the input buffer.
 *
 * @return The set of columns.
 */
@Override
public ColumnBuffer[] decompress(ByteBuffer input, int chunkSize) {
    int startPos = input.position();
    try {
        // Read the footer.
        int footerSize = input.getInt(startPos + chunkSize - 4);
        Iterator<Integer> compressedSize = Arrays
                .asList(ArrayUtils.toObject(Snappy.uncompressIntArray(input.array(),
                        input.arrayOffset() + startPos + chunkSize - Integer.SIZE / Byte.SIZE - footerSize,
                        footerSize)))
                .iterator();

        // Read the header.
        int[] dataType = readIntegers(compressedSize.next(), input);
        int numOfCols = dataType.length;

        // Read the columns.
        ColumnBuffer[] outputCols = new ColumnBuffer[numOfCols];
        for (int colNum = 0; colNum < numOfCols; colNum++) {
            byte[] nulls = readBytes(compressedSize.next(), input);

            switch (TTypeId.findByValue(dataType[colNum])) {
            case BOOLEAN_TYPE: {
                int numRows = input.getInt();
                byte[] vals = readBytes(compressedSize.next(), input);
                BitSet bsBools = BitSet.valueOf(vals);
                boolean[] bools = new boolean[numRows];
                for (int rowNum = 0; rowNum < numRows; rowNum++) {
                    bools[rowNum] = bsBools.get(rowNum);
                }
                TBoolColumn column = new TBoolColumn(Arrays.asList(ArrayUtils.toObject(bools)), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.boolVal(column));
                break;
            }
            case TINYINT_TYPE: {
                byte[] vals = readBytes(compressedSize.next(), input);
                TByteColumn column = new TByteColumn(Arrays.asList(ArrayUtils.toObject(vals)), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.byteVal(column));
                break;
            }
            case SMALLINT_TYPE: {
                short[] vals = readShorts(compressedSize.next(), input);
                TI16Column column = new TI16Column(Arrays.asList(ArrayUtils.toObject(vals)), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.i16Val(column));
                break;
            }
            case INT_TYPE: {
                int[] vals = readIntegers(compressedSize.next(), input);
                TI32Column column = new TI32Column(Arrays.asList(ArrayUtils.toObject(vals)), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.i32Val(column));
                break;
            }
            case BIGINT_TYPE: {
                long[] vals = readLongs(compressedSize.next(), input);
                TI64Column column = new TI64Column(Arrays.asList(ArrayUtils.toObject(vals)), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.i64Val(column));
                break;
            }
            case DOUBLE_TYPE: {
                double[] vals = readDoubles(compressedSize.next(), input);
                TDoubleColumn column = new TDoubleColumn(Arrays.asList(ArrayUtils.toObject(vals)), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.doubleVal(column));
                break;
            }
            case BINARY_TYPE: {
                int[] rowSize = readIntegers(compressedSize.next(), input);
                ByteBuffer flattenedData = ByteBuffer.wrap(readBytes(compressedSize.next(), input));
                ByteBuffer[] vals = new ByteBuffer[rowSize.length];
                for (int rowNum = 0; rowNum < rowSize.length; rowNum++) {
                    vals[rowNum] = ByteBuffer.wrap(flattenedData.array(), flattenedData.position(), rowSize[rowNum]);
                    flattenedData.position(flattenedData.position() + rowSize[rowNum]);
                }
                TBinaryColumn column = new TBinaryColumn(Arrays.asList(vals), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.binaryVal(column));
                break;
            }
            case STRING_TYPE: {
                int[] rowSize = readIntegers(compressedSize.next(), input);
                ByteBuffer flattenedData = ByteBuffer.wrap(readBytes(compressedSize.next(), input));
                String[] vals = new String[rowSize.length];
                for (int rowNum = 0; rowNum < rowSize.length; rowNum++) {
                    vals[rowNum] = new String(flattenedData.array(), flattenedData.position(), rowSize[rowNum],
                            StandardCharsets.UTF_8);
                    flattenedData.position(flattenedData.position() + rowSize[rowNum]);
                }
                TStringColumn column = new TStringColumn(Arrays.asList(vals), ByteBuffer.wrap(nulls));
                outputCols[colNum] = new ColumnBuffer(TColumn.stringVal(column));
                break;
            }
            default:
                throw new IllegalStateException("Unrecognized column type: " + TTypeId.findByValue(dataType[colNum]));
            }
        }
        input.position(startPos + chunkSize);
        return outputCols;
    } catch (IOException e) {
        e.printStackTrace();
        return (ColumnBuffer[]) null;
    }
}
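Stripped of the Snappy and Thrift plumbing, the buffer contract above is simple: read at absolute offsets inside [position, position + chunkSize), then advance position past the whole chunk. A standalone sketch of that contract using plain NIO (no Snappy):

import java.nio.ByteBuffer;

public class ChunkFooter {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(12);
        buf.putInt(7).putInt(11).putInt(2);   // the last int acts as a "footer"
        buf.flip();

        int startPos = buf.position();
        int chunkSize = 12;
        // Absolute get: reads the footer without moving position.
        int footer = buf.getInt(startPos + chunkSize - Integer.SIZE / Byte.SIZE);
        // Consume the whole chunk in one step, as decompress() does.
        buf.position(startPos + chunkSize);

        System.out.println(footer);           // 2
        System.out.println(buf.remaining());  // 0
    }
}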
From source file:it.unimi.di.big.mg4j.document.SimpleCompressedDocumentCollection.java
private void loadOffsets(final String basename, final boolean rethrow) throws IOException {
    try {
        docOffsets = loadOffsetsSuccinctly(basename + DOCUMENT_OFFSETS_EXTENSION, documents,
                new File(basename + DOCUMENTS_EXTENSION).length() * Byte.SIZE + 1);
        termOffsets = loadOffsetsSuccinctly(basename + TERM_OFFSETS_EXTENSION, terms,
                new File(basename + TERMS_EXTENSION).length() + 1);
        nonTermOffsets = nonTerms < 0 ? null
                : loadOffsetsSuccinctly(basename + NONTERM_OFFSETS_EXTENSION, nonTerms,
                        new File(basename + NONTERMS_EXTENSION).length() + 1);
    } catch (IOException e) {
        // We leave the possibility for a filename() to fix the problem and load the right files.
        if (rethrow)
            throw e;
    }
}
From source file:uk.ac.ucl.excites.sapelli.storage.model.columns.StringColumn.java
@Override
protected int getMaximumValueSize(boolean lossless) {
    return sizeField.size() + (getMaximumBytes() * Byte.SIZE);
}
From source file:ubic.gemma.analysis.expression.coexpression.ProbeLinkCoexpressionAnalyzerImpl.java
/**
 * @param geneId
 * @param ees
 * @param eeIdOrder
 * @return
 */
private byte[] computeTestedDatasetVector(Long geneId, Collection<? extends BioAssaySet> ees,
        Map<Long, Integer> eeIdOrder) {
    /*
     * This condition is pretty much only true once in practice. That's because the first time through populates
     * genesTestedIn for all the genes tested in any of the data sets.
     */
    if (!genesTestedIn.containsKey(geneId)) {
        cacheEesGeneTestedIn(ees, eeIdOrder);
    }

    assert eeIdOrder.size() == ees.size();
    assert genesTestedIn.containsKey(geneId);

    List<Boolean> eesTestingGene = genesTestedIn.get(geneId);

    assert eesTestingGene.size() == ees.size();

    // Initialize: one bit per data set, rounded up to whole bytes.
    byte[] result = new byte[(int) Math.ceil(eeIdOrder.size() / (double) Byte.SIZE)];
    for (int i = 0, j = result.length; i < j; i++) {
        result[i] = 0x0;
    }

    for (BioAssaySet ee : ees) {
        Long eeid = ee.getId();
        Integer index = eeIdOrder.get(eeid);
        if (eesTestingGene.get(index)) {
            BitUtil.set(result, index);
        }
    }
    return result;
}
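The sizing expression (int) Math.ceil(eeIdOrder.size() / (double) Byte.SIZE) allocates one bit per data set, rounded up to whole bytes. A standalone sketch with plain-Java stand-ins for BitUtil.set/BitUtil.get (the bit ordering here is illustrative and may not match BitUtil's actual convention):

public class BitVectorSizing {
    // Bytes needed to hold n flags, one bit each: ceil(n / 8).
    static int bytesFor(int n) {
        return (int) Math.ceil(n / (double) Byte.SIZE);
    }

    // Hypothetical stand-in for BitUtil.set: mark one flag.
    static void set(byte[] bits, int index) {
        bits[index / Byte.SIZE] |= (1 << (index % Byte.SIZE));
    }

    // Hypothetical stand-in for BitUtil.get: test one flag.
    static boolean get(byte[] bits, int index) {
        return (bits[index / Byte.SIZE] & (1 << (index % Byte.SIZE))) != 0;
    }

    public static void main(String[] args) {
        byte[] flags = new byte[bytesFor(20)];  // 20 bits -> 3 bytes
        set(flags, 13);
        System.out.println(flags.length);       // 3
        System.out.println(get(flags, 13));     // true
        System.out.println(get(flags, 14));     // false
    }
}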
From source file:ubic.gemma.analysis.expression.coexpression.Gene2GenePopulationServiceImpl.java
/**
 * Create a vector representing the datasets which had specific probes for the query and target genes. A '1' means
 * it did, '0' means it did not.
 *
 * @param nonspecificEE
 * @param eeIdOrder
 * @return
 */
private byte[] computeSpecificityVector(Collection<Long> nonspecificEE, Map<Long, Integer> eeIdOrder) {
    assert nonspecificEE.size() <= eeIdOrder.size();

    byte[] result = new byte[(int) Math.ceil(eeIdOrder.size() / (double) Byte.SIZE)];

    /*
     * Start initialized with 0's (might not be necessary...)
     */
    for (int i = 0, j = result.length; i < j; i++) {
        result[i] = 0x0;
    }

    /*
     * Set the bits we're using to 1.
     */
    for (int i = 0; i < eeIdOrder.size(); i++) {
        BitUtil.set(result, i);
    }

    /*
     * Set it so 1=specific, 0=nonspecific.
     */
    for (Long id : nonspecificEE) {
        BitUtil.clear(result, eeIdOrder.get(id));
    }

    assert BitUtil.count(result) == eeIdOrder.size() - nonspecificEE.size()
            : "Got " + BitUtil.count(result) + " ones, expected " + (eeIdOrder.size() - nonspecificEE.size());

    return result;
}