List of usage examples for java.lang.Byte.SIZE
public static final int SIZE
Byte.SIZE is the number of bits used to represent a byte value in two's complement binary form.
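Every snippet on this page leans on the same idiom: Byte.SIZE is the width of a byte in bits (8), so dividing another primitive's SIZE constant by Byte.SIZE yields that primitive's width in bytes. A minimal sketch of the idiom (the class name here is illustrative and not taken from any example below):

import java.nio.ByteBuffer;

public class ByteSizeDemo {
    public static void main(String[] args) {
        System.out.println(Byte.SIZE);                 // 8 bits per byte
        System.out.println(Integer.SIZE / Byte.SIZE);  // 4 bytes per int
        System.out.println(Long.SIZE / Byte.SIZE);     // 8 bytes per long

        // Typical use: sizing a buffer that holds exactly one long.
        ByteBuffer buf = ByteBuffer.allocate(Long.SIZE / Byte.SIZE);
        buf.putLong(42L);
    }
}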
From source file:org.apache.hadoop.hbase.migration.nineteen.regionserver.HStoreFile.java
/**
 * Reads in an info file.
 *
 * @param filesystem file system
 * @return The sequence id contained in the info file
 * @throws IOException
 */
public long loadInfo(final FileSystem filesystem) throws IOException {
    Path p = null;
    if (isReference()) {
        p = getInfoFilePath(reference.getEncodedRegionName(), this.reference.getFileId());
    } else {
        p = getInfoFilePath();
    }
    long length = filesystem.getFileStatus(p).getLen();
    boolean hasMoreThanSeqNum = length > (Byte.SIZE + Bytes.SIZEOF_LONG);
    DataInputStream in = new DataInputStream(filesystem.open(p));
    try {
        byte flag = in.readByte();
        if (flag == INFO_SEQ_NUM) {
            if (hasMoreThanSeqNum) {
                flag = in.readByte();
                if (flag == MAJOR_COMPACTION) {
                    this.majorCompaction = in.readBoolean();
                }
            }
            return in.readLong();
        }
        throw new IOException("Cannot process log file: " + p);
    } finally {
        in.close();
    }
}
From source file:it.unimi.di.big.mg4j.index.DiskBasedIndex.java
/**
 * Commodity method for loading a big list of binary longs with specified endianness
 * into a {@linkplain LongBigArrays long big array}.
 *
 * @param ioFactory the factory that will be used to perform I/O.
 * @param filename the file containing the longs.
 * @param byteOrder the endianness of the longs.
 * @return a big list of longs containing the longs in <code>file</code>.
 */
public static LongBigArrayBigList loadLongBigList(final IOFactory ioFactory, final CharSequence filename,
        final ByteOrder byteOrder) throws IOException {
    final long length = ioFactory.length(filename.toString()) / (Long.SIZE / Byte.SIZE);
    ReadableByteChannel channel = ioFactory.getReadableByteChannel(filename.toString());
    final LongBigArrayBigList loadLongBigList = loadLongBigList(channel, length, byteOrder);
    channel.close();
    return loadLongBigList;
}
From source file:jsave.Utils.java
/**
 * Convert an array of bytes to an array of double.
 *
 * @param byteArray array of bytes
 * @return an array of double
 */
public static double[] toDoubleArray(byte[] byteArray) {
    // A double occupies Double.SIZE / Byte.SIZE = 8 bytes; an integer literal
    // smaller than Byte.SIZE here would truncate to 0 and divide by zero below.
    int times = Double.SIZE / Byte.SIZE;
    double[] doubles = new double[byteArray.length / times];
    for (int i = 0; i < doubles.length; i++) {
        doubles[i] = ByteBuffer.wrap(byteArray, i * times, times).getDouble();
    }
    return doubles;
}
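For a quick round trip, a hypothetical inverse of the method above (the name toByteArray is illustrative and not part of jsave.Utils; it assumes the same java.nio.ByteBuffer import):

// Hypothetical inverse: packs doubles big-endian, Double.SIZE / Byte.SIZE bytes per value,
// so toDoubleArray(toByteArray(values)) returns the original values.
public static byte[] toByteArray(double[] doubles) {
    ByteBuffer buffer = ByteBuffer.allocate(doubles.length * (Double.SIZE / Byte.SIZE));
    for (double d : doubles) {
        buffer.putDouble(d);
    }
    return buffer.array();
}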
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(short[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Short.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}
From source file:de.undercouch.bson4jackson.BsonParser.java
/**
 * Can be called when a new embedded document is found. Reads the
 * document's header and creates a new context on the stack.
 *
 * @param array true if the document is an embedded array
 * @return the JSON token read
 * @throws IOException if an I/O error occurs
 */
protected JsonToken handleNewDocument(boolean array) throws IOException {
    if (_in == null) {
        //this means Feature.HONOR_DOCUMENT_LENGTH is enabled, and we
        //haven't yet started reading. Read the first int to find out the
        //length of the document.
        byte[] buf = new byte[Integer.SIZE / Byte.SIZE];
        int len = 0;
        while (len < buf.length) {
            int l = _rawInputStream.read(buf, len, buf.length - len);
            if (l == -1) {
                throw new IOException("Not enough bytes for length of document");
            }
            len += l;
        }

        //wrap the input stream by a bounded stream, subtract buf.length from the
        //length because the size itself is included in the length
        int documentLength = ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN).getInt();
        InputStream in = new BoundedInputStream(_rawInputStream, documentLength - buf.length);

        //buffer if the raw input stream is not already buffered
        if (!(_rawInputStream instanceof BufferedInputStream)) {
            in = new StaticBufferedInputStream(in);
        }
        _counter = new CountingInputStream(in);
        _in = new LittleEndianInputStream(_counter);
    } else {
        //read document header (skip size, we're not interested)
        _in.readInt();
    }

    _currentContext = new Context(_currentContext, array);
    return (array ? JsonToken.START_ARRAY : JsonToken.START_OBJECT);
}
From source file:org.callimachusproject.auth.CookieAuthenticationManager.java
private String getPassword(int hour, String username, String iri, String nonce) throws IOException {
    int size = secret.length + username.length() + iri.length() + nonce.length();
    ByteArrayOutputStream baos = new ByteArrayOutputStream(size * 2);
    baos.write(secret);
    for (int i = 0, n = Integer.SIZE / Byte.SIZE; i < n; i++) {
        baos.write((byte) hour);
        hour >>= Byte.SIZE;
    }
    baos.write(getIdentifier().getBytes("UTF-8"));
    baos.write(username.getBytes("UTF-8"));
    baos.write(iri.getBytes("UTF-8"));
    baos.write(nonce.getBytes("UTF-8"));
    return new String(Hex.encodeHex(DigestUtils.md5(baos.toByteArray())));
}
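The loop above writes the int hour least-significant byte first, shifting right by Byte.SIZE bits each pass, which is a little-endian encoding. For comparison, a hypothetical stand-alone helper (not part of the class above) that produces the same four bytes with ByteBuffer:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Hypothetical helper: encodes an int as Integer.SIZE / Byte.SIZE little-endian bytes,
// matching the manual shift loop in getPassword.
static byte[] toLittleEndianBytes(int value) {
    return ByteBuffer.allocate(Integer.SIZE / Byte.SIZE)
            .order(ByteOrder.LITTLE_ENDIAN)
            .putInt(value)
            .array();
}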
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(int[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Integer.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}
From source file:org.eclipse.january.dataset.DTypeUtils.java
/**
 * @param dtype
 * @param isize
 *            number of elements in an item
 * @return length of single item in bytes
 */
public static int getItemBytes(final int dtype, final int isize) {
    int size;
    switch (dtype) {
    case Dataset.BOOL:
        size = 1; // How is this defined?
        break;
    case Dataset.INT8:
    case Dataset.ARRAYINT8:
        size = Byte.SIZE / 8;
        break;
    case Dataset.INT16:
    case Dataset.ARRAYINT16:
    case Dataset.RGB:
        size = Short.SIZE / 8;
        break;
    case Dataset.INT32:
    case Dataset.ARRAYINT32:
        size = Integer.SIZE / 8;
        break;
    case Dataset.INT64:
    case Dataset.ARRAYINT64:
        size = Long.SIZE / 8;
        break;
    case Dataset.FLOAT32:
    case Dataset.ARRAYFLOAT32:
    case Dataset.COMPLEX64:
        size = Float.SIZE / 8;
        break;
    case Dataset.FLOAT64:
    case Dataset.ARRAYFLOAT64:
    case Dataset.COMPLEX128:
        size = Double.SIZE / 8;
        break;
    default:
        size = 0;
        break;
    }
    return size * isize;
}
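The literal 8 in each branch above is Byte.SIZE spelled out as a number; since Java 8 the same byte widths are also exposed directly as BYTES constants. A brief illustration of the equivalent spellings:

// Equivalent ways of writing the per-item widths used above (Java 8+).
int intWidth1 = Integer.SIZE / 8;          // 4
int intWidth2 = Integer.SIZE / Byte.SIZE;  // 4
int intWidth3 = Integer.BYTES;             // 4
int doubleWidth = Double.BYTES;            // 8, same as Double.SIZE / Byte.SIZE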
From source file:org.callimachusproject.auth.DigestPasswordAccessor.java
private String getDaypass(int day, String email, String secret) {
    if (secret == null)
        return null;
    byte[] random = readBytes(secret);
    byte[] id = email.getBytes(Charset.forName("UTF-8"));
    byte[] seed = new byte[random.length + id.length + Integer.SIZE / Byte.SIZE];
    System.arraycopy(random, 0, seed, 0, random.length);
    System.arraycopy(id, 0, seed, random.length, id.length);
    for (int i = random.length + id.length; i < seed.length; i++) {
        seed[i] = (byte) day;
        day >>= Byte.SIZE;
    }
    return new PasswordGenerator(seed).nextPassword();
}
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
private int writePrimitives(long[] primitives, ByteBuffer output) throws IOException {
    int bytesWritten = Snappy.rawCompress(primitives, 0, primitives.length * Long.SIZE / Byte.SIZE,
            output.array(), output.arrayOffset() + output.position());
    output.position(output.position() + bytesWritten);
    return bytesWritten;
}