List of usage examples for java.util BitSet toByteArray
public byte[] toByteArray()
From source file:Main.java
public static void main(String[] args) { BitSet bitset1 = new BitSet(8); // assign values to bitset1 bitset1.set(0);//from w w w .j a va2s. c o m bitset1.set(1); bitset1.set(2); // print the sets System.out.println("Bitset1:" + bitset1); System.out.println(Arrays.toString(bitset1.toByteArray())); }
From source file:com.roche.sequencing.bioinformatics.common.utils.BitSetUtil.java
public static void writeBitSetToFile(BitSet bitset, File outputFile) throws IOException { // erase the existing file since the content at the end of the file // would be preserved if it is not written over thus preventing the containerInformationStart location // from being stored at the very end of the file if (outputFile.exists()) { outputFile.delete();// ww w . j a v a 2s .c o m } FileUtil.createNewFile(outputFile); try (FileOutputStream writer = new FileOutputStream(outputFile)) { writer.write(bitset.toByteArray()); } }
From source file:de.upb.wdqa.wdvd.revisiontags.TagDownloaderRevisionData.java
/**
 * Converts the tags to a memory-efficient byte[] representation: each tag id
 * becomes a set bit in a BitSet, which is then serialized via toByteArray().
 * A null or empty set yields an empty array, because BitSet#toByteArray()
 * omits trailing zero bytes.
 */
private byte[] tagsToBytes(Set<DbTag> tags) {
    BitSet bits = new BitSet();
    if (tags == null) {
        return bits.toByteArray();
    }
    for (DbTag tag : tags) {
        bits.set(tag.getTagId());
    }
    return bits.toByteArray();
}
From source file:org.osgp.adapter.protocol.dlms.domain.commands.ConfigurationObjectHelperService.java
/** * Calculate the byte array for the given list of ConfigurationFlagType * objects/*from w w w .j a va 2 s. c om*/ * * @param configurationFlags * List of ConfigurationFlag objects * @return byte array with MSB in first element */ public byte[] toByteArray(final List<ConfigurationFlag> configurationFlags) { final BitSet bitSet = new BitSet(NUMBER_OF_FLAG_BITS); for (final ConfigurationFlag configurationFlag : configurationFlags) { if (configurationFlag.isEnabled()) { bitSet.set(this.toBitPosition(configurationFlag.getConfigurationFlagType()), true); } } final byte[] byteArray = bitSet.toByteArray(); // swap 0 and 1 final byte tmp = byteArray[1]; byteArray[1] = byteArray[0]; byteArray[0] = tmp; return byteArray; }
From source file:at.tuwien.mnsa.smssender.SMSPDUConverter.java
/**
 * Get the GSM 7-bit PDU encoding for the given message.
 *
 * Characters are looked up in the GSM 03.38 default alphabet (with the
 * escape-prefixed extension table) and packed 8 septets into 7 octets.
 *
 * @param message    text to encode; every character must exist in the
 *                   GSM_3GPP_TS_23_038 table or its extension table
 * @param rightshift bit offset applied to the whole packed payload, used for
 *                   concatenated-SMS alignment
 * @return the packed bytes plus the septet count (escape characters count
 *         toward the length)
 * @throws RuntimeException if a character has no GSM 03.38 mapping
 */
public SMSPDUConversionResult getContent(String message, int rightshift) {
    List<Byte> finalized = new LinkedList<>();
    // 16-bit working window: the current partially-packed octet plus carry bits.
    BitSet currentWorkingBS = new BitSet(16);
    int currentShiftpos = 0;
    boolean currentlyExtended = false;
    int len = 0;
    // Repeat while there are characters left; consume one character per pass.
    while (message.length() > 0) {
        String c = message.substring(0, 1);
        message = message.substring(1);
        byte value;
        // Look up the current character in the default alphabet first.
        if (this.GSM_3GPP_TS_23_038.containsKey(c)) {
            value = this.GSM_3GPP_TS_23_038.get(c);
        } else {
            if (this.GSM_3GPP_TS_23_038_EXTENSION.containsKey(c)) {
                if (!currentlyExtended) {
                    // Extension character: emit the escape septet now and
                    // push the character back so it is re-processed next pass.
                    message = c + message;
                    currentlyExtended = true;
                    value = this.GSM_3GPP_TS_23_038.get("\u001B");
                } else {
                    // The escape was just emitted; now emit the extended
                    // alphabet septet itself.
                    value = this.GSM_3GPP_TS_23_038_EXTENSION.get(c);
                    currentlyExtended = false;
                }
            } else {
                throw new RuntimeException("Not found: " + c);
            }
        }
        if (currentShiftpos == 0) {
            // First septet of an 8-septet group: place it at bit 0, no carry.
            addByteToBitset(value, currentWorkingBS, 0, 1, 7);
        } else {
            // Make room for the low bits of the current septet at the front.
            currentWorkingBS = rightShiftBitset(currentWorkingBS, currentShiftpos);
            // Low (8 - shift) bits of the septet go in front of the window.
            addByteToBitset(value, currentWorkingBS, 0, 8 - currentShiftpos, currentShiftpos);
            // Remaining high bits (if any) become the carry at the window's tail.
            if (currentShiftpos < 7) {
                addByteToBitset(value, currentWorkingBS, 8, 1, 7 - currentShiftpos);
            }
            // The first octet of the window is now complete: export it.
            byte finalByte = currentWorkingBS.toByteArray()[0];
            finalByte = swapEndianFormat(finalByte);
            finalized.add(finalByte);
            // Shift the window left by 8 bits since we just exported an octet.
            currentWorkingBS = leftShiftBitset(currentWorkingBS, 8);
        }
        currentShiftpos = (currentShiftpos + 1) % 8;
        len++;
    }
    // Flush the last partial octet (undo the eager shift performed above).
    if (currentShiftpos == 7) {
        // A full group just completed; the trailing octet is all padding.
        byte finalByte = 0x00;
        finalized.add(finalByte);
    } else if (currentShiftpos != 0) {
        byte finalByte = (currentWorkingBS.isEmpty()) ? 0x0 : currentWorkingBS.toByteArray()[0];
        finalByte = swapEndianFormat(finalByte);
        // Java's >> on byte sign-extends (fills with 1s for negative values),
        // so each shifted-in high bit is cleared manually.
        for (int i = 0; i < currentShiftpos; i++) {
            finalByte = (byte) (finalByte >> 1);
            finalByte = (byte) (finalByte & 0x7F); // unset first bit
        }
        finalized.add(finalByte);
    }
    byte[] finalM = ArrayUtils.toPrimitive(finalized.toArray(new Byte[finalized.size()]));
    Logger.getGlobal().info("1: " + DatatypeConverter.printHexBinary(finalM));
    // In case of rightshift for concatenation -> right shift the whole array.
    if (rightshift > 0) {
        BitSet bs = BitSet.valueOf(finalM);
        bs = rightShiftBitset(bs, rightshift);
        finalM = bs.toByteArray();
        Logger.getGlobal().info("2: " + DatatypeConverter.printHexBinary(finalM));
    }
    SMSPDUConversionResult res = new SMSPDUConversionResult(finalM, len);
    return res;
}
From source file:org.apache.eagle.alert.engine.serialization.PartitionedEventSerializerTest.java
@Test public void testBitSet() { BitSet bitSet = new BitSet(); bitSet.set(0, true); // 1 bitSet.set(1, false); // 0 bitSet.set(2, true); // 1 LOG.info("Bit Set Size: {}", bitSet.size()); LOG.info("Bit Set Byte[]: {}", bitSet.toByteArray()); LOG.info("Bit Set Byte[]: {}", bitSet.toLongArray()); LOG.info("BitSet[0]: {}", bitSet.get(0)); LOG.info("BitSet[1]: {}", bitSet.get(1)); LOG.info("BitSet[1]: {}", bitSet.get(2)); byte[] bytes = bitSet.toByteArray(); BitSet bitSet2 = BitSet.valueOf(bytes); LOG.info("Bit Set Size: {}", bitSet2.size()); LOG.info("Bit Set Byte[]: {}", bitSet2.toByteArray()); LOG.info("Bit Set Byte[]: {}", bitSet2.toLongArray()); LOG.info("BitSet[0]: {}", bitSet2.get(0)); LOG.info("BitSet[1]: {}", bitSet2.get(1)); LOG.info("BitSet[1]: {}", bitSet2.get(2)); BitSet bitSet3 = new BitSet(); bitSet3.set(0, true);// w w w. j av a2 s. com Assert.assertEquals(1, bitSet3.length()); BitSet bitSet4 = new BitSet(); bitSet4.set(0, false); Assert.assertEquals(0, bitSet4.length()); Assert.assertFalse(bitSet4.get(1)); Assert.assertFalse(bitSet4.get(2)); }
From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java
/**
 * Returns the transaction high-water mark plus the list of all transactions
 * at or below it, with aborted ones flagged in a BitSet (serialized via
 * toByteArray() into the response's ByteBuffer, indexed by position in the
 * returned txn list). Read-only: the SQL transaction is always rolled back.
 *
 * @return open-transactions snapshot including hwm, txn ids, aborted bitmap,
 *         and (when present) the minimum open txn id
 * @throws MetaException if the txn tables are uninitialized or the select fails
 */
@Override
@RetrySemantics.ReadOnly
public GetOpenTxnsResponse getOpenTxns() throws MetaException {
    try {
        // We need to figure out the current transaction number and the list of
        // open transactions. To avoid needing a transaction on the underlying
        // database we'll look at the current transaction number first. If it
        // subsequently shows up in the open list that's ok.
        Connection dbConn = null;
        Statement stmt = null;
        ResultSet rs = null;
        try {
            /**
             * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
             */
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            String s = "select ntxn_next - 1 from NEXT_TXN_ID";
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            if (!rs.next()) {
                throw new MetaException(
                        "Transaction tables not properly " + "initialized, no record found in next_txn_id");
            }
            // High-water mark: highest txn id allocated so far.
            long hwm = rs.getLong(1);
            if (rs.wasNull()) {
                throw new MetaException(
                        "Transaction tables not properly " + "initialized, null record found in next_txn_id");
            }
            close(rs);
            List<Long> openList = new ArrayList<Long>();
            // Need the WHERE clause below to ensure consistent results with
            // READ_COMMITTED (txns created after the hwm read are excluded).
            s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id";
            LOG.debug("Going to execute query<" + s + ">");
            rs = stmt.executeQuery(s);
            long minOpenTxn = Long.MAX_VALUE;
            // Bit i set <=> openList.get(i) is an aborted txn.
            BitSet abortedBits = new BitSet();
            while (rs.next()) {
                long txnId = rs.getLong(1);
                openList.add(txnId);
                char c = rs.getString(2).charAt(0);
                if (c == TXN_OPEN) {
                    minOpenTxn = Math.min(minOpenTxn, txnId);
                } else if (c == TXN_ABORTED) {
                    abortedBits.set(openList.size() - 1);
                }
            }
            LOG.debug("Going to rollback");
            // Read-only path: roll back rather than commit.
            dbConn.rollback();
            ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
            GetOpenTxnsResponse otr = new GetOpenTxnsResponse(hwm, openList, byteBuffer);
            if (minOpenTxn < Long.MAX_VALUE) {
                otr.setMin_open_txn(minOpenTxn);
            }
            return otr;
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            // May convert the SQLException into a RetryException, retried below.
            checkRetryable(dbConn, e, "getOpenTxns");
            throw new MetaException(
                    "Unable to select from transaction database, " + StringUtils.stringifyException(e));
        } finally {
            close(rs, stmt, dbConn);
        }
    } catch (RetryException e) {
        // checkRetryable decided this failure is transient; try again.
        return getOpenTxns();
    }
}
From source file:org.apache.hadoop.hive.serde2.compression.SnappyCompDe.java
/**
 * Compress a set of columns.
 *
 * The header contains a compressed array of data types.
 * The body contains compressed columns and their metadata.
 * The footer contains a compressed array of chunk sizes. The final four bytes
 * of the footer encode the byte size of that compressed array.
 *
 * @param colSet columns to compress, one ColumnBuffer per column
 * @return ByteBuffer representing the compressed set, flipped and ready to read
 */
@Override
public ByteBuffer compress(ColumnBuffer[] colSet) {
    // Many compression libraries allow you to avoid allocation of intermediate arrays.
    // To use these API, we need to preallocate the output container.

    // Reserve space for the header (one int per column's type id).
    int[] dataType = new int[colSet.length];
    int maxCompressedSize = Snappy.maxCompressedLength(4 * dataType.length);

    // Reserve space for the compressed nulls BitSet for each column.
    // NOTE(review): this estimate (and several per-type estimates below) sizes
    // buffers from colSet.length, the number of COLUMNS — the per-row values
    // of each column look like the intended quantity. With many rows and few
    // columns the preallocated buffer could be too small; confirm against the
    // writePrimitives/writeBoxed* implementations.
    maxCompressedSize += colSet.length * Snappy.maxCompressedLength((colSet.length / 8) + 1);

    // Track the length of `List<Integer> compressedSize` which will be declared later.
    int uncompressedFooterLength = 1 + 2 * colSet.length;

    for (int colNum = 0; colNum < colSet.length; ++colNum) {
        // Reserve space for the compressed columns.
        dataType[colNum] = colSet[colNum].getType().toTType().getValue();
        switch (TTypeId.findByValue(dataType[colNum])) {
        case BOOLEAN_TYPE:
            maxCompressedSize += Integer.SIZE / Byte.SIZE; // This is for the encoded length.
            maxCompressedSize += Snappy.maxCompressedLength((colSet.length / 8) + 1);
            break;
        case TINYINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length);
            break;
        case SMALLINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Short.SIZE / Byte.SIZE);
            break;
        case INT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            break;
        case BIGINT_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Long.SIZE / Byte.SIZE);
            break;
        case DOUBLE_TYPE:
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Double.SIZE / Byte.SIZE);
            break;
        case BINARY_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            // Reserve space for the size of the compressed flattened bytes.
            for (ByteBuffer nextBuffer : colSet[colNum].toTColumn().getBinaryVal().getValues()) {
                maxCompressedSize += Snappy.maxCompressedLength(nextBuffer.limit());
            }
            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;
            break;
        case STRING_TYPE:
            // Reserve space for the size of the compressed array of row sizes.
            maxCompressedSize += Snappy.maxCompressedLength(colSet.length * Integer.SIZE / Byte.SIZE);
            // Reserve space for the size of the compressed flattened bytes.
            for (String nextString : colSet[colNum].toTColumn().getStringVal().getValues()) {
                maxCompressedSize += Snappy
                        .maxCompressedLength(nextString.getBytes(StandardCharsets.UTF_8).length);
            }
            // Add an additional value to the list of compressed chunk sizes (length of `rowSize` array).
            uncompressedFooterLength++;
            break;
        default:
            throw new IllegalStateException("Unrecognized column type");
        }
    }

    // Reserve space for the footer.
    maxCompressedSize += Snappy.maxCompressedLength(uncompressedFooterLength * Integer.SIZE / Byte.SIZE);

    // Allocate the output container.
    ByteBuffer output = ByteBuffer.allocate(maxCompressedSize);

    // Allocate the footer. This goes in the footer because we don't know the chunk sizes until after
    // the columns have been compressed and written.
    ArrayList<Integer> compressedSize = new ArrayList<Integer>(uncompressedFooterLength);

    // Write to the output buffer.
    try {
        // Write the header.
        compressedSize.add(writePrimitives(dataType, output));

        // Write the compressed columns and metadata.
        for (int colNum = 0; colNum < colSet.length; colNum++) {
            switch (TTypeId.findByValue(dataType[colNum])) {
            case BOOLEAN_TYPE: {
                TBoolColumn column = colSet[colNum].toTColumn().getBoolVal();

                List<Boolean> bools = column.getValues();
                // Pack the boolean values into a BitSet, one bit per row.
                BitSet bsBools = new BitSet(bools.size());
                for (int rowNum = 0; rowNum < bools.size(); rowNum++) {
                    bsBools.set(rowNum, bools.get(rowNum));
                }

                compressedSize.add(writePrimitives(column.getNulls(), output));

                // BitSet won't write trailing zeroes so we encode the length
                output.putInt(column.getValuesSize());

                compressedSize.add(writePrimitives(bsBools.toByteArray(), output));
                break;
            }
            case TINYINT_TYPE: {
                TByteColumn column = colSet[colNum].toTColumn().getByteVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedBytes(column.getValues(), output));
                break;
            }
            case SMALLINT_TYPE: {
                TI16Column column = colSet[colNum].toTColumn().getI16Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedShorts(column.getValues(), output));
                break;
            }
            case INT_TYPE: {
                TI32Column column = colSet[colNum].toTColumn().getI32Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedIntegers(column.getValues(), output));
                break;
            }
            case BIGINT_TYPE: {
                TI64Column column = colSet[colNum].toTColumn().getI64Val();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedLongs(column.getValues(), output));
                break;
            }
            case DOUBLE_TYPE: {
                TDoubleColumn column = colSet[colNum].toTColumn().getDoubleVal();
                compressedSize.add(writePrimitives(column.getNulls(), output));
                compressedSize.add(writeBoxedDoubles(column.getValues(), output));
                break;
            }
            case BINARY_TYPE: {
                TBinaryColumn column = colSet[colNum].toTColumn().getBinaryVal();

                // Initialize the array of row sizes.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).limit();
                    totalSize += column.getValues().get(rowNum).limit();
                }

                // Flatten the data for Snappy for a better compression ratio.
                ByteBuffer flattenedData = ByteBuffer.allocate(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.put(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the compressed, flattened data.
                compressedSize.add(writePrimitives(flattenedData.array(), output));
                break;
            }
            case STRING_TYPE: {
                TStringColumn column = colSet[colNum].toTColumn().getStringVal();

                // Initialize the array of row sizes.
                // NOTE(review): rowSizes uses String#length() (UTF-16 code
                // units) while the bytes written below are UTF-8; these can
                // differ for non-ASCII text — confirm the decompressor's
                // expectation.
                int[] rowSizes = new int[column.getValuesSize()];
                int totalSize = 0;
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    rowSizes[rowNum] = column.getValues().get(rowNum).length();
                    totalSize += column.getValues().get(rowNum).length();
                }

                // Flatten the data for Snappy for a better compression ratio.
                StringBuilder flattenedData = new StringBuilder(totalSize);
                for (int rowNum = 0; rowNum < column.getValuesSize(); rowNum++) {
                    flattenedData.append(column.getValues().get(rowNum));
                }

                // Write nulls bitmap.
                compressedSize.add(writePrimitives(column.getNulls(), output));

                // Write the list of row sizes.
                compressedSize.add(writePrimitives(rowSizes, output));

                // Write the flattened data.
                compressedSize.add(
                        writePrimitives(flattenedData.toString().getBytes(StandardCharsets.UTF_8), output));
                break;
            }
            default:
                throw new IllegalStateException("Unrecognized column type");
            }
        }

        // Write the footer.
        output.putInt(writeBoxedIntegers(compressedSize, output));
    } catch (IOException e) {
        // NOTE(review): the IOException is swallowed and a partially-written
        // buffer is returned to the caller — confirm this is intentional.
        e.printStackTrace();
    }

    output.flip();
    return output;
}
From source file:org.apache.metron.stellar.common.utils.hashing.tlsh.TLSHHasher.java
public Map<String, String> bin(String hash) throws DecoderException { Random r = new Random(0); byte[] h = Hex.decodeHex(hash.substring(2 * checksumOption.getChecksumLength()).toCharArray()); BitSet vector = BitSet.valueOf(h); int n = vector.length(); Map<String, String> ret = new HashMap<>(); boolean singleHash = hashes.size() == 1; for (int numHashes : hashes) { BitSet projection = new BitSet(); for (int i = 0; i < numHashes; ++i) { int index = r.nextInt(n); projection.set(i, vector.get(index)); }//from w ww . ja v a2 s . c om String outputHash = numHashes + Hex.encodeHexString(projection.toByteArray()); if (singleHash) { ret.put(TLSH_BIN_KEY, outputHash); } else { ret.put(TLSH_BIN_KEY + "_" + numHashes, outputHash); } } return ret; }