List of usage examples for java.lang Integer toBinaryString
public static String toBinaryString(int i)
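A minimal standalone sketch of the method's behavior before the real-world usages below: the result is the unsigned base-2 representation with no leading zeros, so negative arguments print as 32-bit two's complement.

public class ToBinaryStringDemo {
    public static void main(String[] args) {
        System.out.println(Integer.toBinaryString(10)); // 1010
        System.out.println(Integer.toBinaryString(0));  // 0
        System.out.println(Integer.toBinaryString(-1)); // 11111111111111111111111111111111
    }
}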
From source file:org.red5.io.mp4.impl.MP4Reader.java
/**
 * Process the audio information contained in the atoms.
 *
 * @param stbl
 * @param ase
 *            AudioSampleEntry
 * @param scale
 *            timescale
 */
private void processAudioBox(SampleTableBox stbl, AudioSampleEntry ase, long scale) {
    // get codec
    String codecName = ase.getType();
    // set the audio codec here - may be mp4a or...
    setAudioCodecId(codecName);
    log.debug("Sample size: {}", ase.getSampleSize());
    long ats = ase.getSampleRate();
    // skip invalid audio time scale
    if (ats > 0) {
        audioTimeScale = ats * 1.0;
    }
    log.debug("Sample rate (audio time scale): {}", audioTimeScale);
    audioChannels = ase.getChannelCount();
    log.debug("Channels: {}", audioChannels);
    if (ase.getBoxes(ESDescriptorBox.class).size() > 0) {
        // look for esds
        ESDescriptorBox esds = ase.getBoxes(ESDescriptorBox.class).get(0);
        if (esds == null) {
            log.debug("esds not found in default path");
            // check for decompression param atom
            AppleWaveBox wave = ase.getBoxes(AppleWaveBox.class).get(0);
            if (wave != null) {
                log.debug("wave atom found");
                // wave/esds
                esds = wave.getBoxes(ESDescriptorBox.class).get(0);
                if (esds == null) {
                    log.debug("esds not found in wave");
                    // mp4a/esds
                    //AC3SpecificBox mp4a = wave.getBoxes(AC3SpecificBox.class).get(0);
                    //esds = mp4a.getBoxes(ESDescriptorBox.class).get(0);
                }
            }
        }
        //mp4a: esds
        if (esds != null) {
            // http://stackoverflow.com/questions/3987850/mp4-atom-how-to-discriminate-the-audio-codec-is-it-aac-or-mp3
            ESDescriptor descriptor = esds.getEsDescriptor();
            if (descriptor != null) {
                DecoderConfigDescriptor configDescriptor = descriptor.getDecoderConfigDescriptor();
                AudioSpecificConfig audioInfo = configDescriptor.getAudioSpecificInfo();
                if (audioInfo != null) {
                    audioDecoderBytes = audioInfo.getConfigBytes();
                    /* The first 5 (0-4) bits tell us about the coder used for aacaot/aottype
                     * http://wiki.multimedia.cx/index.php?title=MPEG-4_Audio
                     *  0 - NULL
                     *  1 - AAC Main (a deprecated AAC profile from MPEG-2)
                     *  2 - AAC LC or backwards compatible HE-AAC
                     *  3 - AAC Scalable Sample Rate
                     *  4 - AAC LTP (a replacement for AAC Main, rarely used)
                     *  5 - HE-AAC explicitly signaled (non-backward compatible)
                     * 23 - Low Delay AAC
                     * 29 - HE-AACv2 explicitly signaled
                     * 32 - MP3on4 Layer 1
                     * 33 - MP3on4 Layer 2
                     * 34 - MP3on4 Layer 3
                     */
                    byte audioCoderType = audioDecoderBytes[0];
                    //match first byte
                    switch (audioCoderType) {
                        case 0x02:
                            log.debug("Audio type AAC LC");
                        case 0x11: //ER (Error Resilient) AAC LC
                            log.debug("Audio type ER AAC LC");
                        default:
                            audioCodecType = 1; //AAC LC
                            break;
                        case 0x01:
                            log.debug("Audio type AAC Main");
                            audioCodecType = 0; //AAC Main
                            break;
                        case 0x03:
                            log.debug("Audio type AAC SBR");
                            audioCodecType = 2; //AAC LC SBR
                            break;
                        case 0x05:
                        case 0x1d:
                            log.debug("Audio type AAC HE");
                            audioCodecType = 3; //AAC HE
                            break;
                        case 0x20:
                        case 0x21:
                        case 0x22:
                            log.debug("Audio type MP3");
                            audioCodecType = 33; //MP3
                            audioCodecId = "mp3";
                            break;
                    }
                    log.debug("Audio coder type: {} {} id: {}",
                            new Object[] { audioCoderType, Integer.toBinaryString(audioCoderType), audioCodecId });
                } else {
                    log.debug("Audio specific config was not found");
                    DecoderSpecificInfo info = configDescriptor.getDecoderSpecificInfo();
                    if (info != null) {
                        log.debug("Decoder info found: {}", info.getTag());
                        // qcelp == 5
                    }
                }
            } else {
                log.debug("No ES descriptor found");
            }
        }
    } else {
        log.debug("Audio sample entry had no descriptor");
    }
    processAudioStbl(stbl, scale);
}
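The comment block above notes that the first five bits of the AudioSpecificConfig identify the coder (aacaot). A standalone sketch of pulling those bits out and inspecting them with toBinaryString; the byte value here is illustrative, not taken from a real stream:

byte first = (byte) 0x12; // hypothetical first AudioSpecificConfig byte: 00010010
int aot = (first >> 3) & 0x1F; // audio object type = top 5 bits
System.out.println(aot + " (" + Integer.toBinaryString(aot) + ")"); // 2 (10) -> AAC LC per the table above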
From source file:org.openhab.binding.lutron.internal.grxprg.PrgProtocolHandler.java
/**
 * Handles the controller information response (currently not used).
 *
 * @param m the non-null {@link Matcher} that matched the response
 * @param resp the possibly null, possibly empty actual response
 */
private void handleControlInfo(Matcher m, String resp) {
    if (m == null) {
        throw new IllegalArgumentException("m (matcher) cannot be null");
    }
    if (m.groupCount() == 9) {
        int controlUnit = 0;
        try {
            controlUnit = Integer.parseInt(m.group(1));
            final String q4 = m.group(8);
            // parse the hex nibble and reverse its binary form so that bit N is at index N
            final String q4bits = new StringBuilder(Integer.toBinaryString(Integer.parseInt(q4, 16))).reverse()
                    .toString();
            // final boolean seqType = (q4bits.length() > 0 ? q4bits.charAt(0) : '0') == '1';
            final boolean seqMode = (q4bits.length() > 1 ? q4bits.charAt(1) : '0') == '1';
            final boolean zoneLock = (q4bits.length() > 2 ? q4bits.charAt(2) : '0') == '1';
            // note: charAt(3), not charAt(4) - the original indexed past its own length guard
            final boolean sceneLock = (q4bits.length() > 3 ? q4bits.charAt(3) : '0') == '1';

            _callback.stateChanged(controlUnit, PrgConstants.CHANNEL_SCENESEQ,
                    seqMode ? OnOffType.ON : OnOffType.OFF);
            _callback.stateChanged(controlUnit, PrgConstants.CHANNEL_SCENELOCK,
                    sceneLock ? OnOffType.ON : OnOffType.OFF);
            _callback.stateChanged(controlUnit, PrgConstants.CHANNEL_ZONELOCK,
                    zoneLock ? OnOffType.ON : OnOffType.OFF);
        } catch (NumberFormatException e) {
            logger.error("Invalid controller information response: '{}'", resp);
        }
    } else {
        logger.error("Invalid controller information response: '{}'", resp);
    }
}
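The StringBuilder.reverse() call is what makes bit N of the parsed nibble addressable as charAt(N). The same pattern in isolation, with an illustrative nibble value:

// 0xA parses to 10, toBinaryString gives "1010", reversed to "0101" so index i holds bit i
String bits = new StringBuilder(Integer.toBinaryString(Integer.parseInt("a", 16))).reverse().toString();
boolean bit0 = bits.length() > 0 && bits.charAt(0) == '1'; // false
boolean bit1 = bits.length() > 1 && bits.charAt(1) == '1'; // true
boolean bit3 = bits.length() > 3 && bits.charAt(3) == '1'; // true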
From source file:org.zaproxy.zap.extension.ascanrulesAlpha.GitMetadata.java
/**
 * Gets the data for the object in the pack file data with the version specified, at the
 * specified offset.
 *
 * @param packfiledata byte array containing the raw data associated with the pack file
 * @param packfiledataoffset the offset for the specified entry into the raw pack file data
 * @param entryLength the deflated length of the packfile object entry
 * @param packFileVersion the version of the pack file. The version determines the file format,
 *     and thus the object extraction logic.
 * @return the inflated binary data associated with the entry extracted from the pack file
 * @throws Exception
 */
private byte[] getPackedObjectData(byte[] packfiledata, int packfiledataoffset, int entryLength,
        int packFileVersion) throws Exception {
    try {
        // wrap the entry we are interested in in a ByteBuffer (using the offsets to calculate
        // the length)
        // Note: the offset is from the start of the "pack" file, not from after the header.
        if (packfiledataoffset > (packfiledata.length - 1)) {
            throw new Exception("The offset " + packfiledataoffset
                    + " into the pack file is not valid given pack file data length:"
                    + packfiledata.length);
        }
        if ((packfiledataoffset + entryLength) > packfiledata.length) {
            throw new Exception("The offset " + packfiledataoffset
                    + " into the pack file and the entry length " + entryLength
                    + " is not valid given pack file data length:" + packfiledata.length);
        }
        ByteBuffer entryBuffer = ByteBuffer.wrap(packfiledata, packfiledataoffset, entryLength);
        byte typeandsize = entryBuffer.get(); // size byte #1: 4 bits of size data available

        // get bits 6,5,4 into a byte, as the least significant bits. So if typeandsize =
        // bXYZbbbbb, then entryType = 00000XYZ
        // TODO: there may be a change required here for version 4 "pack" files, which use a 4
        // bit type, rather than a 3 bit type in earlier versions.
        // but maybe not, because we only handle one type (for blobs), which probably does not
        // set the highest bit in the "type" nibble.
        // The valid Object Type Bit Patterns for Version 2/3 are
        // # 000 - invalid: Reserved
        // # 001 - COMMIT object
        // # 010 - TREE object
        // # 011 - BLOB object
        // # 100 - TAG object
        // # 101 - invalid: Reserved
        // # 110 - DELTA_ENCODED object w/ offset to base
        // # 111 - DELTA_ENCODED object w/ base BINARY_OBJ_ID
        byte entryType = (byte) ((typeandsize & (byte) 0x70) >> 4);
        if (log.isDebugEnabled())
            log.debug("The pack file entry is of type " + entryType);

        if (entryType == 0x7) {
            // TODO: support Packed Objects of type 'DELTA_ENCODED object with base
            // BINARY_OBJ_ID'
            throw new Exception(
                    "Packed Objects of type 'DELTA_ENCODED object with base BINARY_OBJ_ID' are not yet supported. If you have a test case, please let the OWASP Zap dev team know!");
        }

        // Note that 0x7F is 0111 1111 in binary. Useful to mask off all but the top bit of a byte
        // and that 0x80 is 1000 0000 in binary. Useful to mask off the lower bits of a byte
        // and that 0x70 is 0111 0000 in binary. Used above to mask off 3 bits of a byte
        // and that 0xF is 0000 1111 in binary.

        // get bits 2,1,0 into a byte, as the least significant bits. So if typeandsize =
        // bbbbbbXYZ, then entrySizeNibble = 00000XYZ
        // get the lower 4 bits of the byte as the first size byte
        byte entrySizeNibble = (byte) ((typeandsize & (byte) 0xF));
        int entrySizeWhenInflated = (int) entrySizeNibble;

        // set up to check if the "more" flag is set on the type+size byte, then look at the
        // next byte for size..
        byte nextsizebyte = (byte) (typeandsize & (byte) 0x80);

        // the next piece of logic decodes the variable length "size" information, which comes
        // in an initial 4 bit chunk, followed by potentially multiple additional 7 bit chunks.
        // (3 bits of type for versions < 4, or 4 bits for version 4 "pack" files)
        int sizebytescounted = 1;
        while ((nextsizebyte & 0x80) > 0) {
            // top bit is set on nextsizebyte, so we need to get the next byte as well
            if (sizebytescounted > 4) {
                // this should not happen. the size should be determined by a max of 4 bytes.
                throw new Exception(
                        "The number of entry size bytes read exceeds 4. Either data corruption, or a parsing error has occurred");
            }
            nextsizebyte = entryBuffer.get();
            entrySizeWhenInflated = ((((nextsizebyte & 0x7F)) << (4 + (7 * (sizebytescounted - 1))))
                    | entrySizeWhenInflated);
            sizebytescounted++;
        }

        // handle each object type
        byte[] inflatedObjectData = null;
        if (entryType == 0x0) {
            throw new Exception("Invalid packed Git Object type 0x0: Reserved");
        } else if (entryType == 0x5) {
            throw new Exception("Invalid packed Git Object type 0x5: Reserved");
        } else if (entryType == 0x1 || entryType == 0x2 || entryType == 0x3 || entryType == 0x4) {
            // for non-deltified objects - this is the simple and common case (in small
            // repositories, at least)
            // this includes Commits, Trees, Blobs, and Tags
            if (log.isDebugEnabled())
                log.debug("The size of the un-deltified inflated entry should be " + entrySizeWhenInflated
                        + ", binary: " + Integer.toBinaryString(entrySizeWhenInflated));

            // extract the data from the "pack" file, taking into account its total size, based
            // on the offsets, and the number of type and size bytes already read.
            int entryDataBytesToRead = entryLength - sizebytescounted;
            // if (log.isDebugEnabled()) log.debug("Read " + sizebytescounted + " size bytes, so
            // will read " + entryDataBytesToRead + " bytes of entry data from the 'pack' file");
            byte deflatedSource[] = new byte[entryDataBytesToRead];
            entryBuffer.get(deflatedSource);
            // since it's undeltified, it's probably not a very big file, so no need to specify
            // a very large buffer size.
            inflatedObjectData = inflate(deflatedSource, 1024);
        } else if (entryType == 0x6) {
            // for 'DELTA_ENCODED object with offset to base'
            // this object type is not common in small repos. it will get more common in larger
            // Git repositories.
            int deltabaseoffset = readBigEndianModifiedBase128Number(entryBuffer);
            int deltaoffsetBytesRead = this.tempbytesread;
            if (log.isDebugEnabled())
                log.debug("DELTA_ENCODED object with offset to base: got a delta base offset of "
                        + deltabaseoffset + ", by reading " + deltaoffsetBytesRead + " bytes");

            // the data after the delta base offset is deflated. so read it, inflate it, and
            // decode it.
            int deflatedDeltaDataBytesToRead = entryLength - sizebytescounted - deltaoffsetBytesRead;
            byte deflatedDeltaData[] = new byte[deflatedDeltaDataBytesToRead];
            entryBuffer.get(deflatedDeltaData);
            byte[] inflatedDeltaData = inflate(deflatedDeltaData, 1024);
            ByteBuffer inflateddeltadataBuffer = ByteBuffer.wrap(inflatedDeltaData);

            // read the base object length and result object length as little-endian base 128
            // numbers from the inflated delta data
            int baseobjectlength = readLittleEndianBase128Number(inflateddeltadataBuffer);
            int resultobjectlength = readLittleEndianBase128Number(inflateddeltadataBuffer);

            // now that we have the offset into the pack data for the base object (relative to
            // the entry we're looking at), and the length of the base object, go and get the
            // base object.
            // note that the base entry could be another deltified object, in which case, we
            // will need to recurse.
            if (log.isDebugEnabled())
                log.debug("Getting a packed object from pack file offset " + packfiledataoffset
                        + ", delta base offset " + deltabaseoffset
                        + ", with expected inflated base object length " + baseobjectlength);

            // TODO: calculate the actual length of the entry for the base object. This will be
            // <= deltabaseoffset, so for now, use that..
            // Note: this is an optimisation, rather than a functional issue..
            byte[] inflateddeltabasedata = getPackedObjectData(packfiledata,
                    packfiledataoffset - deltabaseoffset, deltabaseoffset, packFileVersion);
            if (inflateddeltabasedata.length != baseobjectlength) {
                throw new Exception("The length of the delta base data extracted ("
                        + inflateddeltabasedata.length + ") does not match the expected length ("
                        + baseobjectlength + ")");
            }

            // apply the deltas from inflateddeltadataBuffer to inflateddeltabasedata, to
            // create an object of length resultobjectlength
            // now read the chunks, until there is no more data to be read
            while (inflateddeltadataBuffer.hasRemaining()) {
                byte chunkByte = inflateddeltadataBuffer.get();
                // log.debug("The delta chunk leading byte (in binary) is "
                //         + Integer.toBinaryString(chunkByte & 0xFF));

                if ((chunkByte & 0x80) == 0) {
                    // log.debug("The delta chunk leading byte indicates an INSERT");
                    // this is an insert chunk, so get its length
                    byte chunkInsertLength = chunkByte; // the top bit is NOT set, so just use the entire chunkByte
                    if (chunkInsertLength < 0)
                        throw new Exception(
                                "The insert chunk length (" + chunkInsertLength + ") should be positive.");
                    if (chunkInsertLength > inflateddeltadataBuffer.remaining())
                        throw new Exception("The insert chunk requests " + chunkInsertLength
                                + " bytes, but only " + inflateddeltadataBuffer.remaining()
                                + " are available");
                    if (chunkInsertLength > resultobjectlength)
                        throw new Exception("The insert chunk of length (" + chunkInsertLength
                                + ") should be no bigger than the resulting object, which is of expected length ("
                                + resultobjectlength + ")");
                    byte[] insertdata = new byte[chunkInsertLength];
                    inflateddeltadataBuffer.get(insertdata, 0, chunkInsertLength);
                    chunkByte = insertdata[insertdata.length - 1];
                    // if it passed the checks, append the insert chunk to the result buffer.
                    inflatedObjectData = ArrayUtils.addAll(inflatedObjectData, insertdata);
                } else {
                    // log.debug("The delta chunk leading byte indicates a COPY");
                    // this is a copy chunk (where bit 7 is set on the byte)
                    // so bits 6-0 specify how the remainder of the chunk determine the copy
                    // base offset and length
                    int chunkCopyOffset = 0;
                    int chunkCopyLength = 0;
                    int bitshift = 0;
                    byte chunkCopyOpcode = chunkByte;

                    bitshift = 0;
                    for (int i = 0; i < 4; i++) {
                        // is the lsb set in the opcode (after we've shifted it right)?
                        if ((chunkCopyOpcode & 0x01) > 0) {
                            chunkByte = inflateddeltadataBuffer.get();
                            chunkCopyOffset |= ((((int) chunkByte & 0xFF) << bitshift));
                        }
                        chunkCopyOpcode >>= 1;
                        bitshift += 8;
                    }

                    // get the length
                    bitshift = 0;
                    // the length is determined by the pack file version. For Version 3, use 4
                    // bytes (0..3). For Version 2, use 3 bytes (0..2)
                    // support V3 as well here..
                    for (int i = 0; i < (packFileVersion == 3 ? 3 : (packFileVersion == 2 ? 2 : 0)); i++) {
                        // is the lsb set in the opcode (after we've shifted it right)??
                        if ((chunkCopyOpcode & 0x01) > 0) {
                            chunkByte = inflateddeltadataBuffer.get();
                            chunkCopyLength |= ((((int) chunkByte & 0xFF) << bitshift));
                        }
                        chunkCopyOpcode >>= 1;
                        bitshift += 8;
                    }
                    if (chunkCopyLength == 0) {
                        chunkCopyLength = 1 << 16;
                    }
                    if (packFileVersion == 2) {
                        // Version 2 gave the ability to switch the source and target if a flag
                        // was set.
                        // we do not yet support it, because it doesn't seem to occur in the
                        // wild. If you have examples, please let us know!
                        boolean switchDirection = ((chunkCopyOpcode & 0x01) > 0);
                        if (switchDirection)
                            throw new Exception(
                                    "Git Pack File Version 2 chunk copy direction switching (copy from result) is not yet supported");
                    }
                    if (chunkCopyOffset < 0)
                        throw new Exception(
                                "The copy chunk offset (" + chunkCopyOffset + ") should be positive.");
                    if (chunkCopyLength < 0)
                        throw new Exception(
                                "The copy chunk length (" + chunkCopyLength + ") should be positive.");
                    if (chunkCopyLength > resultobjectlength)
                        throw new Exception("The copy chunk of length (" + chunkCopyLength
                                + ") should be no bigger than the resulting object, which is of expected length ("
                                + resultobjectlength + ")");
                    byte[] copydata = Arrays.copyOfRange(inflateddeltabasedata, chunkCopyOffset,
                            chunkCopyOffset + chunkCopyLength);
                    // if it passed the checks, append the copy chunk to the result buffer.
                    inflatedObjectData = ArrayUtils.addAll(inflatedObjectData, copydata);
                }
            }
            // all the delta chunks have been handled
            return inflatedObjectData;
        }

        // validate that entrySizeWhenInflated == the actual size of the inflated data
        // there may not be much point in doing this, since the inflate will (in all
        // probability) fail if the length were wrong
        if (entrySizeWhenInflated != inflatedObjectData.length)
            throw new Exception("The predicted inflated length of the entry was " + entrySizeWhenInflated
                    + ", when we inflated the entry, we got data of length " + inflatedObjectData.length);

        return inflatedObjectData;
    } catch (Exception e) {
        log.error("Some error occurred extracting a packed object", e);
        throw e;
    }
}
From source file:org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap.java
private void debugDumpKeyProbe(long keyOffset, int keyLength, int hashCode, int finalSlot) {
    final int bucketMask = refs.length - 1;
    WriteBuffers.ByteSegmentRef fakeRef = new WriteBuffers.ByteSegmentRef(keyOffset, keyLength);
    writeBuffers.populateValue(fakeRef);
    int slot = hashCode & bucketMask;
    long probeSlot = slot;
    StringBuilder sb = new StringBuilder("Probe path debug for [");
    sb.append(Utils.toStringBinary(fakeRef.getBytes(), (int) fakeRef.getOffset(), fakeRef.getLength()));
    sb.append("] hashCode ").append(Integer.toBinaryString(hashCode)).append(" is: ");
    int i = 0;
    while (slot != finalSlot) {
        probeSlot += (++i);
        slot = (int) (probeSlot & bucketMask);
        sb.append(slot).append(" - ").append(probeSlot).append(" - ")
                .append(Long.toBinaryString(refs[slot])).append("\n");
    }
    LOG.info(sb.toString());
}
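The binary dump makes it easy to see which bits of the hash survive a power-of-two bucket mask; a small self-contained sketch of the same idea (the table size is illustrative):

int hashCode = "key".hashCode();
int bucketMask = 16 - 1; // 16 slots -> mask 1111
System.out.println("hash: " + Integer.toBinaryString(hashCode));
System.out.println("slot: " + Integer.toBinaryString(hashCode & bucketMask)); // low 4 bits pick the slot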
From source file:org.finra.dm.service.AbstractServiceTest.java
/**
 * Creates relative database entities required for the unit tests.
 */
protected void createDatabaseEntitiesForBusinessObjectDataDdlTesting(String businessObjectFormatFileType,
        String partitionKey, String partitionKeyGroupName, int partitionColumnPosition,
        List<String> partitionValues, List<String> subPartitionValues, String schemaDelimiterCharacter,
        String schemaEscapeCharacter, String schemaNullValue, List<SchemaColumn> schemaColumns,
        List<SchemaColumn> partitionColumns, boolean replaceUnderscoresWithHyphens, String customDdlName,
        boolean generateStorageFileEntities) {
    // Create a business object format entity if it does not exist.
    BusinessObjectFormatEntity businessObjectFormatEntity = dmDao
            .getBusinessObjectFormatByAltKey(new BusinessObjectFormatKey(NAMESPACE_CD, BOD_NAME,
                    FORMAT_USAGE_CODE, businessObjectFormatFileType, FORMAT_VERSION));
    if (businessObjectFormatEntity == null) {
        businessObjectFormatEntity = createBusinessObjectFormatEntity(NAMESPACE_CD, BOD_NAME,
                FORMAT_USAGE_CODE, businessObjectFormatFileType, FORMAT_VERSION, FORMAT_DESCRIPTION, true,
                partitionKey, partitionKeyGroupName, schemaDelimiterCharacter, schemaEscapeCharacter,
                schemaNullValue, schemaColumns, partitionColumns);
    }
    if (StringUtils.isNotBlank(customDdlName)) {
        boolean partitioned = (partitionColumns != null);
        createCustomDdlEntity(businessObjectFormatEntity, customDdlName, getTestCustomDdl(partitioned));
    }
    // Create business object data entities along with the corresponding storage unit entities.
    StorageEntity storageEntity = dmDao.getStorageByName(StorageEntity.MANAGED_STORAGE);
    // Create business object data for each partition value.
    for (String partitionValue : partitionValues) {
        BusinessObjectDataEntity businessObjectDataEntity;
        if (partitionColumnPosition == BusinessObjectDataEntity.FIRST_PARTITION_COLUMN_POSITION) {
            businessObjectDataEntity = createBusinessObjectDataEntity(NAMESPACE_CD, BOD_NAME,
                    FORMAT_USAGE_CODE, businessObjectFormatFileType, FORMAT_VERSION, partitionValue,
                    subPartitionValues, DATA_VERSION, true, BusinessObjectDataStatusEntity.VALID);
        } else {
            List<String> testSubPartitionValues = new ArrayList<>(subPartitionValues);
            // Please note that the second partition column is located at index 0.
            testSubPartitionValues.set(partitionColumnPosition - 2, partitionValue);
            businessObjectDataEntity = createBusinessObjectDataEntity(NAMESPACE_CD, BOD_NAME,
                    FORMAT_USAGE_CODE, businessObjectFormatFileType, FORMAT_VERSION, PARTITION_VALUE,
                    testSubPartitionValues, DATA_VERSION, true, BusinessObjectDataStatusEntity.VALID);
        }
        String s3KeyPrefix = businessObjectDataHelper.buildS3KeyPrefix(businessObjectFormatEntity,
                dmDaoHelper.getBusinessObjectDataKey(businessObjectDataEntity));
        StorageUnitEntity storageUnitEntity = createStorageUnitEntity(storageEntity, businessObjectDataEntity);
        // If flag is set, create one storage file for each "auto-discoverable" partition.
        // Please note that this is 2^n - thus we want to keep the number of partition levels small.
        if (generateStorageFileEntities) {
            int discoverableSubPartitionsCount = partitionColumns != null
                    ? partitionColumns.size() - subPartitionValues.size() - 1 : 0;
            int storageFilesCount = (int) Math.pow(2, discoverableSubPartitionsCount);
            for (int i = 0; i < storageFilesCount; i++) {
                // Build a relative sub-directory path.
                StringBuilder subDirectory = new StringBuilder();
                String binaryString = StringUtils.leftPad(Integer.toBinaryString(i),
                        discoverableSubPartitionsCount, "0");
                for (int j = 0; j < discoverableSubPartitionsCount; j++) {
                    String subpartitionKey = partitionColumns.get(j + subPartitionValues.size() + 1).getName()
                            .toLowerCase();
                    if (replaceUnderscoresWithHyphens) {
                        subpartitionKey = subpartitionKey.replace("_", "-");
                    }
                    subDirectory.append(String.format("/%s=%s", subpartitionKey, binaryString.substring(j, j + 1)));
                }
                // Create a storage file entity.
                createStorageFileEntity(storageUnitEntity,
                        String.format("%s%s/data.dat", s3KeyPrefix, subDirectory.toString()), FILE_SIZE_1_KB,
                        ROW_COUNT_1000);
            }
        }
        // Add storage directory path value to the storage unit, since we have no storage files generated.
        else {
            storageUnitEntity.setDirectoryPath(s3KeyPrefix);
        }
        dmDao.saveAndRefresh(storageUnitEntity);
        dmDao.saveAndRefresh(businessObjectDataEntity);
    }
}
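The leftPad/toBinaryString combination enumerates every fixed-width bit pattern, one per loop index. A minimal sketch of the same trick without commons-lang, using String.format for the padding:

int n = 3; // pattern width
for (int i = 0; i < (1 << n); i++) {
    String bits = String.format("%" + n + "s", Integer.toBinaryString(i)).replace(' ', '0');
    System.out.println(bits); // 000, 001, 010, ..., 111
}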
From source file:core.module.codec.EncodeDecodeOtaMessage.java
private static GlucoMonDatabase decodeRetrieveCommand19Bit(Date messageSubmitDate, int retrieveType,
        InputStream is) throws IOException {
    /*
    The structure of a reading sent through the network has a 4 byte compressed format.

    Value of reading (10 bit):
        value from 000 to 600
        601  : HIGH, value higher than 600
        602  : REXC, range exceeded, no valid reading
        1020 : TCORR, time correction

    Date and Time (19 bit):
        resolution 1 minute
        Every 3 hours, the 59th minute will be rounded to the 58th or 00th minute (compression).
        saved in minutes

    Extra info (3 bit):
        TC, Time Correct (1 bit)
            0 means time stamp of measurement is NOK
            1 means time stamp of measurement is OK
        CT, Control Test (1 bit)
            0 means a blood sample
            1 means a control test
        Reserved (1 bit)
    */
    int numberOfGlucometers = is.read();
    //(retrieveType == SEND_READING_NORMAL_RESPONSE) either alarm or normal send
    GlucoMonDatabase db = new GlucoMonDatabase((retrieveType == SEND_READING_ALARM_RESPONSE));
    long timeCorrectionOffsetMinutes = 0;
    for (int i = 0; i < numberOfGlucometers; i++) {
        MedicalDevice currentGlucoseMeter = new MedicalDevice(decryptSerialNumber(is));
        // DIY readShort() to avoid creating a DataInputStream just for this
        int numberOfReadings = (short) ((is.read() << 8) + (is.read() << 0));
        for (int j = 0; j < numberOfReadings; j++) {
            // DIY readInt() to avoid creating a DataInputStream just for this
            int reading = ((is.read() << 24) + (is.read() << 16) + (is.read() << 8) + (is.read() << 0));
            if (((reading >>> 22) & TIME_CORRECTION_READING_FLAG_VALUE) == TIME_CORRECTION_READING_FLAG_VALUE) {
                int offset = (reading & 0x3ffff8) >> 3;
                if ((0x40000 & offset) != 0) {
                    logger.finer("OFFSET NEGATIVE (sign bit (19th) is flipped, convert to negative number)");
                    offset = (offset) - 0x80000;
                }
                //add back minutes lost in compression
                timeCorrectionOffsetMinutes = convert19BitDateMinutes(offset);
                logger.finer("OFFSET=" + offset + ",bin=" + Integer.toBinaryString(offset)
                        + ",convert19BitDateMinutes=" + timeCorrectionOffsetMinutes);
            } else {
                DataPoint dp = decodeReading19Bit(messageSubmitDate, reading, timeCorrectionOffsetMinutes);
                currentGlucoseMeter.addDataPoint(dp);
            }
        }
        db.addGlucoseMeter(currentGlucoseMeter);
    }
    return db;
}
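The offset handling above is ordinary two's-complement sign extension of a 19-bit field: if the sign bit (0x40000, bit 18) is set, subtracting 2^19 (0x80000) recovers the negative value. In isolation, with an illustrative raw value:

int raw = 0x7FFFF; // all 19 bits set, so the field encodes -1
if ((raw & 0x40000) != 0) { // sign bit of the 19-bit field
    raw -= 0x80000; // subtract 2^19
}
System.out.println(raw + " bin=" + Integer.toBinaryString(raw)); // -1, printed as 32 ones in binary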
From source file:edu.cmu.tetrad.search.SearchGraphUtils.java
public static List<Set<Node>> powerSet(List<Node> nodes) {
    List<Set<Node>> subsets = new ArrayList<Set<Node>>();
    int total = (int) Math.pow(2, nodes.size());
    for (int i = 0; i < total; i++) {
        Set<Node> newSet = new HashSet<Node>();
        String selection = Integer.toBinaryString(i);
        for (int j = selection.length() - 1; j >= 0; j--) {
            if (selection.charAt(j) == '1') {
                newSet.add(nodes.get(selection.length() - j - 1));
            }
        }
        subsets.add(newSet);
    }
    return subsets;
}
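Each i in [0, total) selects one subset: for nodes [a, b, c], i = 5 gives selection "101"; the loop reads it right to left, so charAt(2) maps to nodes.get(0) and charAt(0) to nodes.get(2), yielding the subset {a, c}.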
From source file:org.uzebox.tools.converters.gfx.Main.java
private static String toBin(byte b) {
    int i = (b & 0xff);
    String s = Integer.toBinaryString(i);
    // left-pad with zeros to a full 8 bits
    int l = 8 - s.length();
    for (int j = 0; j < l; j++)
        s = "0" + s;
    //System.out.println(s);
    // keep only the 5 most significant bits
    s = s.substring(0, 5);
    System.out.println(s);
    return s;
}
From source file:com.processing.core.PApplet.java
/**
 * Returns a String that contains the binary value of an int.
 * The digits parameter determines how many digits will be used.
 */
static final public String binary(int what, int digits) {
    String stuff = Integer.toBinaryString(what);
    if (digits > 32) {
        digits = 32;
    }
    int length = stuff.length();
    if (length > digits) {
        return stuff.substring(length - digits);
    } else if (length < digits) {
        int offset = 32 - (digits - length);
        return "00000000000000000000000000000000".substring(offset) + stuff;
    }
    return stuff;
}
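Example calls, following directly from the clamping and padding logic above:

binary(5, 8);   // "00000101"
binary(-1, 8);  // "11111111" (the low 8 bits of the 32-bit two's complement)
binary(5, 40);  // digits is clamped to 32, so this returns the full 32-bit padded string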