List of usage examples for java.nio.ByteBuffer.putInt(int)
public abstract ByteBuffer putInt(int value);
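Before the project examples, here is a minimal self-contained sketch (not taken from any project below) showing both the relative putInt(int), which advances the buffer position, and the absolute putInt(int index, int value), which is often used to backpatch a length or checksum field written earlier:

import java.nio.ByteBuffer;

public class PutIntDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(12);   // room for three ints

        buf.putInt(0);                              // placeholder for a length prefix
        buf.putInt(42);                             // relative write: advances position by 4
        buf.putInt(7);

        // Absolute write: backpatch the placeholder without moving the position.
        // Payload length after the 4-byte prefix = 12 - 4 = 8.
        buf.putInt(0, buf.position() - 4);

        buf.flip();                                 // switch from writing to reading
        System.out.println(buf.getInt());           // 8
        System.out.println(buf.getInt());           // 42
        System.out.println(buf.getInt());           // 7
    }
}

The same placeholder-then-backpatch pattern appears in several of the real-world examples below.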
From source file:org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap.java
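This example uses putInt to write two length prefixes (dictionary and no-dictionary key arrays) ahead of the key bytes themselves, then hands the backing array to a DataMapRow.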
private DataMapRow convertToRow(IndexKey key) {
    ByteBuffer buffer = ByteBuffer
            .allocate(key.getDictionaryKeys().length + key.getNoDictionaryKeys().length + 8);
    buffer.putInt(key.getDictionaryKeys().length);
    buffer.putInt(key.getNoDictionaryKeys().length);
    buffer.put(key.getDictionaryKeys());
    buffer.put(key.getNoDictionaryKeys());
    DataMapRowImpl dataMapRow = new DataMapRowImpl(memoryDMStore.getSchema());
    dataMapRow.setByteArray(buffer.array(), 0);
    return dataMapRow;
}
From source file:io.pcp.parfait.dxm.PcpMmvWriter.java
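Here putInt writes the MMV header fields (format version, TOC block count, flag mask, process and cluster identifiers) in sequence, while the generation-2 field is remembered by offset and backpatched at the end via position().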
private void populateDataBuffer(ByteBuffer dataFileBuffer, Collection<PcpValueInfo> valueInfos)
        throws IOException {
    // Automatically cleanup the file if this is a mapping where we
    // mandate PID checking from the MMV PMDA (MMV_FLAG_PROCESS) and
    // we were able to stash a path name earlier
    if (file != null && flags.contains(MmvFlag.MMV_FLAG_PROCESS)) {
        file.deleteOnExit();
    }
    dataFileBuffer.position(0);
    dataFileBuffer.put(TAG);
    dataFileBuffer.putInt(mmvVersion.getVersion());
    long generation = System.currentTimeMillis() / 1000;
    dataFileBuffer.putLong(generation);
    int gen2Offset = dataFileBuffer.position();
    // Generation 2 will be filled in later, once the file's ready
    dataFileBuffer.putLong(0);
    // 2 TOC blocks, 3 if there are instances
    dataFileBuffer.putInt(tocCount());
    dataFileBuffer.putInt(getFlagMask());
    dataFileBuffer.putInt(getProcessIdentifier());
    dataFileBuffer.putInt(clusterIdentifier);

    Collection<? extends MmvWritable> instanceDomains = getInstanceDomains();
    Collection<? extends MmvWritable> instances = getInstances();
    Collection<? extends MmvWritable> metrics = getMetricInfos();
    Collection<? extends MmvWritable> strings = getStrings();

    int tocBlockIndex = 0;
    if (!instanceDomains.isEmpty()) {
        dataFileBuffer.position(getTocOffset(tocBlockIndex++));
        writeToc(dataFileBuffer, TocType.INSTANCE_DOMAINS, instanceDomains.size(),
                instanceDomains.iterator().next().getOffset());
    }
    if (!instances.isEmpty()) {
        dataFileBuffer.position(getTocOffset(tocBlockIndex++));
        writeToc(dataFileBuffer, TocType.INSTANCES, instances.size(),
                instances.iterator().next().getOffset());
    }

    dataFileBuffer.position(getTocOffset(tocBlockIndex++));
    int metricsFirstEntryOffset = metrics.isEmpty() ? 0 : metrics.iterator().next().getOffset();
    int valuesFirstEntryOffset = valueInfos.isEmpty() ? 0 : valueInfos.iterator().next().getOffset();
    writeToc(dataFileBuffer, TocType.METRICS, metrics.size(), metricsFirstEntryOffset);
    dataFileBuffer.position(getTocOffset(tocBlockIndex++));
    writeToc(dataFileBuffer, TocType.VALUES, valueInfos.size(), valuesFirstEntryOffset);

    if (!getStrings().isEmpty()) {
        dataFileBuffer.position(getTocOffset(tocBlockIndex++));
        writeToc(dataFileBuffer, TocType.STRINGS, strings.size(),
                strings.iterator().next().getOffset());
    }

    for (MmvWritable instanceDomain : instanceDomains) {
        instanceDomain.writeToMmv(dataFileBuffer);
    }
    for (MmvWritable info : metrics) {
        info.writeToMmv(dataFileBuffer);
    }
    for (MmvWritable info : valueInfos) {
        info.writeToMmv(dataFileBuffer);
    }
    for (MmvWritable string : strings) {
        string.writeToMmv(dataFileBuffer);
    }

    // Once it's set up, let the agent know
    dataFileBuffer.position(gen2Offset);
    dataFileBuffer.putLong(generation);
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java
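This example reserves the first four bytes with position(4), writes a length-prefixed hashed key, and once the compressed payload size is known backpatches the total record size with the absolute overload putInt(0, pos - 4).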
/**
 * Store external with codec.
 * Format:
 * 0..3    - total record size (-4)
 * 4..7    - size of a key in bytes (16 if using hash128)
 * 8..x    - key data
 * x+1     - IN_MEMORY flag (1 - in memory, 0 - not)
 * x+2 ... - block, serialized and compressed
 *
 * @param blockName the block name
 * @param buf the buf
 * @param inMemory the in memory
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void storeExternalWithCodec(String blockName, Cacheable buf, boolean inMemory) throws IOException {
    // If external storage is disabled - bail out
    if (overflowExtEnabled == false) {
        return;
    }
    byte[] hashed = Utils.hash128(blockName);
    // Check if we already have this block in the external storage cache
    if (extStorageCache.contains(hashed)) {
        return;
    }
    ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
    deserializer.set(buf.getDeserializer());

    SerDe serde = extStorageCache.getSerDe();
    Codec codec = extStorageCache.getCompressionCodec();
    buffer.clear();
    buffer.position(4);

    // Save key
    buffer.putInt(hashed.length);
    buffer.put(hashed);
    buffer.put(inMemory ? (byte) 1 : (byte) 0);

    if (buf != null) {
        serde.writeCompressed(buffer, buf, codec);
        int pos = buffer.position();
        buffer.putInt(0, pos - 4);
    } else {
        buffer.putInt(0, 0);
    }
    StorageHandle handle = storage.storeData(buffer);

    try {
        // WE USE byte array as a key
        extStorageCache.put(hashed, handle);
    } catch (Exception e) {
        throw new IOException(e);
    }
}
From source file:de.rwhq.btree.InnerNode.java
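This B-tree example shifts the tail of the page with System.arraycopy to open a gap at the insert position, then writes the serialized key followed by putInt(pageId) into the freed space.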
/**
 * @param serializedKey
 * @param pageId
 * @param posOfKeyForInsert
 */
private void insertKeyPointerPageIdAtPosition(final byte[] serializedKey, final Integer pageId,
        final int posOfKeyForInsert) {

    final KeyStruct thisKeyStruct = new KeyStruct(posOfKeyForInsert);
    final ByteBuffer buf = rawPage().bufferForWriting(thisKeyStruct.getOffset());

    final int spaceNeededForInsert = getSizeOfPageId() + keySerializer.getSerializedLength();
    System.arraycopy(buf.array(), buf.position(), buf.array(), buf.position() + spaceNeededForInsert,
            buf.limit() - buf.position() - spaceNeededForInsert);

    buf.put(serializedKey);
    buf.putInt(pageId);

    setNumberOfKeys(getNumberOfKeys() + 1);
    rawPage().sync();
}
From source file:org.voltdb.utils.CatalogUtil.java
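Here the buffer is sized from an itemized byte-count breakdown, and putInt writes the catalog version plus the length prefixes in front of the catalog and deployment byte arrays.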
private static ByteBuffer makeCatalogVersionAndBytes(int catalogVersion, long txnId, long uniqueId,
        byte[] catalogBytes, byte[] deploymentBytes) {
    ByteBuffer versionAndBytes = ByteBuffer.allocate(
            4 +  // catalog bytes length
            catalogBytes.length +
            4 +  // deployment bytes length
            deploymentBytes.length +
            4 +  // catalog version
            8 +  // txnID
            8 +  // unique ID
            20 + // catalog SHA-1 hash
            20   // deployment SHA-1 hash
    );
    versionAndBytes.putInt(catalogVersion);
    versionAndBytes.putLong(txnId);
    versionAndBytes.putLong(uniqueId);
    try {
        versionAndBytes.put((new InMemoryJarfile(catalogBytes)).getSha1Hash());
    } catch (IOException ioe) {
        VoltDB.crashLocalVoltDB("Unable to build InMemoryJarfile from bytes, should never happen.",
                true, ioe);
    }
    versionAndBytes.put(makeDeploymentHash(deploymentBytes));
    versionAndBytes.putInt(catalogBytes.length);
    versionAndBytes.put(catalogBytes);
    versionAndBytes.putInt(deploymentBytes.length);
    versionAndBytes.put(deploymentBytes);
    return versionAndBytes;
}
From source file:org.springframework.integration.x.ip.websocket.WebSocketSerializer.java
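In this WebSocket frame serializer, putInt(mask) writes the 4-byte client masking key; the position is then rewound so the same bytes can be read back and used to XOR-mask the payload.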
@Override
public void serialize(final Object frame, OutputStream outputStream) throws IOException {
    String data = "";
    WebSocketFrame theFrame = null;
    if (frame instanceof String) {
        data = (String) frame;
        theFrame = new WebSocketFrame(WebSocketFrame.TYPE_DATA, data);
    } else if (frame instanceof WebSocketFrame) {
        theFrame = (WebSocketFrame) frame;
        data = theFrame.getPayload();
    }
    if (data != null && data.startsWith("HTTP/1.1")) {
        outputStream.write(data.getBytes());
        return;
    }
    int lenBytes;
    int payloadLen = this.server ? 0 : 0x80; // masked
    boolean close = theFrame.getType() == WebSocketFrame.TYPE_CLOSE;
    boolean ping = theFrame.getType() == WebSocketFrame.TYPE_PING;
    boolean pong = theFrame.getType() == WebSocketFrame.TYPE_PONG;
    byte[] bytes = theFrame.getBinary() != null ? theFrame.getBinary() : data.getBytes("UTF-8");

    int length = bytes.length;
    if (close) {
        length += 2;
    }

    if (length >= Math.pow(2, 16)) {
        lenBytes = 8;
        payloadLen |= 127;
    } else if (length > 125) {
        lenBytes = 2;
        payloadLen |= 126;
    } else {
        lenBytes = 0;
        payloadLen |= length;
    }

    int mask = (int) System.currentTimeMillis();
    ByteBuffer buffer = ByteBuffer.allocate(length + 6 + lenBytes);
    if (ping) {
        buffer.put((byte) 0x89);
    } else if (pong) {
        buffer.put((byte) 0x8a);
    } else if (close) {
        buffer.put((byte) 0x88);
    } else if (theFrame.getType() == WebSocketFrame.TYPE_DATA_BINARY) {
        buffer.put((byte) 0x82);
    } else {
        // Final fragment; text
        buffer.put((byte) 0x81);
    }

    buffer.put((byte) payloadLen);
    if (lenBytes == 2) {
        buffer.putShort((short) length);
    } else if (lenBytes == 8) {
        buffer.putLong(length);
    }

    byte[] maskBytes = new byte[4];
    if (!server) {
        buffer.putInt(mask);
        buffer.position(buffer.position() - 4);
        buffer.get(maskBytes);
    }
    if (close) {
        buffer.putShort(theFrame.getStatus()); // TODO: mask status when client
    }
    for (int i = 0; i < bytes.length; i++) {
        if (server) {
            buffer.put(bytes[i]);
        } else {
            buffer.put((byte) (bytes[i] ^ maskBytes[i % 4]));
        }
    }
    outputStream.write(buffer.array(), 0, buffer.position());
}
From source file:srebrinb.compress.sevenzip.SevenZOutputFile.java
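This example writes the little-endian 7z signature header, using putInt(0) as a CRC placeholder and the absolute putInt(index, value) overload to patch in the computed start-header CRC afterwards.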
/**
 * Finishes the addition of entries to this archive, without closing it.
 *
 * @throws IOException if archive is already closed.
 */
public void finish() throws IOException {
    if (finished) {
        throw new IOException("This archive has already been finished");
    }
    finished = true;

    final long headerPosition = channel.position();

    final ByteArrayOutputStream headerBaos = new ByteArrayOutputStream();
    final DataOutputStream header = new DataOutputStream(headerBaos);

    writeHeader(header);
    header.flush();
    final byte[] headerBytes = headerBaos.toByteArray();
    channel.write(ByteBuffer.wrap(headerBytes));

    final CRC32 crc32 = new CRC32();
    crc32.update(headerBytes);

    ByteBuffer bb = ByteBuffer.allocate(SevenZFile.sevenZSignature.length
            + 2 /* version */
            + 4 /* start header CRC */
            + 8 /* next header position */
            + 8 /* next header length */
            + 4 /* next header CRC */).order(ByteOrder.LITTLE_ENDIAN);
    // signature header
    channel.position(0);
    bb.put(SevenZFile.sevenZSignature);
    // version
    bb.put((byte) 0).put((byte) 2);

    // placeholder for start header CRC
    bb.putInt(0);

    // start header
    bb.putLong(headerPosition - SevenZFile.SIGNATURE_HEADER_SIZE)
            .putLong(0xffffFFFFL & headerBytes.length)
            .putInt((int) crc32.getValue());
    crc32.reset();
    crc32.update(bb.array(), SevenZFile.sevenZSignature.length + 6, 20);
    bb.putInt(SevenZFile.sevenZSignature.length + 2, (int) crc32.getValue());
    bb.flip();
    channel.write(bb);
}
From source file:org.carbondata.core.util.CarbonUtil.java
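A straightforward length-prefixed write: putInt records the array length, each cardinality value follows, and the flipped buffer is flushed to a FileChannel.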
public static void writeLevelCardinalityFile(String loadFolderLoc, String tableName, int[] dimCardinality)
        throws KettleException {
    String levelCardinalityFilePath = loadFolderLoc + File.separator
            + CarbonCommonConstants.LEVEL_METADATA_FILE + tableName
            + CarbonCommonConstants.CARBON_METADATA_EXTENSION;
    FileOutputStream fileOutputStream = null;
    FileChannel channel = null;
    try {
        int dimCardinalityArrLength = dimCardinality.length;

        // first four bytes for writing the length of array, remaining for array data
        ByteBuffer buffer = ByteBuffer.allocate(CarbonCommonConstants.INT_SIZE_IN_BYTE
                + dimCardinalityArrLength * CarbonCommonConstants.INT_SIZE_IN_BYTE);

        fileOutputStream = new FileOutputStream(levelCardinalityFilePath);
        channel = fileOutputStream.getChannel();
        buffer.putInt(dimCardinalityArrLength);

        for (int i = 0; i < dimCardinalityArrLength; i++) {
            buffer.putInt(dimCardinality[i]);
        }

        buffer.flip();
        channel.write(buffer);
        buffer.clear();

        LOGGER.info("Level cardinality file written to : " + levelCardinalityFilePath);
    } catch (IOException e) {
        LOGGER.error("Error while writing level cardinality file : " + levelCardinalityFilePath
                + e.getMessage());
        throw new KettleException("Not able to write level cardinality file", e);
    } finally {
        closeStreams(channel, fileOutputStream);
    }
}
From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java
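This longer example frames an on-disk bucket table with putInt(MAGICSTART)/putInt(MAGICEND) magic markers around the header, and writes a second MAGICSTART marker before the per-log info records.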
private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // NeedlePointer lastCheckPoint = bucketTable.getLastCheckPoint() ;
        // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // truncate buffer
        fileChannel.write(headerBuffer);
        // Now write the buffers
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + mapSize + HEADERSIZE);
            // else
            //     logger.info("Bucket table commit: written " + (i + 1) * entriesPerBuffer + " buckets"
            //             + ((i < (nbBuffers - 1)) ? "..." : ""));
            try {
                Thread.sleep(10);
            } catch (Throwable th) {
            }
        }
        // Write second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write needle log info
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Write checkpoint
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        // System.out.println("Writing checkpoint in index " + bucketTable.getRequestedCheckPoint());
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true); // Flip buffer after write
        headerBuffer.rewind();
        // fileChannel.force(false);
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());

        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writing bucket table", ie);
    } finally {
        headerBuffer = null; // May ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table " + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table " + currentFile.getName(), th);
    }
}
From source file:org.openpilot_nonag.uavtalk.UAVTalk.java
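This telemetry example builds a little-endian packet header in which putInt((int) objId) writes the low 32 bits of the long object id, followed by a CRC byte computed over the bytes written so far.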
/**
 * Send an object through the telemetry link.
 * @throws IOException
 * @param[in] obj Object handle to send
 * @param[in] type Transaction type \return Success (true), Failure (false)
 */
private boolean transmitSingleObject(int type, long objId, long instId, UAVObject obj) throws IOException {
    int length = 0;

    assert (objMngr != null && outStream != null);

    // IMPORTANT : obj can be null (when type is NACK for example)

    // Determine data length
    if (type == TYPE_OBJ_REQ || type == TYPE_ACK || type == TYPE_NACK) {
        length = 0;
    } else {
        length = obj.getNumBytes();
    }

    ByteBuffer bbuf = ByteBuffer.allocate(MAX_PACKET_LENGTH);
    bbuf.order(ByteOrder.LITTLE_ENDIAN);

    // Setup type and object id fields
    bbuf.put((byte) (SYNC_VAL & 0xff));
    bbuf.put((byte) (type & 0xff));
    bbuf.putShort((short) (length + HEADER_LENGTH));
    bbuf.putInt((int) objId);
    bbuf.putShort((short) (instId & 0xffff));

    // Check length
    if (length >= MAX_PAYLOAD_LENGTH) {
        ++stats.txErrors;
        return false;
    }

    // Copy data (if any)
    if (length > 0) {
        try {
            if (obj.pack(bbuf) == 0) {
                ++stats.txErrors;
                return false;
            }
        } catch (Exception e) {
            ++stats.txErrors;
            e.printStackTrace();
            return false;
        }
    }

    // Calculate checksum
    bbuf.put((byte) (updateCRC(0, bbuf.array(), bbuf.position()) & 0xff));

    int packlen = bbuf.position();
    bbuf.position(0);
    byte[] dst = new byte[packlen];
    bbuf.get(dst, 0, packlen);
    outStream.write(dst);

    // Update stats
    ++stats.txObjects;
    stats.txBytes += bbuf.position();
    stats.txObjectBytes += length;

    // Done
    return true;
}