List of usage examples for java.nio ByteBuffer getLong
public abstract long getLong();
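Before the project examples, here is a minimal, self-contained sketch of getLong() on its own (not drawn from any of the projects below; the value written is arbitrary). getLong() reads the next eight bytes at the buffer's current position, composes them into a long according to the buffer's byte order (big-endian by default), and advances the position by eight:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class GetLongExample {
        public static void main(String[] args) {
            // Allocate room for exactly one long (8 bytes) and write a sample value.
            ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
            buffer.order(ByteOrder.BIG_ENDIAN); // the default; getLong honours the buffer's byte order
            buffer.putLong(1234567890123L);     // arbitrary sample value

            // Flip from writing to reading, then read the 8 bytes back as a long.
            buffer.flip();
            long value = buffer.getLong();
            System.out.println(value); // prints 1234567890123
        }
    }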
From source file: org.carbondata.core.util.CarbonUtil.java

    /**
     * @param listOfNodeInfo
     * @param filesLocation
     * @param measureCount
     * @param mdKeySize
     * @param fileSize
     * @return
     */
    private static List<BlockletInfo> getBlockletDetails(List<BlockletInfo> listOfNodeInfo, String filesLocation,
            int measureCount, int mdKeySize, long fileSize) {
        long offset = fileSize - CarbonCommonConstants.LONG_SIZE_IN_BYTE;
        FileHolder fileHolder = FileFactory.getFileHolder(FileFactory.getFileType(filesLocation));
        offset = fileHolder.readDouble(filesLocation, offset);
        int totalMetaDataLength = (int) (fileSize - CarbonCommonConstants.LONG_SIZE_IN_BYTE - offset);
        ByteBuffer buffer = ByteBuffer.wrap(fileHolder.readByteArray(filesLocation, offset, totalMetaDataLength));
        buffer.rewind();
        while (buffer.hasRemaining()) {
            int[] msrLength = new int[measureCount];
            long[] msrOffset = new long[measureCount];
            BlockletInfo info = new BlockletInfo();
            byte[] startKey = new byte[mdKeySize];
            byte[] endKey = new byte[mdKeySize];
            info.setFileName(filesLocation);
            info.setNumberOfKeys(buffer.getInt());
            info.setKeyLength(buffer.getInt());
            info.setKeyOffset(buffer.getLong());
            buffer.get(startKey);
            buffer.get(endKey);
            info.setStartKey(startKey);
            info.setEndKey(endKey);
            for (int i = 0; i < measureCount; i++) {
                msrLength[i] = buffer.getInt();
                msrOffset[i] = buffer.getLong();
            }
            info.setMeasureLength(msrLength);
            info.setMeasureOffset(msrOffset);
            listOfNodeInfo.add(info);
        }
        fileHolder.finish();
        return listOfNodeInfo;
    }
From source file: edu.umass.cs.gigapaxos.paxospackets.RequestPacket.java

    public RequestPacket(ByteBuffer bbuf) throws UnsupportedEncodingException, UnknownHostException {
        super(bbuf);
        int exactLength = bbuf.position();
        this.requestID = bbuf.getLong();
        this.stop = bbuf.get() == (byte) 1;
        exactLength += (8 + 1);

        // addresses
        byte[] ca = new byte[4];
        bbuf.get(ca);
        int cport = (int) bbuf.getShort();
        cport = cport >= 0 ? cport : cport + 2 * (Short.MAX_VALUE + 1);
        this.clientAddress = cport != 0 ? new InetSocketAddress(InetAddress.getByAddress(ca), cport) : null;
        byte[] la = new byte[4];
        bbuf.get(la);
        int lport = (int) bbuf.getShort();
        lport = lport >= 0 ? lport : lport + 2 * (Short.MAX_VALUE + 1);
        this.listenAddress = lport != 0 ? new InetSocketAddress(InetAddress.getByAddress(la), lport) : null;
        exactLength += (4 + 2 + 4 + 2);

        // other non-final fields
        this.entryReplica = bbuf.getInt();
        this.entryTime = bbuf.getLong();
        this.shouldReturnRequestValue = bbuf.get() == (byte) 1;
        this.forwardCount = bbuf.getInt();
        exactLength += (4 + 8 + 1 + 4);

        // digest related fields
        this.broadcasted = bbuf.get() == (byte) 1;
        int digestLength = bbuf.getInt();
        if (digestLength > 0)
            bbuf.get(this.digest = new byte[digestLength]);

        // highly variable length fields
        // requestValue
        int reqValLen = bbuf.getInt();
        byte[] reqValBytes = new byte[reqValLen];
        bbuf.get(reqValBytes);
        this.requestValue = reqValBytes.length > 0 ? new String(reqValBytes, CHARSET) : null;
        exactLength += (4 + reqValBytes.length);

        // responseValue
        int respValLen = bbuf.getInt();
        byte[] respValBytes = new byte[respValLen];
        bbuf.get(respValBytes);
        this.responseValue = respValBytes.length > 0 ? new String(respValBytes, CHARSET) : null;
        exactLength += (4 + respValBytes.length);

        int numBatched = bbuf.getInt();
        if (numBatched == 0)
            return;
        // else
        // batched requests
        this.batched = new RequestPacket[numBatched];
        for (int i = 0; i < numBatched; i++) {
            int len = bbuf.getInt();
            byte[] element = new byte[len];
            bbuf.get(element);
            this.batched[i] = new RequestPacket(element);
        }

        assert (exactLength > 0);
    }
From source file: org.dcache.chimera.JdbcFs.java

    @Override
    public FsInode inodeFromBytes(byte[] handle) throws ChimeraFsException {
        FsInode inode;

        if (handle.length < MIN_HANDLE_LEN) {
            throw new FileNotFoundHimeraFsException("File handle too short");
        }

        ByteBuffer b = ByteBuffer.wrap(handle);
        int fsid = b.get();
        int type = b.get();
        int len = b.get(); // eat the file id size.
        long ino = b.getLong();
        int opaqueLen = b.get();
        if (opaqueLen > b.remaining()) {
            throw new FileNotFoundHimeraFsException("Bad Opaque len");
        }

        byte[] opaque = new byte[opaqueLen];
        b.get(opaque);

        FsInodeType inodeType = FsInodeType.valueOf(type);

        switch (inodeType) {
        case INODE:
            int level = Integer.parseInt(new String(opaque));
            inode = new FsInode(this, ino, level);
            break;
        case ID:
            inode = new FsInode_ID(this, ino);
            break;
        case TAGS:
            inode = new FsInode_TAGS(this, ino);
            break;
        case TAG:
            String tag = new String(opaque);
            inode = new FsInode_TAG(this, ino, tag);
            break;
        case NAMEOF:
            inode = new FsInode_NAMEOF(this, ino);
            break;
        case PARENT:
            inode = new FsInode_PARENT(this, ino);
            break;
        case PATHOF:
            inode = new FsInode_PATHOF(this, ino);
            break;
        case CONST:
            inode = new FsInode_CONST(this, ino);
            break;
        case PSET:
            inode = new FsInode_PSET(this, ino, getArgs(opaque));
            break;
        case PCUR:
            inode = new FsInode_PCUR(this, ino);
            break;
        case PLOC:
            inode = new FsInode_PLOC(this, ino);
            break;
        case PCRC:
            inode = new FsInode_PCRC(this, ino);
            break;
        default:
            throw new FileNotFoundHimeraFsException("Unsupported file handle type: " + inodeType);
        }
        return inode;
    }
From source file: com.healthmarketscience.jackcess.Database.java

    /**
     * Returns the password mask retrieved from the given header page and
     * format, or {@code null} if this format does not use a password mask.
     */
    static byte[] getPasswordMask(ByteBuffer buffer, JetFormat format) {
        // get extra password mask if necessary (the extra password mask is
        // generated from the database creation date stored in the header)
        int pwdMaskPos = format.OFFSET_HEADER_DATE;
        if (pwdMaskPos < 0) {
            return null;
        }

        buffer.position(pwdMaskPos);
        double dateVal = Double.longBitsToDouble(buffer.getLong());

        byte[] pwdMask = new byte[4];
        ByteBuffer.wrap(pwdMask).order(PageChannel.DEFAULT_BYTE_ORDER).putInt((int) dateVal);

        return pwdMask;
    }
From source file: com.yobidrive.diskmap.buckets.BucketTableManager.java

    private void initializeBucketTableFromLastCommittedBucketFile() throws BucketTableManagerException {
        FileInputStream tableStream = null;
        FileChannel fileChannel = null;
        try {
            File latestCommittedFile = getLatestCommitedFile();
            if (latestCommittedFile != null) {
                tableStream = new FileInputStream(latestCommittedFile);
                fileChannel = tableStream.getChannel();
                ByteBuffer buffer = ByteBuffer.allocate(HEADERSIZE);
                fileChannel.position(0L);
                int read = fileChannel.read(buffer);
                if (read < HEADERSIZE) {
                    fileChannel.close();
                    throw new BucketTableManagerException(
                            "Wrong bucket table header size: " + read + "/" + HEADERSIZE);
                }
                // Check content of header. Start with Big Endian (default for Java)
                buffer.rewind();
                byteOrder = ByteOrder.BIG_ENDIAN;
                buffer.order(byteOrder);
                int magic = buffer.getInt();
                if (magic == MAGICSTART_BADENDIAN) {
                    byteOrder = ByteOrder.LITTLE_ENDIAN;
                    buffer.order(byteOrder);
                } else if (magic != MAGICSTART) {
                    fileChannel.close();
                    throw new BucketTableManagerException("Bad header in bucket table file");
                }
                // Read number of buckets
                long headerMapSize = buffer.getLong();
                // Read checkPoint
                NeedlePointer includedCheckpoint = new NeedlePointer();
                includedCheckpoint.getNeedlePointerFromBuffer(buffer);
                // Read second magic number
                magic = buffer.getInt();
                if (magic != MAGICEND) {
                    fileChannel.close();
                    throw new BucketTableManagerException("Bad header in bucket table file");
                }
                // Check number of buckets against requested map size
                if (headerMapSize != mapSize) {
                    // Map size does not match
                    fileChannel.close();
                    throw new BucketTableManagerException(
                            "Requested map size " + mapSize + " does not match header map size " + headerMapSize);
                }
                // Sets initial checkpoint
                bucketTable.setInitialCheckPoint(includedCheckpoint);
                // Now reads all entries
                logger.info("Hot start: loading buckets...");
                for (int i = 0; i < nbBuffers; i++) {
                    bucketTable.prepareBufferForReading(i);
                    read = fileChannel.read(bucketTable.getBuffer(i));
                    if (read < bucketTable.getBuffer(i).limit())
                        throw new BucketTableManagerException("Incomplete bucket table file "
                                + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                    // else
                    //     logger.info("Hot start: loaded " + (i + 1) * entriesPerBuffer + " buckets"
                    //             + ((i < (nbBuffers - 1)) ? "..." : ""));
                }
                // Checks second magic marker
                buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
                buffer.rewind();
                buffer.limit(INTSIZE);
                if (fileChannel.read(buffer) < INTSIZE)
                    throw new BucketTableManagerException(
                            "Incomplete bucket table file, missing secong magic number "
                                    + latestCommittedFile.getName());
                buffer.rewind();
                magic = buffer.getInt();
                if (magic != MAGICSTART) {
                    fileChannel.close();
                    throw new BucketTableManagerException("Bad header in bucket table file");
                }
                // Now reads clean counters
                while (true) {
                    buffer.rewind();
                    buffer.limit(NeedleLogInfo.INFOSIZE);
                    read = fileChannel.read(buffer);
                    if (read > 0 && read < NeedleLogInfo.INFOSIZE)
                        throw new BucketTableManagerException("Incomplete bucket table file, log info too short "
                                + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                    if (read <= 0)
                        break;
                    else {
                        NeedleLogInfo nli = new NeedleLogInfo(useAverage);
                        buffer.rewind();
                        nli.getNeedleLogInfo(buffer);
                        logInfoPerLogNumber.put(new Integer(nli.getNeedleFileNumber()), nli);
                    }
                }
                logger.info("Hot start: loaded " + (nbBuffers * entriesPerBuffer) + " buckets");
            } else {
                // Empty file
                bucketTable.setInitialCheckPoint(new NeedlePointer());
                bucketTable.format();
            }
        } catch (IOException ie) {
            throw new BucketTableManagerException("Failed initializing bucket table", ie);
        } catch (BufferUnderflowException bue) {
            throw new BucketTableManagerException("Bucket table too short", bue);
        } finally {
            if (fileChannel != null) {
                try {
                    fileChannel.close();
                } catch (IOException ex) {
                    throw new BucketTableManagerException("Error while closing file channel", ex);
                }
            }
        }
    }
From source file: com.btoddb.fastpersitentqueue.FpqIT.java

    @Test
    public void testThreading() throws Exception {
        final int numEntries = 1000;
        final int numPushers = 4;
        final int numPoppers = 4;
        final int entrySize = 1000;
        fpq1.setMaxTransactionSize(2000);
        final int popBatchSize = 100;
        fpq1.setMaxMemorySegmentSizeInBytes(10000000);
        fpq1.setMaxJournalFileSize(10000000);
        fpq1.setMaxJournalDurationInMs(30000);
        fpq1.setFlushPeriodInMs(1000);
        fpq1.setNumberOfFlushWorkers(4);

        final Random pushRand = new Random(1000L);
        final Random popRand = new Random(1000000L);
        final AtomicInteger pusherFinishCount = new AtomicInteger();
        final AtomicInteger numPops = new AtomicInteger();
        final AtomicLong counter = new AtomicLong();
        final AtomicLong pushSum = new AtomicLong();
        final AtomicLong popSum = new AtomicLong();

        fpq1.init();

        ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);
        Set<Future> futures = new HashSet<Future>();

        // start pushing
        for (int i = 0; i < numPushers; i++) {
            Future future = execSrvc.submit(new Runnable() {
                @Override
                public void run() {
                    for (int i = 0; i < numEntries; i++) {
                        try {
                            long x = counter.getAndIncrement();
                            pushSum.addAndGet(x);
                            ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                            bb.putLong(x);

                            fpq1.beginTransaction();
                            fpq1.push(bb.array());
                            fpq1.commit();
                            if ((x + 1) % 500 == 0) {
                                System.out.println("pushed ID = " + x);
                            }
                            Thread.sleep(pushRand.nextInt(5));
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                    }
                    pusherFinishCount.incrementAndGet();
                }
            });
            futures.add(future);
        }

        // start popping
        for (int i = 0; i < numPoppers; i++) {
            Future future = execSrvc.submit(new Runnable() {
                @Override
                public void run() {
                    while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                        try {
                            fpq1.beginTransaction();
                            try {
                                Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                                if (null == entries) {
                                    Thread.sleep(100);
                                    continue;
                                }

                                for (FpqEntry entry : entries) {
                                    ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                    popSum.addAndGet(bb.getLong());
                                    if (entry.getId() % 500 == 0) {
                                        System.out.println("popped ID = " + entry.getId());
                                    }
                                }
                                numPops.addAndGet(entries.size());
                                fpq1.commit();
                                entries.clear();
                            } finally {
                                if (fpq1.isTransactionActive()) {
                                    fpq1.rollback();
                                }
                            }
                            Thread.sleep(popRand.nextInt(10));
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                    }
                }
            });
            futures.add(future);
        }

        boolean finished = false;
        while (!finished) {
            try {
                for (Future f : futures) {
                    f.get();
                }
                finished = true;
            } catch (InterruptedException e) {
                // ignore
                Thread.interrupted();
            }
        }

        assertThat(numPops.get(), is(numEntries * numPushers));
        assertThat(fpq1.getNumberOfEntries(), is(0L));
        assertThat(pushSum.get(), is(popSum.get()));
        assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
        assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
        assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
        assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
                is(empty()));
        assertThat(
                FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
                hasSize(1));
    }
From source file: org.apache.bookkeeper.bookie.BookieShell.java

    /**
     * Scan over an entry log file for a particular entry
     *
     * @param logId
     *          Entry Log File id.
     * @param ledgerId
     *          id of the ledger
     * @param entryId
     *          entryId of the ledger we are looking for (-1 for all of the entries of the ledger)
     * @param printMsg
     *          Whether printing the entry data.
     * @throws Exception
     */
    protected void scanEntryLogForSpecificEntry(long logId, final long lId, final long eId, final boolean printMsg)
            throws Exception {
        System.out.println("Scan entry log " + logId + " (" + Long.toHexString(logId) + ".log)"
                + " for LedgerId " + lId + ((eId == -1) ? "" : " for EntryId " + eId));
        final MutableBoolean entryFound = new MutableBoolean(false);
        scanEntryLog(logId, new EntryLogScanner() {
            @Override
            public boolean accept(long ledgerId) {
                return ((lId == ledgerId) && ((!entryFound.booleanValue()) || (eId == -1)));
            }

            @Override
            public void process(long ledgerId, long startPos, ByteBuffer entry) {
                long entrysLedgerId = entry.getLong();
                long entrysEntryId = entry.getLong();
                entry.rewind();
                if ((ledgerId == entrysLedgerId) && (ledgerId == lId) && ((entrysEntryId == eId)) || (eId == -1)) {
                    entryFound.setValue(true);
                    formatEntry(startPos, entry, printMsg);
                }
            }
        });
        if (!entryFound.booleanValue()) {
            System.out.println("LedgerId " + lId + ((eId == -1) ? "" : " EntryId " + eId)
                    + " is not available in the entry log " + logId + " (" + Long.toHexString(logId) + ".log)");
        }
    }
From source file: org.apache.bookkeeper.bookie.BookieShell.java

    /**
     * Format the message into a readable format.
     *
     * @param pos
     *          File offset of the message stored in entry log file
     * @param recBuff
     *          Entry Data
     * @param printMsg
     *          Whether printing the message body
     */
    private void formatEntry(long pos, ByteBuffer recBuff, boolean printMsg) {
        long ledgerId = recBuff.getLong();
        long entryId = recBuff.getLong();
        int entrySize = recBuff.limit();

        System.out.println("--------- Lid=" + ledgerId + ", Eid=" + entryId + ", ByteOffset=" + pos
                + ", EntrySize=" + entrySize + " ---------");
        if (entryId == Bookie.METAENTRY_ID_LEDGER_KEY) {
            int masterKeyLen = recBuff.getInt();
            byte[] masterKey = new byte[masterKeyLen];
            recBuff.get(masterKey);
            System.out.println("Type: META");
            System.out.println("MasterKey: " + bytes2Hex(masterKey));
            System.out.println();
            return;
        }
        if (entryId == Bookie.METAENTRY_ID_FENCE_KEY) {
            System.out.println("Type: META");
            System.out.println("Fenced");
            System.out.println();
            return;
        }
        // process a data entry
        long lastAddConfirmed = recBuff.getLong();
        System.out.println("Type: DATA");
        System.out.println("LastConfirmed: " + lastAddConfirmed);
        if (!printMsg) {
            System.out.println();
            return;
        }
        // skip digest checking
        recBuff.position(32 + 8);
        System.out.println("Data:");
        System.out.println();
        try {
            byte[] ret = new byte[recBuff.remaining()];
            recBuff.get(ret);
            formatter.formatEntry(ret);
        } catch (Exception e) {
            System.out.println("N/A. Corrupted.");
        }
        System.out.println();
    }
From source file: com.healthmarketscience.jackcess.Column.java

    /**
     * Decodes a date value.
     */
    private Date readDateValue(ByteBuffer buffer) {
        // seems access stores dates in the local timezone. guess you just hope
        // you read it in the same timezone in which it was written!
        long dateBits = buffer.getLong();
        long time = fromDateDouble(Double.longBitsToDouble(dateBits));
        return new DateExt(time, dateBits);
    }
From source file: nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java

    private UUID getUUID(ByteBuffer buf) {
        ByteOrder byteOrder = buf.order();
        buf.order(ByteOrder.BIG_ENDIAN);
        long uuid_high = buf.getLong();
        long uuid_low = buf.getLong();
        buf.order(byteOrder);
        return new UUID(uuid_high, uuid_low);
    }