Usage examples for java.lang.Long.BYTES

Field signature: public static final int BYTES

Long.BYTES (added in Java 8) is the number of bytes used to represent a long value in two's complement binary form, which is always 8. The examples below, drawn from open source projects, use it to size byte arrays and ByteBuffers when serializing long values, instead of hard-coding the magic number 8.
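A minimal sketch of the idiom that all the examples below share, sizing a buffer with the constant and round-tripping a long through it:

import java.nio.ByteBuffer;

public class LongBytesDemo {
    public static void main(String[] args) {
        System.out.println(Long.BYTES); // 8

        // Size a buffer for exactly one long, then round-trip a value through it.
        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
        buffer.putLong(1234567890123456789L);
        buffer.flip(); // prep for reading
        System.out.println(buffer.getLong()); // 1234567890123456789
    }
}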
From source file:io.pravega.controller.store.stream.ZKStream.java
@Override
public CompletableFuture<Void> createMarkerData(int segmentNumber, long timestamp) {
    final String path = ZKPaths.makePath(markerPath, String.format("%d", segmentNumber));
    byte[] b = new byte[Long.BYTES];
    BitConverter.writeLong(b, 0, timestamp);
    return store.createZNodeIfNotExist(path, b)
            .thenAccept(x -> cache.invalidateCache(markerPath));
}
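The example allocates exactly Long.BYTES bytes and lets Pravega's BitConverter fill them. If BitConverter isn't on hand, a JDK-only sketch of the same serialization might look like this (writeLong here is a hypothetical stand-in for BitConverter.writeLong, assumed big-endian like ByteBuffer's default byte order):

import java.nio.ByteBuffer;

public final class LongSerialization {
    // Hypothetical stand-in for BitConverter.writeLong: writes one big-endian long at offset.
    static void writeLong(byte[] target, int offset, long value) {
        ByteBuffer.wrap(target, offset, Long.BYTES).putLong(value);
    }

    public static void main(String[] args) {
        byte[] b = new byte[Long.BYTES];
        writeLong(b, 0, System.currentTimeMillis());
        System.out.println(java.util.Arrays.toString(b));
    }
}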
From source file:io.stallion.dataAccess.file.FilePersisterBase.java
/**
 * Derives a Long id by hashing the file path and then taking the first 6 bytes
 * of the MD5 hash, left-padded with two zero bytes to fill Long.BYTES.
 *
 * This is used if the model object doesn't have a defined id field.
 *
 * @param path
 * @return
 */
public Long makeIdFromFilePath(String path) {
    path = path.toLowerCase();
    path = path.replace(getBucketFolderPath().toLowerCase(), "");
    path = StringUtils.stripStart(path, "/");
    path = getBucket() + "-----" + path;
    // Derive a long id by hashing the file path
    byte[] bs = Arrays.copyOfRange(DigestUtils.md5(path), 0, 6);
    bs = ArrayUtils.addAll(new byte[] { 0, 0 }, bs);
    ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
    buffer.put(bs);
    buffer.flip(); // need flip
    Long l = buffer.getLong();
    if (l < 0) {
        l = -l;
    }
    Log.finest("calculated id is {0}", l);
    return l;
}
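Prepending two zero bytes keeps the top 16 bits clear, so the resulting long is always non-negative and the negation branch is purely defensive. A more compact way to derive a non-negative long from a digest, sketched here with the same commons-codec DigestUtils but not taken from the Stallion source, is to read the first Long.BYTES of the hash and mask off the sign bit:

import java.nio.ByteBuffer;
import org.apache.commons.codec.digest.DigestUtils;

public final class HashIds {
    // Reads the first Long.BYTES of the MD5 digest (16 bytes) and clears the sign bit.
    static long idFromPath(String path) {
        byte[] digest = DigestUtils.md5(path);
        return ByteBuffer.wrap(digest).getLong() & Long.MAX_VALUE;
    }

    public static void main(String[] args) {
        System.out.println(idFromPath("bucket-----posts/hello.txt"));
    }
}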
From source file:org.apache.accumulo.core.file.rfile.PrintInfo.java
/**
 * Print the unencrypted parameters that tell the Crypto Service how to decrypt the file. This
 * information is useful for debugging if and how a file was encrypted.
 */
private void printCryptoParams(Path path, FileSystem fs) {
    byte[] noCryptoBytes = new NoFileEncrypter().getDecryptionParameters();
    try (FSDataInputStream fsDis = fs.open(path)) {
        long fileLength = fs.getFileStatus(path).getLen();
        fsDis.seek(fileLength - 16 - Utils.Version.size() - (Long.BYTES));
        long cryptoParamOffset = fsDis.readLong();
        fsDis.seek(cryptoParamOffset);
        byte[] cryptoParams = CryptoUtils.readParams(fsDis);
        if (!Arrays.equals(noCryptoBytes, cryptoParams)) {
            System.out.println("Encrypted with Params: "
                    + Key.toPrintableString(cryptoParams, 0, cryptoParams.length, cryptoParams.length));
        } else {
            System.out.println("No on disk encryption detected.");
        }
    } catch (IOException ioe) {
        log.error("Error reading crypto params", ioe);
    }
}
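The seek arithmetic relies on a fixed-size trailer: the crypto parameter offset is a long stored a known distance from the end of the file (the 16-byte block and version size are specific to Accumulo's file format). A generic sketch of reading a trailing long with plain java.io, for illustration only:

import java.io.IOException;
import java.io.RandomAccessFile;

public final class TrailerRead {
    // Reads a long stored 'fromEnd' bytes before the end of the file.
    static long readTrailingLong(String path, long fromEnd) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(path, "r")) {
            raf.seek(raf.length() - fromEnd);
            return raf.readLong(); // big-endian, Long.BYTES wide
        }
    }
}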
From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorage.java
@Override
public long getLastAddConfirmed(long ledgerId) throws IOException {
    Long lac = ledgerCache.getLastAddConfirmed(ledgerId);
    if (lac == null) {
        ByteBuf bb = getEntry(ledgerId, BookieProtocol.LAST_ADD_CONFIRMED);
        if (null == bb) {
            return BookieProtocol.INVALID_ENTRY_ID;
        } else {
            try {
                bb.skipBytes(2 * Long.BYTES); // skip ledger & entry id
                lac = bb.readLong();
                lac = ledgerCache.updateLastAddConfirmed(ledgerId, lac);
            } finally {
                bb.release();
            }
        }
    }
    return lac;
}
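The skipBytes(2 * Long.BYTES) call steps over the two leading long fields (ledger id and entry id) to land on the LAC. A JDK-only sketch of the same fixed-offset parsing, assuming that three-long layout:

import java.nio.ByteBuffer;

public final class EntryHeader {
    // Assumed layout: [ledgerId: long][entryId: long][lastAddConfirmed: long]...
    static long readLastAddConfirmed(byte[] entry) {
        return ByteBuffer.wrap(entry).getLong(2 * Long.BYTES); // absolute get, no position change
    }

    public static void main(String[] args) {
        ByteBuffer b = ByteBuffer.allocate(3 * Long.BYTES);
        b.putLong(7L).putLong(0L).putLong(42L); // ledgerId, entryId, lac
        System.out.println(readLastAddConfirmed(b.array())); // 42
    }
}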
From source file:com.act.lcms.v2.fullindex.Builder.java
protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume.  If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer.  Don't write more than this.
     *   position: The next index in the buffer to read or write a byte.  Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written.  Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it.  (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = capacity.  Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0.  This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0.  Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read.  Always call flip() before rewind().  Can rewind many times to
     *              re-read the buffer repeatedly.
     *   remaining: How many bytes do we have left to read?  Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this.  It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading.  Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.clear(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations.  The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break).  Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach.
     */

    /* Because we'll eventually use the window indices to map a mz range to a list of triples that fall within that
     * range, verify that all of the indices are unique.  If they're not, we'll end up overwriting the data in and
     * corrupting the structure of the index. */
    ensureUniqueMZWindowIndices(windows);

    // For every mz window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional.  Specifically,
         * mzWindows.get(i).getIndex() != i, but mzWindowTripleBuffers[i] belongs to mzWindows.get(i).  We'll map
         * windows indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take.  Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);

    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process).  The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well.  As soon as we've passed out of the range of one of our windows, we discard it.  It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint.  Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists.  Windows flow: tbd -> working -> done (nowhere).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!

        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple.  Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue.  Edge cases!
            }

            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our window
             * set that matched with the m/z of our current mzIntensity.  However, since we're now also recording the
             * links between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on regardless of
             * whether we have any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value.  Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue.  Note that since we're
             * only storing the *index* of the triple, these buffers are going to consume less space than they would
             * if we stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped after writing, so we should be good to rewind (to be safe) and write here.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();

        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow to triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}
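The long tutorial comment at the top of extractTriples distills to a small write/flip/read/rewind cycle. A self-contained sketch of exactly that pattern:

import java.nio.ByteBuffer;

public class ByteBufferCycle {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(2 * Long.BYTES);

        buffer.clear();                 // position = 0, limit = capacity: ready for writes.
        buffer.putLong(1L).putLong(2L); // write two longs
        buffer.flip();                  // limit = position, position = 0: ready for reads. Call once!

        while (buffer.hasRemaining()) { // read everything up to the limit
            System.out.println(buffer.getLong());
        }

        buffer.rewind();                // position = 0, limit untouched: re-read the same bytes.
        while (buffer.hasRemaining()) {
            System.out.println(buffer.getLong());
        }
    }
}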
From source file:com.act.lcms.v2.fullindex.Searcher.java
/**
 * Searches an LCMS index for all (time, m/z, intensity) triples within some time and m/z ranges.
 *
 * Note that this method is very much a first-draft/WIP.  There are many opportunities for optimization and
 * improvement here, but this works as an initial attempt.  This method is littered with TODOs, which once TODone
 * should make this a near optimal method of searching through LCMS readings.
 *
 * @param mzRange The range of m/z values for which to search.
 * @param timeRange The time range for which to search.
 * @return A list of (time, m/z, intensity) triples that fall within the specified ranges.
 * @throws RocksDBException
 * @throws ClassNotFoundException
 * @throws IOException
 */
public List<TMzI> searchIndexInRange(Pair<Double, Double> mzRange, Pair<Double, Double> timeRange)
        throws RocksDBException, ClassNotFoundException, IOException {
    // TODO: gracefully handle the case when only one range is specified.
    // TODO: consider producing some sort of query plan structure that can be used for optimization/explanation.

    DateTime start = DateTime.now();
    /* Demote the time range to floats, as we know that that's how we stored times in the DB.  This tight coupling
     * would normally be a bad thing, but given that this class is joined at the hip with Builder necessarily, it
     * doesn't seem like a terrible thing at the moment. */
    Pair<Float, Float> tRangeF = // My kingdom for a functor!
            Pair.of(timeRange.getLeft().floatValue(), timeRange.getRight().floatValue());

    LOGGER.info("Running search for %.6f <= t <= %.6f, %.6f <= m/z <= %.6f",
            tRangeF.getLeft(), tRangeF.getRight(), mzRange.getLeft(), mzRange.getRight());

    // TODO: short circuit these filters.  The first failure after success => no more possible hits.
    List<Float> timesInRange = timepointsInRange(tRangeF);

    byte[][] timeIndexBytes = extractValueBytes(ColumnFamilies.TIMEPOINT_TO_TRIPLES, timesInRange, Float.BYTES,
            ByteBuffer::putFloat);
    // TODO: bail if all the timeIndexBytes lengths are zero.

    List<MZWindow> mzWindowsInRange = mzWindowsInRange(mzRange);
    byte[][] mzIndexBytes = extractValueBytes(ColumnFamilies.WINDOW_ID_TO_TRIPLES, mzWindowsInRange,
            Integer.BYTES, (buff, mz) -> buff.putInt(mz.getIndex()));
    // TODO: bail if all the mzIndexBytes are zero.

    /* TODO: if the number of entries in one range is significantly smaller than the other (like an order of
     * magnitude or more), skip extraction of the other set of ids and just filter at the end.  This will be
     * especially helpful when the number of ids in the m/z domain is small, as each time point will probably have
     * >10k ids. */

    LOGGER.info("Found/loaded %d matching time ranges, %d matching m/z ranges",
            timesInRange.size(), mzWindowsInRange.size());

    // TODO: there is no need to union the time indices since they are necessarily distinct.  Just concatenate instead.
    Set<Long> unionTimeIds = unionIdBuffers(timeIndexBytes);
    Set<Long> unionMzIds = unionIdBuffers(mzIndexBytes);

    // TODO: handle the case where one of the sets is empty specially.  Either keep all in the other set or drop all.
    // TODO: we might be able to do this faster by intersecting two sorted lists.
    Set<Long> intersectionIds = new HashSet<>(unionTimeIds);
    /* TODO: this is effectively a hash join, which isn't optimal for sets of wildly different cardinalities.
     * Consider using sort-merge join instead, which will reduce the object overhead (by a lot) and allow us to pass
     * over the union of the ids from each range just once when joining them.  Additionally, just skip this whole
     * step and filter at the end if one of the set's sizes is less than 1k or so and the other is large. */
    intersectionIds.retainAll(unionMzIds);
    LOGGER.info("Id intersection results: t = %d, mz = %d, t ^ mz = %d",
            unionTimeIds.size(), unionMzIds.size(), intersectionIds.size());

    List<Long> idsToFetch = new ArrayList<>(intersectionIds);
    Collections.sort(idsToFetch); // Sort ids so we retrieve them in an order that exploits index locality.

    LOGGER.info("Collecting TMzI triples");
    // Collect all the triples for the ids we extracted.
    // TODO: don't manifest all the bytes: just create a stream of results from the cursor to reduce memory overhead.
    List<TMzI> results = new ArrayList<>(idsToFetch.size());
    byte[][] resultBytes = extractValueBytes(ColumnFamilies.ID_TO_TRIPLE, idsToFetch, Long.BYTES,
            ByteBuffer::putLong);
    for (byte[] tmziBytes : resultBytes) {
        results.add(TMzI.readNextFromByteBuffer(ByteBuffer.wrap(tmziBytes)));
    }

    // TODO: do this filtering inline with the extraction.  We shouldn't have to load all the triples before filtering.
    LOGGER.info("Performing final filtering");
    int preFilterTMzICount = results.size();
    results = results.stream()
            .filter(tmzi -> tmzi.getTime() >= tRangeF.getLeft() && tmzi.getTime() <= tRangeF.getRight()
                    && tmzi.getMz() >= mzRange.getLeft() && tmzi.getMz() <= mzRange.getRight())
            .collect(Collectors.toList());
    LOGGER.info("Precise filtering results: %d -> %d", preFilterTMzICount, results.size());

    DateTime end = DateTime.now();
    LOGGER.info("Search completed in %dms", end.getMillis() - start.getMillis());

    // TODO: return a stream instead that can load the triples lazily.
    return results;
}
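Note how extractValueBytes is handed a key width (Long.BYTES, Float.BYTES, Integer.BYTES) plus a writer (e.g. ByteBuffer::putLong) so it can build fixed-width RocksDB keys. A minimal sketch of that key-building idiom, with makeKeys as a hypothetical stand-in for the key-building part of the real helper:

import java.nio.ByteBuffer;
import java.util.List;
import java.util.function.BiConsumer;

public final class FixedWidthKeys {
    // Hypothetical stand-in: turns each value into a fixed-width big-endian key.
    static <T> byte[][] makeKeys(List<T> values, int keyBytes, BiConsumer<ByteBuffer, T> writer) {
        byte[][] keys = new byte[values.size()][];
        ByteBuffer buffer = ByteBuffer.allocate(keyBytes); // one reusable buffer, as in the original
        for (int i = 0; i < values.size(); i++) {
            buffer.clear();
            writer.accept(buffer, values.get(i));
            buffer.flip();
            keys[i] = new byte[buffer.remaining()];
            buffer.get(keys[i]);
        }
        return keys;
    }

    public static void main(String[] args) {
        byte[][] keys = makeKeys(List.of(1L, 2L, 3L), Long.BYTES, ByteBuffer::putLong);
        System.out.println(keys.length + " keys of " + keys[0].length + " bytes each");
    }
}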
From source file:org.apache.bookkeeper.statelib.impl.mvcc.MVCCStoreImpl.java
private IncrementResult<K, V> increment(long revision, WriteBatch batch, IncrementOp<K, V> op) {
    // parameters
    final K key = op.key();
    final long amount = op.amount();

    // raw key
    final byte[] rawKey = keyCoder.encode(key);

    MVCCRecord record;
    try {
        record = getKeyRecord(key, rawKey);
    } catch (StateStoreRuntimeException e) {
        throw e;
    }

    // result
    final IncrementResultImpl<K, V> result = resultFactory.newIncrementResult(revision);
    try {
        long oldAmount = 0L;
        if (null != record) {
            // validate the update revision before applying the update to the record
            if (record.compareModRev(revision) >= 0) {
                result.code(Code.SMALLER_REVISION);
                return result;
            }

            if (ValueType.NUMBER != record.getValueType()) {
                result.code(Code.ILLEGAL_OP);
                return result;
            }

            record.setVersion(record.getVersion() + 1);
            oldAmount = record.getValue().getLong(0);
        } else {
            record = MVCCRecord.newRecord();
            record.setCreateRev(revision);
            record.setVersion(0L);
            record.setValue(PooledByteBufAllocator.DEFAULT.buffer(Long.BYTES), ValueType.NUMBER);
        }

        long newAmount = oldAmount + amount;
        record.getValue().writerIndex(0);
        record.getValue().writeLong(newAmount);
        record.setModRev(revision);

        // write the mvcc record back
        batch.put(dataCfHandle, rawKey, recordCoder.encode(record));

        // finalize the result
        result.code(Code.OK);
        if (op.option().getTotal()) {
            result.totalAmount(newAmount);
        }
        return result;
    } catch (RocksDBException rde) {
        result.close();
        throw new StateStoreRuntimeException(rde);
    } catch (StateStoreRuntimeException e) {
        result.close();
        throw e;
    } finally {
        if (null != record) {
            record.recycle();
        }
    }
}
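Stripped of the MVCC bookkeeping, the counter itself is just a long held in a Long.BYTES-sized Netty buffer that gets read, overwritten in place, and written back. A minimal sketch of that read-modify-write, assuming netty-buffer is on the classpath:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

public final class CounterBuf {
    public static void main(String[] args) {
        ByteBuf value = PooledByteBufAllocator.DEFAULT.buffer(Long.BYTES);
        value.writeLong(0L); // initial amount

        long oldAmount = value.getLong(0); // absolute read at index 0
        value.writerIndex(0);              // rewind the writer to overwrite in place
        value.writeLong(oldAmount + 5L);   // increment by 5

        System.out.println(value.getLong(0)); // 5
        value.release();                   // return the pooled buffer
    }
}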
From source file:net.dv8tion.jda.audio.AudioWebSocket.java
private void setupUdpKeepAliveThread() {
    udpKeepAliveThread = new Thread("AudioWebSocket UDP-KeepAlive Guild: " + guild.getId()) {
        @Override
        public void run() {
            while (socket.isOpen() && !udpSocket.isClosed() && !this.isInterrupted()) {
                long seq = 0;
                try {
                    ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES + 1);
                    buffer.put((byte) 0xC9);
                    buffer.putLong(seq);
                    DatagramPacket keepAlivePacket = new DatagramPacket(buffer.array(), buffer.array().length,
                            address);
                    udpSocket.send(keepAlivePacket);

                    Thread.sleep(5000); // Wait 5 seconds to send next keepAlivePacket.
                } catch (NoRouteToHostException e) {
                    LOG.warn("Closing AudioConnection due to inability to ping audio packets.");
                    LOG.warn("Cannot send audio packet because JDA cannot navigate the route to Discord.\n"
                            + "Are you sure you have internet connection? It is likely that you've lost connection.");
                    AudioWebSocket.this.close(true, -1);
                    break;
                } catch (IOException e) {
                    LOG.log(e);
                } catch (InterruptedException e) {
                    // We were asked to close.
                    // e.printStackTrace();
                }
            }
        }
    };
    udpKeepAliveThread.setPriority(Thread.NORM_PRIORITY + 1);
    udpKeepAliveThread.setDaemon(true);
    udpKeepAliveThread.start();
}
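The keep-alive datagram is one opcode byte (0xC9) followed by a Long.BYTES sequence number, hence the allocate(Long.BYTES + 1). A sketch of decoding such a packet; the layout is taken from the send path above, but the decode side is an illustration, not JDA code:

import java.nio.ByteBuffer;

public final class KeepAliveDecode {
    public static void main(String[] args) {
        // Build a packet the way the sender does: 1 opcode byte + Long.BYTES sequence number.
        ByteBuffer out = ByteBuffer.allocate(Long.BYTES + 1);
        out.put((byte) 0xC9).putLong(42L);
        byte[] datagram = out.array();

        // Decode it on the receiving side.
        ByteBuffer in = ByteBuffer.wrap(datagram);
        byte opcode = in.get();
        long seq = in.getLong();
        System.out.printf("opcode=0x%02X seq=%d%n", opcode & 0xFF, seq);
    }
}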
From source file:org.apache.airavata.gfac.core.GFacUtils.java
public static byte[] longToBytes(long x) {
    ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
    buffer.putLong(x);
    return buffer.array();
}
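A natural companion to this helper is the inverse conversion. A minimal sketch; bytesToLong is not part of the GFacUtils source shown here, it is added for illustration:

import java.nio.ByteBuffer;

public final class LongCodec {
    public static byte[] longToBytes(long x) {
        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
        buffer.putLong(x);
        return buffer.array();
    }

    // Hypothetical inverse of longToBytes: reads a big-endian long back out.
    public static long bytesToLong(byte[] bytes) {
        return ByteBuffer.wrap(bytes, 0, Long.BYTES).getLong();
    }

    public static void main(String[] args) {
        System.out.println(bytesToLong(longToBytes(123456789L))); // 123456789
    }
}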