Example usage for com.google.common.primitives Longs BYTES

Introduction

On this page you can find example usage for com.google.common.primitives Longs BYTES.

Prototype

int BYTES

Document

The number of bytes required to represent a primitive long value.
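
In Guava this constant equals Long.SIZE / Byte.SIZE, i.e. 8. The sketch below is a minimal, self-contained illustration of the kind of usage listed on this page (the class name LongsBytesExample is illustrative only, not taken from any of the projects below):

import java.nio.ByteBuffer;

import com.google.common.primitives.Longs;

public class LongsBytesExample {
    public static void main(String[] args) {
        // Longs.BYTES == 8: the number of bytes needed to hold one primitive long.
        ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES);
        buf.putLong(System.currentTimeMillis());
        buf.flip();

        // Longs.toByteArray is an alternative that returns a fresh 8-byte array.
        byte[] asArray = Longs.toByteArray(42L);
        System.out.println(buf.remaining() + " " + asArray.length); // prints "8 8"
    }
}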

Usage

From source file:org.jclouds.kinetic.strategy.internal.KineticStorageStrategyImpl.java

private void writeCommonMetadataAttr(UserDefinedFileAttributeView view, Blob blob) throws IOException {
    ContentMetadata metadata = blob.getMetadata().getContentMetadata();
    writeStringAttributeIfPresent(view, XATTR_CACHE_CONTROL, metadata.getCacheControl());
    writeStringAttributeIfPresent(view, XATTR_CONTENT_DISPOSITION, metadata.getContentDisposition());
    writeStringAttributeIfPresent(view, XATTR_CONTENT_ENCODING, metadata.getContentEncoding());
    writeStringAttributeIfPresent(view, XATTR_CONTENT_LANGUAGE, metadata.getContentLanguage());
    writeStringAttributeIfPresent(view, XATTR_CONTENT_TYPE, metadata.getContentType());
    Date expires = metadata.getExpires();
    if (expires != null) {
        ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES).putLong(expires.getTime());
        buf.flip();
        view.write(XATTR_EXPIRES, buf);
    }
    for (Map.Entry<String, String> entry : blob.getMetadata().getUserMetadata().entrySet()) {
        writeStringAttributeIfPresent(view, XATTR_USER_METADATA_PREFIX + entry.getKey(), entry.getValue());
    }
}
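
For context, here is a hypothetical read-side counterpart (readExpiresAttr is not part of the jclouds source; it only illustrates decoding the Longs.BYTES-sized attribute written above):

private Date readExpiresAttr(UserDefinedFileAttributeView view) throws IOException {
    // Sketch only: assumes XATTR_EXPIRES was written as a single big-endian long,
    // exactly as in writeCommonMetadataAttr above.
    if (!view.list().contains(XATTR_EXPIRES)) {
        return null;
    }
    ByteBuffer buf = ByteBuffer.allocate(Longs.BYTES);
    view.read(XATTR_EXPIRES, buf);
    buf.flip();
    return new Date(buf.getLong());
}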

From source file:co.cask.tigon.data.transaction.queue.hbase.HBaseQueueAdmin.java

private byte[] decodeGroupInfo(Map<Long, Integer> groupInfo, Map<byte[], byte[]> columns,
        Map<Long, Integer> oldGroupInfo) {
    byte[] smallest = null;

    for (Map.Entry<byte[], byte[]> entry : columns.entrySet()) {
        // Consumer state column is named as "<groupId><instanceId>"
        long groupId = Bytes.toLong(entry.getKey());

        // Map keys are sorted by groupId then instanceId, so repeatedly putting (instanceId + 1) leaves the group size for each group.
        oldGroupInfo.put(groupId, Bytes.toInt(entry.getKey(), Longs.BYTES) + 1);

        // Update smallest if the group still exists in the new groups.
        if (groupInfo.containsKey(groupId)
                && (smallest == null || Bytes.BYTES_COMPARATOR.compare(entry.getValue(), smallest) < 0)) {
            smallest = entry.getValue();
        }
    }
    return smallest;
}

From source file:co.cask.cdap.data2.transaction.stream.AbstractStreamFileConsumer.java

/**
 * Returns the initial scanned states for the given entry key.
 *
 * Conceptually the scan runs from the given entry key to the end of the entries represented
 * by that stream file (i.e. offset = Long.MAX_VALUE), as indicated by the row prefix (the row prefix uniquely
 * identifies the stream file).
 * However, due to the memory limit, scanning is done progressively until it sees an entry whose state value
 * was written with a transaction write pointer later than when this consumer started.
 *
 * @param row the entry row key.
 */
private SortedMap<byte[], byte[]> getInitRowStates(byte[] row) throws IOException {
    SortedMap<byte[], byte[]> rowStates = entryStates.get(row);

    if (rowStates != null) {
        // If the scan is completed for this row prefix, simply return the cached entries.
        // Also, if the cached states extend beyond the current row, just return, as the caller only uses the
        // cached state to do point lookups.
        if (entryStatesScanCompleted.contains(row) || !rowStates.tailMap(row).isEmpty()) {
            return rowStates;
        }
    } else {
        rowStates = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
        entryStates.put(row, rowStates);
    }

    // Scan from the given row up to the max file offset.
    // The last 8 bytes are the file offset; set it to the max value so that the scan covers the last offset.
    byte[] stopRow = Arrays.copyOf(row, row.length);
    Bytes.putLong(stopRow, stopRow.length - Longs.BYTES, Long.MAX_VALUE);

    StateScanner scanner = scanStates(row, stopRow);
    try {
        // Scan until MAX_SCAN_ROWS or exhausted the scanner
        int rowCached = 0;
        while (scanner.nextStateRow() && rowCached < MAX_SCAN_ROWS) {
            if (storeInitState(scanner.getRow(), scanner.getState(), rowStates)) {
                rowCached++;
            }
        }

        // If no row is cached, no need to scan again, as they'll be inserted after this consumer starts
        if (rowCached == 0) {
            entryStatesScanCompleted.add(row);
        }
    } finally {
        scanner.close();
    }
    return rowStates;
}

From source file:co.cask.cdap.data2.transaction.queue.hbase.HBaseConsumerStateStore.java

/**
 * Returns the column qualifier for the consumer state column. The qualifier is formed by
 * {@code <groupId><instanceId>}.
 * @param groupId Group ID of the consumer
 * @param instanceId Instance ID of the consumer
 * @return A new byte[] which is the column qualifier.
 */
private byte[] getConsumerStateColumn(long groupId, int instanceId) {
    byte[] column = new byte[Longs.BYTES + Ints.BYTES];
    Bytes.putLong(column, 0, groupId);
    Bytes.putInt(column, Longs.BYTES, instanceId);
    return column;
}
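
For illustration only, the qualifier built above can be split back into its two parts with plain NIO (this decode helper is not part of the CDAP source; it relies on the big-endian layout produced by Bytes.putLong and Bytes.putInt):

private static void decodeConsumerStateColumn(byte[] column) {
    ByteBuffer buf = ByteBuffer.wrap(column); // big-endian by default, matching Bytes.putLong/putInt
    long groupId = buf.getLong();             // first Longs.BYTES bytes hold the group ID
    int instanceId = buf.getInt();            // next Ints.BYTES bytes hold the instance ID
    System.out.println("groupId=" + groupId + ", instanceId=" + instanceId);
}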

From source file:co.cask.cdap.data2.transaction.stream.AbstractStreamFileConsumer.java

/**
 * Determines whether the initial entry state needs to be cached.
 *
 * @param row Entry row key
 * @param stateValue Entry state value
 * @param cache The cache to fill if the row key and state value need to be cached.
 * @return {@code true} if the entry is stored into the cache, {@code false} if it is not.
 */
private boolean storeInitState(byte[] row, byte[] stateValue, Map<byte[], byte[]> cache) {
    // Logic is adapted from QueueEntryRow.canConsume(), with modifications.

    if (stateValue == null) {
        // The state value shouldn't be null, as the row is only written together with a state value.
        return false;
    }

    long offset = Bytes.toLong(row, row.length - Longs.BYTES);
    long stateWritePointer = QueueEntryRow.getStateWritePointer(stateValue);

    // If the entry offset is not accepted by the read filter, this consumer won't see this entry in future read.
    // If it is written after the current transaction, it happens with the current consumer config.
    // In both cases, no need to cache
    if (!readFilter.acceptOffset(offset) || stateWritePointer >= transaction.getWritePointer()) {
        return false;
    }

    // If state is PROCESSED and committed, need to memorize it so that it can be skipped.
    ConsumerEntryState state = QueueEntryRow.getState(stateValue);
    if (state == ConsumerEntryState.PROCESSED && transaction.isVisible(stateWritePointer)) {
        // No need to store the state value.
        cache.put(row, null);
        return true;
    }

    // Special case for FIFO.
    // For group size > 1 case, if the state is not committed, need to memorize current state value for claim entry.
    if (consumerConfig.getDequeueStrategy() == DequeueStrategy.FIFO && consumerConfig.getGroupSize() > 1) {
        int stateInstanceId = QueueEntryRow.getStateInstanceId(stateValue);

        // If the state was written by a consumer that is still live, and not by itself,
        // record the state value as null so that it'll get skipped in the claim entry logic.
        if (stateInstanceId < consumerConfig.getGroupSize()
                && stateInstanceId != consumerConfig.getInstanceId()) {
            cache.put(row, null);
        } else {
            // Otherwise memorize the value for checkAndPut operation in claim entry.
            cache.put(row, stateValue);
        }
        return true;
    }

    return false;
}

From source file:net.bither.bitherj.utils.Utils.java

public static long parseLongFromAddress(InetAddress address) {
    byte[] bytes = address.getAddress();
    if (bytes.length >= Longs.BYTES) {
        return Longs.fromByteArray(bytes);
    } else {
        return Ints.fromByteArray(bytes);
    }
}

From source file:io.druid.query.aggregation.histogram.ApproximateHistogram.java

public int getDenseStorageSize() {
    return Ints.BYTES * 2 + Floats.BYTES * size + Longs.BYTES * size + Floats.BYTES * 2;
}

From source file:io.druid.query.aggregation.histogram.ApproximateHistogram.java

public int getSparseStorageSize() {
    return Ints.BYTES * 2 + Floats.BYTES * binCount + Longs.BYTES * binCount + Floats.BYTES * 2;
}
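
With Ints.BYTES and Floats.BYTES both equal to 4 and Longs.BYTES equal to 8, the two methods above work out to a 16-byte header plus 12 bytes per entry: 16 + 12 * size for the dense form and 16 + 12 * binCount for the sparse form, which matches the "16 + 12 * size" figure quoted in toBytesDense below.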

From source file:io.druid.query.aggregation.histogram.ApproximateHistogram.java

/**
 * Writes the dense representation of this ApproximateHistogram object to the given byte-buffer
 *
 * Requires 16 + 12 * size bytes of storage
 *
 * @param buf ByteBuffer to write the ApproximateHistogram to
 */
public void toBytesDense(ByteBuffer buf) {
    buf.putInt(size);
    buf.putInt(binCount);

    buf.asFloatBuffer().put(positions);
    buf.position(buf.position() + Floats.BYTES * positions.length);
    buf.asLongBuffer().put(bins);
    buf.position(buf.position() + Longs.BYTES * bins.length);

    buf.putFloat(min);
    buf.putFloat(max);
}

From source file:io.druid.query.aggregation.histogram.ApproximateHistogram.java

/**
 * Constructs an ApproximateHistogram object from the given dense byte-buffer representation
 *
 * @param buf ByteBuffer to construct an ApproximateHistogram from
 *
 * @return ApproximateHistogram constructed from the given ByteBuffer
 */
public static ApproximateHistogram fromBytesDense(ByteBuffer buf) {
    int size = buf.getInt();
    int binCount = buf.getInt();

    float[] positions = new float[size];
    long[] bins = new long[size];

    buf.asFloatBuffer().get(positions);
    buf.position(buf.position() + Floats.BYTES * positions.length);
    buf.asLongBuffer().get(bins);
    buf.position(buf.position() + Longs.BYTES * bins.length);

    float min = buf.getFloat();
    float max = buf.getFloat();

    return new ApproximateHistogram(binCount, positions, bins, min, max);
}
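
As a plausible round-trip sketch (not taken from the Druid source; h stands for any already-populated ApproximateHistogram), the storage-size and serialization methods above combine as follows:

ByteBuffer buf = ByteBuffer.allocate(h.getDenseStorageSize()); // 16 + 12 * size bytes
h.toBytesDense(buf);
buf.flip();                                                     // rewind before reading back
ApproximateHistogram copy = ApproximateHistogram.fromBytesDense(buf);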