Example usage for com.google.common.primitives Longs toByteArray

Introduction

This page collects usage examples for com.google.common.primitives.Longs.toByteArray drawn from open-source projects.

Prototype

public static byte[] toByteArray(long value) 

Document

Returns a big-endian representation of value in an 8-element byte array; equivalent to ByteBuffer.allocate(8).putLong(value).array().
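
For orientation, here is a minimal standalone sketch of that equivalence (not taken from any of the projects below); it only assumes Guava on the classpath:

import java.nio.ByteBuffer;
import java.util.Arrays;

import com.google.common.primitives.Longs;

public class ToByteArrayEquivalence {
    public static void main(String[] args) {
        long value = 0x0102030405060708L;
        // Longs.toByteArray writes the most significant byte first.
        byte[] viaLongs = Longs.toByteArray(value);                        // {1, 2, 3, 4, 5, 6, 7, 8}
        byte[] viaBuffer = ByteBuffer.allocate(8).putLong(value).array();  // same big-endian bytes
        System.out.println(Arrays.equals(viaLongs, viaBuffer));            // prints "true"
    }
}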

Usage

From source file: com.google.template.soy.passes.MsgIdFunctionPass.java

private static String formatMsgId(long id) {
    return BaseEncoding.base64Url().encode(Longs.toByteArray(id));
}
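
A hypothetical round trip (not part of MsgIdFunctionPass; imports assumed: com.google.common.io.BaseEncoding, com.google.common.primitives.Longs) shows what the helper produces: the 8 bytes of the id become a 12-character URL-safe string, and decoding restores the id.

// Illustration only: encode and decode a message id.
static long roundTrip(long id) {
    String encoded = BaseEncoding.base64Url().encode(Longs.toByteArray(id)); // e.g. "AAAAAAAAACo=" for 42L
    return Longs.fromByteArray(BaseEncoding.base64Url().decode(encoded));    // returns the original id
}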

From source file: org.echocat.marquardt.common.domain.certificate.Certificate.java

private void serializeTo(@Nonnull @WillNotClose final OutputStream out) throws IOException {
    out.write(VERSION);
    new PublicKeyWithMechanism(_issuerPublicKey).writeTo(out);
    new PublicKeyWithMechanism(_clientPublicKey).writeTo(out);
    out.write(Longs.toByteArray(_expiresAt.getTime()));
    out.write(Longs.toByteArray(RolesSerializer.from(_roles)));
    _payload.writeTo(out);
}
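
A hedged sketch of the matching read side (not the actual Marquardt code; the helper name readLong and the use of DataInputStream are assumptions; imports: java.io.DataInputStream, java.io.InputStream, java.io.IOException): read exactly 8 bytes back and rebuild the long with Longs.fromByteArray.

// Hypothetical helper showing the reverse of the writes above.
private static long readLong(final InputStream in) throws IOException {
    final byte[] buffer = new byte[8];
    new DataInputStream(in).readFully(buffer); // fails fast if fewer than 8 bytes remain
    return Longs.fromByteArray(buffer);
}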

From source file: co.paralleluniverse.galaxy.berkeleydb.BerkeleyDB.java

@Override
public short casOwner(long id, short oldNode, short newNode) {
    final DatabaseEntry key = new DatabaseEntry(Longs.toByteArray(id));
    final DatabaseEntry value = new DatabaseEntry();

    final Transaction txn = env.beginTransaction(null, null);
    try {
        OperationStatus status;

        value.setData(Shorts.toByteArray(newNode));
        if (oldNode < 0) {
            status = ownerDirectory.putNoOverwrite(txn, key, value);
            if (status == OperationStatus.SUCCESS) {
                LOG.debug("CAS owner succeeded.");
                txn.commit();
                return newNode;
            }
        }

        status = ownerDirectory.get(txn, key, value, LockMode.RMW);
        if (status == OperationStatus.SUCCESS) {
            final short curOldNode = Shorts.fromByteArray(value.getData());
            if (LOG.isDebugEnabled())
                LOG.debug("CAS owner of {}: current old node: {} wanted old node: {}",
                        new Object[] { hex(id), curOldNode, oldNode });
            if (oldNode != curOldNode) {
                assert curOldNode >= 0;
                LOG.debug("CAS owner failed.");
                txn.commit();
                return curOldNode;
            }

            LOG.debug("CAS owner succeeded.");
            value.setData(Shorts.toByteArray(newNode));
            ownerDirectory.put(txn, key, value);
            txn.commit();
            return newNode;
        } else if (status == OperationStatus.NOTFOUND) {
            LOG.debug("CAS owner failed.");
            txn.commit();
            return (short) -1;
        }

        LOG.debug("Bad status: {}", status);
        throw new AssertionError();
    } catch (Exception e) {
        LOG.error("Exception during DB operation. Aborting transaction.", e);
        txn.abort();
        throw Throwables.propagate(e);
    }
}
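
A hedged aside on why Longs.toByteArray is a convenient key encoding here (an assumption about intent, not stated in the Galaxy source): fixed-width big-endian keys compare in the same order as the unsigned ids when the bytes are compared lexicographically, which can be checked with Guava's UnsignedBytes (imports: java.util.Comparator, com.google.common.primitives.UnsignedBytes).

// Illustration only: unsigned lexicographic byte order of big-endian keys
// matches the unsigned numeric order of the encoded ids.
Comparator<byte[]> keyOrder = UnsignedBytes.lexicographicalComparator();
assert keyOrder.compare(Longs.toByteArray(1L), Longs.toByteArray(2L)) < 0;
assert keyOrder.compare(Longs.toByteArray(255L), Longs.toByteArray(256L)) < 0;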

From source file: org.voltdb.sysprocs.saverestore.CSVSnapshotWritePlan.java

static private List<Long> computeDedupedLocalSites(long txnId, SiteTracker tracker) {
    MessageDigest digest;
    try {
        digest = MessageDigest.getInstance("SHA-1");
    } catch (NoSuchAlgorithmException e) {
        throw new AssertionError(e);
    }

    /*
     * List of partitions to include if this snapshot is
     * going to be deduped. Attempts to break up the work
     * by seeding an RNG and selecting
     * a random replica to do the work. Will not work in failure
     * cases, but we don't use dedupe when we want durability.
     *
     * Originally used the partition id as the seed, but it turns out
     * that nextInt(2) returns a 1 for seeds 0-4095. Now use SHA-1
     * on the txnid + partition id.
     */
    List<Long> sitesToInclude = new ArrayList<Long>();
    for (long localSite : tracker.getLocalSites()) {
        final int partitionId = tracker.getPartitionForSite(localSite);
        List<Long> sites = new ArrayList<Long>(
                tracker.getSitesForPartition(tracker.getPartitionForSite(localSite)));
        Collections.sort(sites);

        digest.update(Longs.toByteArray(txnId));
        final long seed = Longs.fromByteArray(Arrays.copyOf(digest.digest(Ints.toByteArray(partitionId)), 8));

        int siteIndex = new java.util.Random(seed).nextInt(sites.size());
        if (localSite == sites.get(siteIndex)) {
            sitesToInclude.add(localSite);
        }
    }
    return sitesToInclude;
}

From source file: dti.tdl.messaging.TDLMessageHandler.java

public static void constructFrame(TDLMessage message) {
    byte[] start = { (byte) 1 };
    byte[] startMsg = { (byte) 2 };
    byte[] endMsg = { (byte) 3 };
    byte[] end = { (byte) 4 };
    byte[] profile = message.getProfileId().substring(0, 4).getBytes(); //4 bytes profile id
    byte[] msgType = { message.getMsgType() }; // 1 byte message type
    byte[] from = hexStringToByteArray(message.getFromId());
    byte[] to = hexStringToByteArray(message.getToId());
    byte[] data = message.getMsg();
    int numBlk = 1;

    if (data.length > TDLMessageHandler.messageMaxBytes) {
        numBlk = (int) (data.length / TDLMessageHandler.messageMaxBytes);
        if (data.length % TDLMessageHandler.messageMaxBytes > 0) {
            numBlk++;
        }

    }
    //String utf8msg = new String()
    long checksum = CRC32Checksum(data);

    byte[] checksumBytes = Longs.toByteArray(checksum);
    System.out.println("checksum len = " + checksumBytes.length);
    String checksumStr = "Checksum: ";
    for (int j = 0; j < checksumBytes.length; j++) {
        checksumStr = checksumStr + " " + (int) checksumBytes[j];

    }
    System.out.println(checksumStr);
    byte[] frame = null;
    int msgLength = data.length;
    int msgIdx = 0;
    for (int i = 0; i < numBlk; i++) {
        if (msgLength > TDLMessageHandler.messageMaxBytes) {
            msgLength = (int) (msgLength - TDLMessageHandler.messageMaxBytes);
        }
        int msgBlkLength = (int) TDLMessageHandler.messageMaxBytes;
        if (i == numBlk - 1) {
            msgBlkLength = msgLength;
        }
        msgIdx = (int) (TDLMessageHandler.messageMaxBytes * i);
        frame = new byte[start.length + 1 + from.length + to.length + profile.length + checksumBytes.length
                + startMsg.length + msgBlkLength + endMsg.length + end.length];
        System.arraycopy(start, 0, frame, 0, 1);
        System.arraycopy(msgType, 0, frame, 1, 1);
        System.arraycopy(from, 0, frame, 2, from.length);
        System.arraycopy(to, 0, frame, 2 + from.length, to.length);
        System.arraycopy(profile, 0, frame, 2 + from.length + to.length, profile.length);
        System.arraycopy(checksumBytes, 0, frame, 2 + from.length + to.length + profile.length,
                checksumBytes.length);
        System.arraycopy(startMsg, 0, frame,
                2 + from.length + to.length + profile.length + checksumBytes.length, startMsg.length);
        System.arraycopy(Arrays.copyOfRange(data, msgIdx, msgIdx + msgBlkLength), 0, frame,
                2 + from.length + to.length + profile.length + checksumBytes.length + startMsg.length,
                msgBlkLength);
        System.arraycopy(endMsg, 0, frame, 2 + from.length + to.length + profile.length + checksumBytes.length
                + startMsg.length + msgBlkLength, endMsg.length);
        System.arraycopy(end, 0, frame, 2 + from.length + to.length + profile.length + checksumBytes.length
                + startMsg.length + msgBlkLength + endMsg.length, end.length);

        String txMsg = null;
        StringBuilder builder = new StringBuilder();

        for (int j = 0; j < frame.length; j++) {
            if (j < frame.length - 1) {
                builder.append((int) frame[j] + ",");
            } else {
                builder.append((int) frame[j]);
            }
        }
        txMsg = builder.toString();

        String[] txBytesStrArray = txMsg.split(",");
        byte[] txBytes = new byte[txBytesStrArray.length];
        //String frameMsg = "Frame content (before): ";
        for (int j = 0; j < txBytesStrArray.length; j++) {
            int byteInt = Integer.parseInt(txBytesStrArray[j]);
            //frameMsg = frameMsg+" "+byteInt;
            txBytes[j] = (byte) byteInt;
        }
        //System.out.println(frameMsg);
        txStack.add(txMsg);
    }
}

From source file: org.dcache.pool.repository.ceph.CephFileStore.java

@Override
public URI create(PnfsId id) throws IOException {
    String imageName = toImageName(id);
    try {
        rbd.create(imageName, 0);
        ctx.setXattr(toObjName(imageName), CREATION_TIME_ATTR, Longs.toByteArray(System.currentTimeMillis()));
    } catch (RadosException e) {
        throwIfMappable(e, "Failed to create file: " + imageName);
        throw e;
    }
    return toUri(imageName);
}

From source file: com.facebook.buck.cxx.platform.ObjectFileScrubbers.java

public static void putLittleEndianLong(ByteBuffer buffer, long value) {
    byte[] bytes = Longs.toByteArray(value);
    byte[] flipped = { bytes[7], bytes[6], bytes[5], bytes[4], bytes[3], bytes[2], bytes[1], bytes[0] };
    buffer.put(flipped);
}
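
Because Longs.toByteArray only produces big-endian output, the snippet reverses the array by hand. An alternative sketch (not Buck's code; the method name is hypothetical) gets the same little-endian bytes by letting a temporary ByteBuffer apply the byte order (imports: java.nio.ByteBuffer, java.nio.ByteOrder).

// Alternative sketch, for comparison only.
public static void putLittleEndianLongViaBuffer(ByteBuffer buffer, long value) {
    byte[] littleEndian = ByteBuffer.allocate(Long.BYTES).order(ByteOrder.LITTLE_ENDIAN).putLong(value).array();
    buffer.put(littleEndian); // same bytes as the manual flip above
}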

From source file: org.opendedup.mtools.ClusterRedundancyCheck.java

private void checkDedupFile(File mapFile) throws IOException {
    if (SDFSLogger.isDebug())
        SDFSLogger.getLog().debug("Cluster check " + mapFile.getPath());
    LongByteArrayMap mp = LongByteArrayMap
            .getMap(mapFile.getName().substring(0, mapFile.getName().length() - 4));
    long prevpos = 0;
    try {
        ArrayList<SparseDataChunk> chunks = new ArrayList<SparseDataChunk>(MAX_BATCH_SIZE);
        mp.iterInit();
        SparseDataChunk ck = mp.nextValue(false);
        long corruptBlocks = 0;
        while (ck != null) {
            fEvt.curCt += (mp.getIterPos() - prevpos);
            prevpos = mp.getIterPos();
            ck.setFpos((prevpos / mp.getFree().length) * Main.CHUNK_LENGTH);
            HashLocPair p = ck.getFingers().get(0);
            if (Main.chunkStoreLocal) {
                byte[] exists = Longs.toByteArray(HCServiceProxy.hashExists(p.hash, true));

                if (exists[0] == -1) {
                    if (SDFSLogger.isDebug())
                        SDFSLogger.getLog().debug(
                                "file [" + mapFile + "] could not find " + StringUtils.getHexString(p.hash));
                    corruptBlocks++;
                } else {
                    byte[] currenthl = p.hash;
                    exists[0] = currenthl[0];
                    try {
                        int ncopies = 0;
                        for (int i = 1; i < 8; i++) {
                            if (exists[i] > (byte) 0) {
                                ncopies++;
                            }
                        }
                        if (ncopies < Main.volume.getClusterCopies()
                                && ncopies < HCServiceProxy.cs.getStorageNodes().size()) {
                            byte[] nb = HCServiceProxy.fetchChunk(p.hash, exists, false);
                            exists = HCServiceProxy.writeChunk(p.hash, nb, exists).getHashLocs();
                            ncopies = 0;
                            for (int i = 1; i < 8; i++) {
                                if (exists[i] > (byte) 0) {
                                    ncopies++;
                                }
                            }
                            if (ncopies >= Main.volume.getClusterCopies()) {
                                this.newRendundantBlocks++;
                            } else
                                this.failedRendundantBlocks++;

                        } else if (ncopies < Main.volume.getClusterCopies()
                                && ncopies >= HCServiceProxy.cs.getStorageNodes().size()) {
                            this.failedRendundantBlocks++;
                        }
                        exists[0] = currenthl[0];

                        if (!brequals(currenthl, exists)) {
                            p.hashloc = exists;
                        }
                        mp.put(ck.getFpos(), ck);
                    } catch (IOException e) {
                        this.failedRendundantBlocks++;
                    }

                }

            } else {
                chunks.add(ck);
                if (chunks.size() >= MAX_BATCH_SIZE) {
                    corruptBlocks += batchCheck(chunks, mp);
                    chunks = new ArrayList<SparseDataChunk>(MAX_BATCH_SIZE);
                }
            }
            ck = mp.nextValue(false);

        }

        if (chunks.size() > 0) {
            corruptBlocks += batchCheck(chunks, mp);
        }
        if (corruptBlocks > 0) {
            this.corruptFiles++;
            SDFSLogger.getLog().info("************** map file " + mapFile.getPath() + " is suspect, ["
                    + corruptBlocks + "] missing blocks found.***************");
        }
    } catch (Exception e) {
        if (SDFSLogger.isDebug())
            SDFSLogger.getLog().debug("error while checking file [" + mapFile.getPath() + "]", e);
        throw new IOException(e);
    } finally {
        mp.close();
        mp = null;
    }
    this.files++;
}

From source file: com.indeed.lsmtree.recordcache.MemcachedCache.java

public boolean checkAvailability(String key) {
    long time = System.nanoTime();
    key += "-" + UUID.randomUUID().toString();
    OperationFuture<Boolean> future = memcache.set(key, CACHE_EXPIRY_SECONDS, Longs.toByteArray(time),
            identityTranscoder);
    try {
        if (!future.get())
            return false;
    } catch (Exception e) {
        return false;
    }
    byte[] bytes = memcache.get(key, identityTranscoder);
    memcache.delete(key);
    return bytes != null && Longs.fromByteArray(bytes) == time;
}

From source file: spimedb.SpimeDB.java

public static byte[] uuidBytes() {
    return ArrayUtils.addAll(Longs.toByteArray(rng.nextLong()), Longs.toByteArray(rng.nextLong()));
}
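
Since two big-endian longs are simply concatenated, the result is 16 random bytes, the same width as a UUID. A hedged follow-up sketch (not from SpimeDB; imports: java.nio.ByteBuffer, java.util.UUID) reads them back into a java.util.UUID:

// Illustration only: view the 16 bytes as a java.util.UUID.
ByteBuffer bb = ByteBuffer.wrap(uuidBytes());
UUID uuid = new UUID(bb.getLong(), bb.getLong()); // most-significant bits, then least-significant bits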