Example usage for java.util UUID UUID

List of usage examples for java.util UUID UUID

Introduction

In this page you can find the example usage for java.util UUID UUID.

Prototype

public UUID(long mostSigBits, long leastSigBits) 

Source Link

Document

Constructs a new UUID using the specified data.

Usage

From source file:org.apache.hadoop.hbase.regionserver.wal.HLogKey.java

/**
 * Populates this key's fields from its protobuf {@code WALKey} representation.
 *
 * @param walKey protobuf-encoded WAL key to read from
 * @param uncompressor used to expand dictionary-compressed names when a
 *          compression context is configured on this key
 * @throws IOException if uncompression of a name fails
 */
public void readFieldsFromPb(WALKey walKey, WALCellCodec.ByteStringUncompressor uncompressor)
        throws IOException {
    if (this.compressionContext != null) {
        // Region and table names were dictionary-compressed on write; expand them.
        this.encodedRegionName = uncompressor.uncompress(walKey.getEncodedRegionName(),
                compressionContext.regionDict);
        byte[] tablenameBytes = uncompressor.uncompress(walKey.getTableName(), compressionContext.tableDict);
        this.tablename = TableName.valueOf(tablenameBytes);
    } else {
        this.encodedRegionName = walKey.getEncodedRegionName().toByteArray();
        this.tablename = TableName.valueOf(walKey.getTableName().toByteArray());
    }
    clusterIds.clear();
    if (walKey.hasClusterId()) {
        //When we are reading the older log (0.95.1 release)
        //This is definitely the originating cluster
        clusterIds
                .add(new UUID(walKey.getClusterId().getMostSigBits(), walKey.getClusterId().getLeastSigBits()));
    }
    // Newer logs carry the full list of consuming cluster ids.
    for (HBaseProtos.UUID clusterId : walKey.getClusterIdsList()) {
        clusterIds.add(new UUID(clusterId.getMostSigBits(), clusterId.getLeastSigBits()));
    }
    if (walKey.hasNonceGroup()) {
        this.nonceGroup = walKey.getNonceGroup();
    }
    if (walKey.hasNonce()) {
        this.nonce = walKey.getNonce();
    }
    // Scopes are optional; null means no replication scopes were recorded.
    this.scopes = null;
    if (walKey.getScopesCount() > 0) {
        this.scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
        for (FamilyScope scope : walKey.getScopesList()) {
            // Family names may also be dictionary-compressed.
            byte[] family = (compressionContext == null) ? scope.getFamily().toByteArray()
                    : uncompressor.uncompress(scope.getFamily(), compressionContext.familyDict);
            this.scopes.put(family, scope.getScopeType().getNumber());
        }
    }
    this.logSeqNum = walKey.getLogSequenceNumber();
    this.writeTime = walKey.getWriteTime();
}

From source file:org.apache.hadoop.hbase.wal.WALSplitUtil.java

/**
 * This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &amp;
 * WALEdit from the passed in WALEntry.
 *
 * @param entry the WALEntry to convert; may be null, in which case an empty list is returned
 * @param cells scanner positioned over the cells associated with the entry
 * @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
 *          extracted from the passed in WALEntry; may be null if the caller only
 *          needs the mutations
 * @param durability durability applied to every reconstructed mutation
 * @return list of Pair&lt;MutationType, Mutation&gt; to be replayed
 * @throws IOException if advancing the cell scanner fails
 */
public static List<MutationReplay> getMutationsFromWALEntry(AdminProtos.WALEntry entry, CellScanner cells,
        Pair<WALKey, WALEdit> logEntry, Durability durability) throws IOException {
    if (entry == null) {
        // return an empty array
        return Collections.emptyList();
    }

    // Prefer the original sequence number when the entry carries one;
    // otherwise fall back to the log sequence number.
    long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber()
            : entry.getKey().getLogSequenceNumber();
    int count = entry.getAssociatedCellCount();
    List<MutationReplay> mutations = new ArrayList<>();
    Cell previousCell = null;
    Mutation m = null;
    WALKeyImpl key = null;
    WALEdit val = null;
    // Only materialize a WALEdit when the caller asked for the key/edit pair.
    if (logEntry != null) {
        val = new WALEdit();
    }

    for (int i = 0; i < count; i++) {
        // Throw index out of bounds if our cell count is off
        if (!cells.advance()) {
            throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
        }
        Cell cell = cells.current();
        if (val != null)
            val.add(cell);

        // A new mutation starts whenever the row or the cell type changes.
        boolean isNewRowOrType = previousCell == null || previousCell.getTypeByte() != cell.getTypeByte()
                || !CellUtil.matchingRows(previousCell, cell);
        if (isNewRowOrType) {
            // Create new mutation
            if (CellUtil.isDelete(cell)) {
                m = new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Deletes don't have nonces.
                mutations.add(new MutationReplay(ClientProtos.MutationProto.MutationType.DELETE, m,
                        HConstants.NO_NONCE, HConstants.NO_NONCE));
            } else {
                m = new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                // Puts might come from increment or append, thus we need nonces.
                long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup()
                        : HConstants.NO_NONCE;
                long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
                mutations.add(
                        new MutationReplay(ClientProtos.MutationProto.MutationType.PUT, m, nonceGroup, nonce));
            }
        }
        // Accumulate the cell into the current (possibly just-created) mutation.
        if (CellUtil.isDelete(cell)) {
            ((Delete) m).add(cell);
        } else {
            ((Put) m).add(cell);
        }
        m.setDurability(durability);
        previousCell = cell;
    }

    // reconstruct WALKey
    if (logEntry != null) {
        org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.WALKey walKeyProto = entry.getKey();
        List<UUID> clusterIds = new ArrayList<>(walKeyProto.getClusterIdsCount());
        for (HBaseProtos.UUID uuid : entry.getKey().getClusterIdsList()) {
            clusterIds.add(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
        }
        key = new WALKeyImpl(walKeyProto.getEncodedRegionName().toByteArray(),
                TableName.valueOf(walKeyProto.getTableName().toByteArray()), replaySeqId,
                walKeyProto.getWriteTime(), clusterIds, walKeyProto.getNonceGroup(), walKeyProto.getNonce(),
                null);
        logEntry.setFirst(key);
        logEntry.setSecond(val);
    }

    return mutations;
}

From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java

/**
 * Collects the UUID of every entry recorded in this tar file's index.
 *
 * @return set of entry UUIDs, sized up-front from the index length
 */
Set<UUID> getUUIDs() {
    Set<UUID> result = newHashSetWithExpectedSize(index.remaining() / TarEntry.SIZE);
    // Each fixed-size index record starts with the entry's msb/lsb longs.
    for (int pos = index.position(); pos < index.limit(); pos += TarEntry.SIZE) {
        long msb = index.getLong(pos);
        long lsb = index.getLong(pos + 8);
        result.add(new UUID(msb, lsb));
    }
    return result;
}

From source file:org.rapla.storage.impl.server.LocalAbstractCachableOperator.java

/**
 * Derives a deterministic, type-prefixed id from the given seed string.
 * The seed is MD5-hashed and the digest is coerced into a well-formed
 * (version 4 layout, IETF variant) UUID, whose string form is then
 * prefixed according to the given type.
 *
 * @param raplaType type whose prefix replaces the first UUID segment
 * @param seed input string; equal seeds always yield equal ids
 * @return the type-prefixed id string
 * @throws RaplaException if MD5 is unavailable or produces an unexpected digest size
 */
public String createId(RaplaType raplaType, String seed) throws RaplaException {

    MessageDigest md;
    try {
        md = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
        throw new RaplaException(e.getMessage(), e);
    }
    // Fix: hash with an explicit charset. The bare seed.getBytes() used the
    // platform default charset, so the same non-ASCII seed could produce
    // different ids on different machines.
    byte[] data = md.digest(seed.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    if (data.length != 16) {
        throw new RaplaException("Wrong algorithm");
    }
    data[6] &= 0x0f; /* clear version        */
    data[6] |= 0x40; /* set to version 4     */
    data[8] &= 0x3f; /* clear variant        */
    data[8] |= 0x80; /* set to IETF variant  */

    // Fold the 16 digest bytes into the two UUID halves, big-endian.
    long msb = 0;
    long lsb = 0;
    for (int i = 0; i < 8; i++)
        msb = (msb << 8) | (data[i] & 0xff);
    for (int i = 8; i < 16; i++)
        lsb = (lsb << 8) | (data[i] & 0xff);

    UUID uuid = new UUID(msb, lsb);
    return replaceFirst(raplaType, uuid.toString());
}

From source file:de.frank_durr.ble_v_monitor.MainActivity.java

/**
 * Creates a 128 bit UUID of a service or characteristic from a 128 bit base UUID
 * and a 16 bit service/characteristic id.
 *
 * Example: Given
 * - 128 bit base UUID 550eXXXX-e29b-11d4-a716-446655440000
 * - 16 bit service ID: 0x1234
 * The resulting UUID is generated by replacing XXXX by the 16 bit id of the service:
 * - UUID: 550e1234-e29b-11d4-a716-446655440000
 *
 * @param baseMSB most significant bits of the base UUID
 * @param baseLSB least significant bits of the base UUID
 * @param id 16 bit id of the service or characteristic
 * @return UUID of the service or characteristic
 */
public static UUID getUUID(long baseMSB, long baseLSB, short id) {
    // Clear bits 47..32 of the base, then splice the 16 bit id into that slot.
    long msb = baseMSB & 0xffff0000ffffffffL;
    // Fix: mask the id before shifting. The previous ((long) id) << 32
    // sign-extended negative shorts (ids >= 0x8000), setting the top 16 bits
    // of msb and corrupting the base UUID.
    msb |= (id & 0xffffL) << 32;

    return new UUID(msb, baseLSB);
}

From source file:org.datanucleus.store.hbase.fieldmanager.FetchFieldManager.java

/**
 * Decodes a UUID member value from its stored HBase byte representation.
 * Supports three storage forms: Java serialization (when the member is marked
 * serialized), a raw 16-byte big-endian pair of longs, or the canonical
 * UUID string in UTF-8.
 *
 * @param mmd metadata for the member being fetched
 * @param bytes raw column bytes, or null when the column is absent
 * @return the decoded UUID; the configured default (or null) when bytes is null
 */
private UUID fetchUUIDInternal(AbstractMemberMetaData mmd, byte[] bytes) {
    if (bytes == null) {
        // Handle missing field
        String dflt = HBaseUtils.getDefaultValueForMember(mmd);
        return dflt != null ? UUID.fromString(dflt) : null;
    }

    if (mmd.isSerialized()) {
        // Stored via Java serialization.
        // NOTE(review): native deserialization of untrusted bytes is unsafe;
        // presumably these bytes come from the application's own store — confirm.
        ByteArrayInputStream bis = null;
        ObjectInputStream ois = null;
        try {
            bis = new ByteArrayInputStream(bytes);
            ois = new ObjectInputStream(bis);
            return UUID.class.cast(ois.readObject());
        } catch (ClassNotFoundException e) {
            throw new NucleusException(e.getMessage(), e);
        } catch (IOException e) {
            throw new NucleusException(e.getMessage(), e);
        } finally {
            // closeStream swallows close failures deliberately (best-effort cleanup).
            IOUtils.closeStream(ois);
            IOUtils.closeStream(bis);
        }
    } else {
        if (bytes.length == 16) {
            // serialized as bytes: most-significant long first, then least-significant
            long upper = Bytes.toLong(bytes);
            long lower = Bytes.toLong(ArrayUtils.subarray(bytes, 8, 16));
            return new UUID(upper, lower);
        } else {
            // otherwise stored as the canonical string form
            final String value = new String(bytes, Charsets.UTF_8);
            return UUID.fromString(value);
        }
    }
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadConflict() throws Exception {
    // Records every (ledgerId, uuid) pair the offloader was asked to delete.
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    // Completed later with the set of ledger ids whose offload should fail once.
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    // Records every (ledgerId, uuid) pair whose offload was forced to fail.
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            return errorLedgers.thenCompose((errors) -> {
                // Fail exactly once per listed ledger: remove() makes retries succeed.
                if (errors.remove(ledger.getId())) {
                    failedOffloads.add(Pair.of(ledger.getId(), uuid));
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new Exception("Some kind of error"));
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }

        @Override
        public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uuid,
                Map<String, String> offloadDriverMetadata) {
            deleted.add(Pair.of(ledgerId, uuid));
            return super.deleteOffloaded(ledgerId, uuid, offloadDriverMetadata);
        }
    };
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    // 15 entries with max 10 per ledger forces at least one ledger rollover.
    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    // Arrange for the first ledger's offload to fail.
    Set<Long> errorSet = ConcurrentHashMap.newKeySet();
    errorSet.add(ledger.getLedgersInfoAsList().get(0).getLedgerId());
    errorLedgers.complete(errorSet);

    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException e) {
        // expected
    }
    // The injected failure consumed the error entry, recorded one failed
    // offload, and nothing was deleted yet.
    Assert.assertTrue(errorSet.isEmpty());
    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 0);

    long expectedFailedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    UUID expectedFailedUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertEquals(failedOffloads.stream().findFirst().get(),
            Pair.of(expectedFailedLedger, expectedFailedUUID));
    Assert.assertFalse(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());

    // try offload again
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());

    // The retry must clean up the conflicting first attempt (delete its uuid),
    // offload under a fresh uuid, and mark the context complete.
    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 1);
    Assert.assertEquals(deleted.stream().findFirst().get(), Pair.of(expectedFailedLedger, expectedFailedUUID));
    UUID successUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertFalse(successUUID.equals(expectedFailedUUID));
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
}

From source file:kx.c.java

/**
 * Reads a GUID value from the incoming message buffer: two consecutive
 * longs, most-significant half first.
 * Temporarily clears the {@code a} flag while reading — presumably the
 * byte-order flag, so both halves are decoded big-endian; confirm against
 * the surrounding codec.
 */
UUID rg() {
    boolean saved = a;
    a = false;
    long msb = rj();
    long lsb = rj();
    a = saved;
    return new UUID(msb, lsb);
}

From source file:org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java

/**
 * Resolves the segments referenced by the given entry.
 * Uses the pre-compiled graph when one is available; otherwise decodes the
 * reference list directly from the segment header.
 *
 * @return the referenced segment ids, or null when the graph has no entry for id
 */
@CheckForNull
private List<UUID> getReferences(TarEntry entry, UUID id, Map<UUID, List<UUID>> graph) throws IOException {
    if (graph != null) {
        return graph.get(id);
    }
    // No pre-compiled graph: read the header (capped at 16 * 256 bytes).
    ByteBuffer segment = access.read(entry.offset(), Math.min(entry.size(), 16 * 256));
    int base = segment.position();
    int refCount = segment.get(base + REF_COUNT_OFFSET) & 0xff;
    List<UUID> refIds = newArrayList();
    // Reference records are 16 bytes each, starting right after the first record.
    for (int i = 1; i <= refCount; i++) {
        int offset = base + 16 * i;
        refIds.add(new UUID(segment.getLong(offset), segment.getLong(offset + 8)));
    }
    return refIds;
}

From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java

/**
 * Resolves the segments referenced by the given entry.
 * Uses the pre-compiled graph when one is available; otherwise decodes the
 * reference list directly from the segment header. Never returns null.
 */
@Nonnull
private List<UUID> getReferences(TarEntry entry, UUID id, Map<UUID, List<UUID>> graph) throws IOException {
    if (graph != null) {
        List<UUID> uuids = graph.get(id);
        if (uuids != null) {
            return uuids;
        }
        return Collections.<UUID>emptyList();
    }
    // No pre-compiled graph: read the header (capped at 16 * 256 bytes).
    ByteBuffer segment = access.read(entry.offset(), Math.min(entry.size(), 16 * 256));
    int base = segment.position();
    int refCount = segment.get(base + REF_COUNT_OFFSET) & 0xff;
    List<UUID> refIds = newArrayList();
    // Reference records are 16 bytes each, starting right after the first record.
    for (int i = 1; i <= refCount; i++) {
        int offset = base + 16 * i;
        refIds.add(new UUID(segment.getLong(offset), segment.getLong(offset + 8)));
    }
    return refIds;
}