Example usage for java.util UUID getMostSignificantBits

Introduction

On this page you can find usage examples for java.util.UUID.getMostSignificantBits(), collected from open-source projects.

Prototype

public long getMostSignificantBits() 

Document

Returns the most significant 64 bits of this UUID's 128-bit value.
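
Before the project examples below, here is a minimal self-contained sketch of the typical pattern: read both 64-bit halves and rebuild an equal UUID with the UUID(long, long) constructor. The class name UuidBitsExample is illustrative only.

import java.util.UUID;

public class UuidBitsExample {
    public static void main(String[] args) {
        UUID uuid = UUID.randomUUID();

        // The 128-bit UUID value is exposed as two signed 64-bit halves.
        long msb = uuid.getMostSignificantBits();
        long lsb = uuid.getLeastSignificantBits();

        // The two longs are sufficient to reconstruct an equal UUID.
        UUID rebuilt = new UUID(msb, lsb);
        System.out.println(uuid.equals(rebuilt)); // prints true
    }
}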

Usage

From source file:org.jboss.dashboard.workspace.WorkspacesManager.java

/**
 * Generate a unique workspace identifier
 */
public synchronized String generateWorkspaceId() throws Exception {
    UUID uuid = UUID.randomUUID();
    ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
    bb.putLong(uuid.getMostSignificantBits());
    bb.putLong(uuid.getLeastSignificantBits());
    return Base64.encodeBase64URLSafeString(bb.array());
}
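
As a companion to the snippet above, here is a sketch of the inverse operation (a hypothetical helper, not part of WorkspacesManager), assuming java.nio.ByteBuffer and Commons Codec's Base64, whose decodeBase64 accepts the URL-safe alphabet:

/**
 * Hypothetical inverse of generateWorkspaceId(): decode the URL-safe Base64
 * string back into the original UUID.
 */
public static UUID decodeWorkspaceId(String workspaceId) {
    ByteBuffer bb = ByteBuffer.wrap(Base64.decodeBase64(workspaceId));
    long msb = bb.getLong();
    long lsb = bb.getLong();
    return new UUID(msb, lsb);
}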

From source file:org.hillview.dataset.RemoteDataSet.java

/**
 * Zip operation on two IDataSet objects that need to reside on the same remote server.
 */
@Override
public <S> Observable<PartialResult<IDataSet<Pair<T, S>>>> zip(final IDataSet<S> other) {
    if (!(other instanceof RemoteDataSet<?>)) {
        throw new RuntimeException("Unexpected type in Zip " + other);
    }
    final RemoteDataSet<S> rds = (RemoteDataSet<S>) other;

    // zip commands are not valid if the RemoteDataSet instances point to different
    // actor systems or different nodes.
    final HostAndPort leftAddress = this.serverEndpoint;
    final HostAndPort rightAddress = rds.serverEndpoint;
    if (!leftAddress.equals(rightAddress)) {
        throw new RuntimeException("Zip command invalid for RemoteDataSets "
                + "across different servers | left: " + leftAddress + ", right:" + rightAddress);
    }

    final ZipOperation zip = new ZipOperation(rds.remoteHandle);
    final byte[] serializedOp = SerializationUtils.serialize(zip);
    final UUID operationId = UUID.randomUUID();
    final Command command = Command.newBuilder().setIdsIndex(this.remoteHandle)
            .setSerializedOp(ByteString.copyFrom(serializedOp)).setHighId(operationId.getMostSignificantBits())
            .setLowId(operationId.getLeastSignificantBits()).build();
    final SerializedSubject<PartialResult<IDataSet<Pair<T, S>>>, PartialResult<IDataSet<Pair<T, S>>>> subj = createSerializedSubject();
    final StreamObserver<PartialResponse> responseObserver = new NewDataSetObserver<Pair<T, S>>(subj);
    return subj.unsubscribeOn(ExecutorUtils.getUnsubscribeScheduler()).doOnSubscribe(
            () -> this.stub.withDeadlineAfter(TIMEOUT, TimeUnit.MILLISECONDS).zip(command, responseObserver))
            .doOnUnsubscribe(() -> this.unsubscribe(operationId));
}
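
The operation UUID travels inside the protobuf Command as two long fields. A one-line sketch of the receiving side (hypothetical, not Hillview's actual server code, assuming the usual protobuf-generated accessors getHighId() and getLowId()):

// Rebuild the operation id from the two halves carried in the Command.
UUID operationId = new UUID(command.getHighId(), command.getLowId());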

From source file:com.opengamma.component.factory.engine.EngineConfigurationComponentFactory.java

/**
 * Creates a random logical server unique identifier. This is used if an explicit identifier is not set in the configuration file.
 * <p>
 * This is a 24 character string using base-64 characters, created using the algorithm from {@link GUIDGenerator} for uniqueness.
 * 
 * @return the logical server unique identifier, not null
 */
protected String createLogicalServerId() {
    final UUID uuid = GUIDGenerator.generate();
    final byte[] bytes = new byte[16];
    long x = uuid.getMostSignificantBits();
    bytes[0] = (byte) x;
    bytes[1] = (byte) (x >> 8);
    bytes[2] = (byte) (x >> 16);
    bytes[3] = (byte) (x >> 24);
    bytes[4] = (byte) (x >> 32);
    bytes[5] = (byte) (x >> 40);
    bytes[6] = (byte) (x >> 48);
    bytes[7] = (byte) (x >> 56);
    x = uuid.getLeastSignificantBits();
    bytes[8] = (byte) x;
    bytes[9] = (byte) (x >> 8);
    bytes[10] = (byte) (x >> 16);
    bytes[11] = (byte) (x >> 24);
    bytes[12] = (byte) (x >> 32);
    bytes[13] = (byte) (x >> 40);
    bytes[14] = (byte) (x >> 48);
    bytes[15] = (byte) (x >> 56);
    return Base64.encodeBase64String(bytes);
}
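
Note that the code above packs each long least-significant byte first, which is the reverse of ByteBuffer.putLong's default big-endian order. A minimal sketch of the matching inverse (a hypothetical helper, not part of this component factory):

/**
 * Hypothetical inverse of the packing above: rebuild the UUID from the
 * 16 bytes, reading each long least-significant byte first.
 */
static UUID unpackLittleEndian(byte[] bytes) {
    long msb = 0L;
    long lsb = 0L;
    for (int i = 7; i >= 0; i--) {
        msb = (msb << 8) | (bytes[i] & 0xFFL);
        lsb = (lsb << 8) | (bytes[i + 8] & 0xFFL);
    }
    return new UUID(msb, lsb);
}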

From source file:com.cognitect.transit.TransitMPTest.java

public void testReadUUID() throws IOException {

    UUID uuid = UUID.randomUUID();
    final long hi64 = uuid.getMostSignificantBits();
    final long lo64 = uuid.getLeastSignificantBits();

    assertEquals(0, uuid.compareTo((UUID) readerOf("~u" + uuid.toString()).read()));

    List thing = new ArrayList() {
        {
            add("~#u");
            add(new ArrayList() {
                {
                    add(hi64);
                    add(lo64);
                }
            });
        }
    };

    assertEquals(0, uuid.compareTo((UUID) readerOf(thing).read()));
}

From source file:org.apache.hadoop.hbase.regionserver.wal.HLogKey.java

@Override
@Deprecated
public void write(DataOutput out) throws IOException {
    LOG.warn("HLogKey is being serialized to writable - only expected in test code");
    WritableUtils.writeVInt(out, VERSION.code);
    if (compressionContext == null) {
        Bytes.writeByteArray(out, this.encodedRegionName);
        Bytes.writeByteArray(out, this.tablename.getName());
    } else {
        Compressor.writeCompressed(this.encodedRegionName, 0, this.encodedRegionName.length, out,
                compressionContext.regionDict);
        Compressor.writeCompressed(this.tablename.getName(), 0, this.tablename.getName().length, out,
                compressionContext.tableDict);
    }
    out.writeLong(this.logSeqNum);
    out.writeLong(this.writeTime);
    // Don't need to write the clusters information as we are using protobufs from 0.95
    // Writing only the first clusterId for testing the legacy read
    Iterator<UUID> iterator = clusterIds.iterator();
    if (iterator.hasNext()) {
        out.writeBoolean(true);
        UUID clusterId = iterator.next();
        out.writeLong(clusterId.getMostSignificantBits());
        out.writeLong(clusterId.getLeastSignificantBits());
    } else {
        out.writeBoolean(false);
    }
}
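
For symmetry, a minimal read-side sketch for the cluster id portion only (hypothetical code, not HLogKey's actual readFields implementation), assuming java.io.DataInput and the boolean-then-two-longs layout written above:

private static List<UUID> readClusterIds(DataInput in) throws IOException {
    List<UUID> clusterIds = new ArrayList<>();
    if (in.readBoolean()) {
        // The two longs are the most and least significant halves of the UUID.
        long msb = in.readLong();
        long lsb = in.readLong();
        clusterIds.add(new UUID(msb, lsb));
    }
    return clusterIds;
}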

From source file:org.apache.hadoop.hbase.client.Mutation.java

/**
 * Marks that the clusters with the given clusterIds have consumed the mutation
 *
 * @param clusterIds of the clusters that have consumed the mutation
 */
public Mutation setClusterIds(List<UUID> clusterIds) {
    ByteArrayDataOutput out = ByteStreams.newDataOutput();
    out.writeInt(clusterIds.size());
    for (UUID clusterId : clusterIds) {
        out.writeLong(clusterId.getMostSignificantBits());
        out.writeLong(clusterId.getLeastSignificantBits());
    }
    setAttribute(CONSUMED_CLUSTER_IDS, out.toByteArray());
    return this;
}
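
A sketch of the matching decoder (a hypothetical helper, not HBase's actual implementation), assuming Guava's ByteStreams.newDataInput and the count-then-pairs layout written above:

private static List<UUID> toClusterIds(byte[] bytes) {
    ByteArrayDataInput in = ByteStreams.newDataInput(bytes);
    int count = in.readInt();
    List<UUID> clusterIds = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
        // Each pair of longs is the most/least significant half of one UUID.
        clusterIds.add(new UUID(in.readLong(), in.readLong()));
    }
    return clusterIds;
}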

From source file:org.springframework.amqp.rabbit.junit.BrokerRunning.java

/**
 * Generate the connection id for the connection used by the rule's
 * connection factory.
 * @return the id.
 */
public String generateId() {
    UUID uuid = UUID.randomUUID();
    ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
    bb.putLong(uuid.getMostSignificantBits()).putLong(uuid.getLeastSignificantBits());
    return "SpringBrokerRunning." + Base64Utils.encodeToUrlSafeString(bb.array()).replaceAll("=", "");
}

From source file:com.urbancode.terraform.main.Main.java

/**
 * Initializes Terraform so it can execute the given commands.
 * Here is the order of operations:
 * Parses the credentials file and verifies the given credentials.
 * Generates a random string for this environment, which is appended to the output xml file.
 * Parses the xml file.
 * Runs the specified command (create, destroy, etc).
 * @throws XmlParsingException
 * @throws IOException
 * @throws CredentialsException
 * @throws CreationException
 * @throws DestructionException
 * @throws RestorationException
 */
public void execute() throws XmlParsingException, IOException, CredentialsException, CreationException,
        DestructionException, RestorationException {
    TerraformContext context = null;
    try {
        // parse xml and set context
        context = parseContext(inputXmlFile);

        Credentials credentials = parseCredentials(credsFile);

        context.setCredentials(credentials);
        if (AllowedCommands.CREATE.getCommandName().equalsIgnoreCase(command)) {
            // create new file if creating a new environment
            UUID uuid = UUID.randomUUID();
            //convert uuid to base 62 (allowed chars: 0-9 a-z A-Z)
            ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
            bb.putLong(uuid.getMostSignificantBits());
            bb.putLong(uuid.getLeastSignificantBits());
            String suffix = Base64.encodeBase64URLSafeString(bb.array());
            suffix = suffix.replaceAll("-", "Y");
            suffix = suffix.replaceAll("_", "Z");
            suffix = suffix.substring(0, 4);
            if (context.getEnvironment() != null) {
                context.getEnvironment().addSuffixToEnvName(suffix);
                log.debug("UUID for env " + context.getEnvironment().getName() + " is " + suffix);
            } else {
                throw new NullPointerException("No environment on context!");
            }

            String name = context.getEnvironment().getName();
            log.debug("Output filename = " + name);
            outputXmlFile = new File("env-" + name + ".xml");

            log.debug("Calling create() on context");
            context.create();
        } else if (AllowedCommands.DESTROY.getCommandName().equalsIgnoreCase(command)) {
            String suffix = parseSuffix(context.getEnvironment().getName());
            context.getEnvironment().setSuffix(suffix);
            log.debug("found suffix " + suffix);
            // write the environment back to the input file regardless of success or failure
            outputXmlFile = inputXmlFile;
            log.debug("Calling destroy() on context");
            context.destroy();
        } else if (AllowedCommands.SUSPEND.getCommandName().equalsIgnoreCase(command)) {
            outputXmlFile = inputXmlFile;
            log.debug("Calling restore() on context");
            log.info("Attempting to suspend power on all instances/VMs in the environment.");
            context.restore();
            if (context instanceof ContextVmware) {
                SuspendCommand newCommand = new SuspendCommand((ContextVmware) context);
                newCommand.execute();
            } else if (context instanceof ContextAWS) {
                com.urbancode.terraform.commands.aws.SuspendCommand newCommand = new com.urbancode.terraform.commands.aws.SuspendCommand(
                        (ContextAWS) context);
                newCommand.execute();
            } else {
                log.warn("Could not resolve context to call command \"" + command + "\"");
            }
        } else if (AllowedCommands.RESUME.getCommandName().equalsIgnoreCase(command)) {
            outputXmlFile = inputXmlFile;
            log.debug("Calling restore() on context");
            context.restore();
            log.info("Attempting to power on all instances/VMs in the environment.");
            if (context instanceof ContextVmware) {
                ResumeCommand newCommand = new ResumeCommand((ContextVmware) context);
                newCommand.execute();
            } else if (context instanceof ContextAWS) {
                com.urbancode.terraform.commands.aws.ResumeCommand newCommand = new com.urbancode.terraform.commands.aws.ResumeCommand(
                        (ContextAWS) context);
                newCommand.execute();
            } else {
                log.warn("Could not resolve context to call command \"" + command + "\"");
            }
        } else if (AllowedCommands.TAKE_SNAPSHOT.getCommandName().equalsIgnoreCase(command)) {
            outputXmlFile = inputXmlFile;
            log.debug("Calling restore() on context");
            context.restore();
            log.info("Attempting to take snapshots of all instances/VMs in the environment.");
            if (context instanceof ContextVmware) {
                TakeSnapshotCommand newCommand = new TakeSnapshotCommand((ContextVmware) context);
                newCommand.execute();
            } else if (context instanceof ContextAWS) {
                log.warn("Taking snapshots is not currently supported with Terraform and AWS.");
            } else {
                log.warn("Could not resolve context to call command \"" + command + "\"");
            }
        }
    } catch (ParserConfigurationException e1) {
        throw new XmlParsingException("ParserConfigurationException: " + e1.getMessage(), e1);
    } catch (SAXException e2) {
        throw new XmlParsingException("SAXException: " + e2.getMessage(), e2);
    } finally {
        if (context != null && context.doWriteContext() && outputXmlFile != null) {
            log.debug("Writing context out to " + outputXmlFile);
            writeEnvToXml(outputXmlFile, context);
        }
    }
}

From source file:org.hillview.dataset.RemoteDataSet.java

/**
 * Unsubscribes an operation. This method is safe to invoke multiple times because the
 * logic on the remote end is idempotent.
 */
private void unsubscribe(final UUID id) {
    HillviewLogger.instance.info("Unsubscribe called", "{0}", id);
    final UnsubscribeOperation op = new UnsubscribeOperation(id);
    final byte[] serializedOp = SerializationUtils.serialize(op);
    final Command command = Command.newBuilder().setIdsIndex(this.remoteHandle)
            .setSerializedOp(ByteString.copyFrom(serializedOp)).setHighId(id.getMostSignificantBits())
            .setLowId(id.getLeastSignificantBits()).build();
    this.stub.withDeadlineAfter(TIMEOUT, TimeUnit.MILLISECONDS).unsubscribe(command, new StreamObserver<Ack>() {
        @Override
        public void onNext(final Ack ack) {
        }

        @Override
        public void onError(final Throwable throwable) {
        }

        @Override
        public void onCompleted() {
        }
    });
}

From source file:com.github.ambry.commons.BlobIdTest.java

/**
 * Build a string that resembles a bad blobId.
 * @param version The version number to be embedded in the blobId.
 * @param type The {@link BlobIdType} of the blobId.
 * @param datacenterId The datacenter id to be embedded in the blobId.
 * @param accountId The account id to be embedded in the blobId.
 * @param containerId The container id to be embedded in the blobId.
 * @param partitionId The partition id to be embedded in the blobId.
 * @param uuidLength The length of the uuid.
 * @param uuid The UUID to be embedded in the blobId.
 * @param extraChars Extra characters to put at the end of the ID.
 * @return a base-64 encoded {@link String} representing the blobId.
 */
private String buildBadBlobId(short version, BlobIdType type, Byte datacenterId, Short accountId,
        Short containerId, PartitionId partitionId, int uuidLength, String uuid, String extraChars) {
    int idLength;
    ByteBuffer idBuf;
    switch (version) {
    case BLOB_ID_V1:
        idLength = 2 + partitionId.getBytes().length + 4 + uuid.length() + extraChars.length();
        idBuf = ByteBuffer.allocate(idLength);
        idBuf.putShort(version);
        break;
    case BLOB_ID_V2:
        idLength = 2 + 1 + 1 + 2 + 2 + partitionId.getBytes().length + 4 + uuid.length() + extraChars.length();
        idBuf = ByteBuffer.allocate(idLength);
        idBuf.putShort(version);
        idBuf.put((byte) 0);
        idBuf.put(datacenterId);
        idBuf.putShort(accountId);
        idBuf.putShort(containerId);
        break;
    case BLOB_ID_V3:
    case BLOB_ID_V4:
    case BLOB_ID_V5:
    case BLOB_ID_V6:
        idLength = 2 + 1 + 1 + 2 + 2 + partitionId.getBytes().length + 4 + uuid.length() + extraChars.length();
        idBuf = ByteBuffer.allocate(idLength);
        idBuf.putShort(version);
        idBuf.put((byte) type.ordinal());
        idBuf.put(datacenterId);
        idBuf.putShort(accountId);
        idBuf.putShort(containerId);
        break;
    default:
        idLength = 2 + partitionId.getBytes().length + 4 + uuid.length() + extraChars.length();
        idBuf = ByteBuffer.allocate(idLength);
        idBuf.putShort(version);
        break;
    }
    idBuf.put(partitionId.getBytes());
    switch (version) {
    case BLOB_ID_V6:
        UUID uuidObj = UUID.fromString(uuid);
        idBuf.putLong(uuidObj.getMostSignificantBits());
        idBuf.putLong(uuidObj.getLeastSignificantBits());
        break;
    default:
        idBuf.putInt(uuidLength);
        idBuf.put(uuid.getBytes());
    }
    idBuf.put(extraChars.getBytes());
    return Base64.encodeBase64URLSafeString(idBuf.array());
}
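
For the BLOB_ID_V6 branch, which stores the UUID as two raw longs instead of a length-prefixed string, here is a sketch of the reverse extraction (a hypothetical helper, not part of BlobIdTest), assuming the field layout written by buildBadBlobId and Commons Codec's Base64:

private static UUID extractUuidV6(String blobId, int partitionLength) {
    ByteBuffer buf = ByteBuffer.wrap(Base64.decodeBase64(blobId));
    // Skip version (2) + type (1) + datacenter (1) + account (2) + container (2)
    // bytes plus the serialized partition id, matching buildBadBlobId above.
    buf.position(2 + 1 + 1 + 2 + 2 + partitionLength);
    return new UUID(buf.getLong(), buf.getLong());
}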