Example usage for java.util.concurrent.atomic AtomicLong get

List of usage examples for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usage of java.util.concurrent.atomic.AtomicLong.get.

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
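
Before the project examples below, here is a minimal, self-contained sketch (class and variable names are illustrative, not taken from any of the projects below) showing the volatile-read semantics of get() after concurrent updates:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong counter = new AtomicLong(0);

        // Four threads each add 1000 to the shared counter.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread w : workers) {
            w.join();
        }

        // get() performs a volatile read, so after the joins it is
        // guaranteed to observe all 4000 increments.
        System.out.println("count = " + counter.get());
    }
}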

Usage

From source file:org.apache.pulsar.compaction.CompactedTopicTest.java

/**
 * Build a compacted ledger, and return the id of the ledger, the positions of the
 * entries in the ledger, and a list of gaps paired with the entry which should be
 * returned after each gap.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(
        BookKeeper bk, int count) throws Exception {
    LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
            Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
    List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();

    AtomicLong ledgerIds = new AtomicLong(10L);
    AtomicLong entryIds = new AtomicLong(0L);
    CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
        List<MessageIdData> idsInGap = new ArrayList<MessageIdData>();
        if (r.nextInt(10) == 1) {
            long delta = r.nextInt(10) + 1;
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1)
                    .build());
            ledgerIds.addAndGet(delta);
            entryIds.set(0);
        }
        long delta = r.nextInt(5);
        if (delta != 0) {
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get()).setEntryId(entryIds.get() + 1)
                    .build());
        }
        MessageIdData id = MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                .setEntryId(entryIds.addAndGet(delta + 1)).build();

        @Cleanup
        RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);

        CompletableFuture<Void> f = new CompletableFuture<>();
        ByteBuf buffer = m.serialize();

        lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
            if (rc != BKException.Code.OK) {
                f.completeExceptionally(BKException.create(rc));
            } else {
                positions.add(Pair.of(id, eid));
                idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
                f.complete(null);
            }
        }, null);
        return f;
    }).toArray(CompletableFuture[]::new)).get();
    lh.close();

    return Triple.of(lh.getId(), positions, idsInGaps);
}

From source file:org.apache.activemq.broker.region.cursors.KahaDBPendingMessageCursorTest.java

/**
 * Test that the counter restores size and works after restart and more
 * messages are published
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testNonPersistentDurableMessageSize() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();
    Topic topic = publishTestMessagesDurable(connection, new String[] { "sub1" }, 200, publishedMessageSize,
            DeliveryMode.NON_PERSISTENT);

    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");

    // verify the count and size
    verifyPendingStats(topic, subKey, 200, publishedMessageSize.get());
    verifyStoreStats(topic, 0, 0);
}

From source file:org.apache.hadoop.hbase.master.TestSplitLogManager.java

private void waitForCounter(final AtomicLong ctr, long oldval, long newval, long timems) throws Exception {
    Expr e = new Expr() {
        public long eval() {
            return ctr.get();
        }
    };
    waitForCounter(e, oldval, newval, timems);
}

From source file:org.apache.activemq.broker.region.cursors.KahaDBPendingMessageCursorTest.java

/**
 * Test that the counter restores size and works after restart and more
 * messages are published
 *
 * @throws Exception
 */
@Test(timeout = 60000)
public void testDurableMessageSizeAfterRestartAndPublish() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();
    Topic topic = publishTestMessagesDurable(connection, new String[] { "sub1" }, 200, publishedMessageSize,
            DeliveryMode.PERSISTENT);

    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");

    // verify the count and size
    verifyPendingStats(topic, subKey, 200, publishedMessageSize.get());
    verifyStoreStats(topic, 200, publishedMessageSize.get());

    // stop, restart broker and publish more messages
    stopBroker();
    this.setUpBroker(false);

    connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();

    topic = publishTestMessagesDurable(connection, new String[] { "sub1" }, 200, publishedMessageSize,
            DeliveryMode.PERSISTENT);

    // verify the count and size
    verifyPendingStats(topic, subKey, 400, publishedMessageSize.get());
    verifyStoreStats(topic, 400, publishedMessageSize.get());
}

From source file:org.apache.hadoop.hbase.regionserver.TestSplitLogWorker.java

private boolean waitForCounterBoolean(final AtomicLong ctr, final long oldval, final long newval, long timems,
        boolean failIfTimeout) throws Exception {

    long timeWaited = TEST_UTIL.waitFor(timems, 10, failIfTimeout, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
            return (ctr.get() >= newval);
        }
    });

    if (timeWaited > 0) {
        // when not timed out
        assertEquals(newval, ctr.get());
    }
    return true;
}

From source file:org.apache.hadoop.hbase.quotas.TestSpaceQuotasWithSnapshots.java

void waitForStableRegionSizeReport(Connection conn, TableName tn) throws Exception {
    // Wait for some stability in the value before proceeding;
    // helps make sure that we get the actual last value, not some in-between one
    AtomicLong lastValue = new AtomicLong(-1);
    AtomicInteger counter = new AtomicInteger(0);
    TEST_UTIL.waitFor(15_000, 500, new Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
            LOG.debug("Last observed size=" + lastValue.get());
            long actual = getRegionSizeReportForTable(conn, tn);
            if (actual == lastValue.get()) {
                int numMatches = counter.incrementAndGet();
                if (numMatches >= 5) {
                    return true;
                }
                // Not yet..
                return false;
            }
            counter.set(0);
            lastValue.set(actual);
            return false;
        }
    });
}

From source file:org.deeplearning4j.models.word2vec.Word2Vec.java

/**
 * Train on a list of vocab words
 * @param sentence the list of vocab words to train on
 */
public void trainSentence(final List<VocabWord> sentence, AtomicLong nextRandom, double alpha) {
    if (sentence == null || sentence.isEmpty())
        return;
    for (int i = 0; i < sentence.size(); i++) {
        // advance the linear congruential generator (same constants as java.util.Random)
        nextRandom.set(nextRandom.get() * 25214903917L + 11);
        skipGram(i, sentence, (int) nextRandom.get() % window, nextRandom, alpha);
    }
}

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

public void scan() {
    WebTarget t = extractWebTarget(dockerScanner.getDockerClient());
    logger.info("Scanning {}", t);
    JsonNode response = t.path("/info").request().buildGet().invoke(JsonNode.class);

    JsonNode swarm = response.path("Swarm");
    JsonNode cluster = swarm.path("Cluster");
    String swarmClusterId = cluster.path("ID").asText();

    // need to parse these dates
    String createdAt = cluster.path("CreatedAt").asText();
    String updatedAt = cluster.path("UpdatedAt").asText();
    ObjectNode props = mapper.createObjectNode();
    props.put("swarmClusterId", swarmClusterId);
    props.put("createdAt", createdAt);
    props.put("updatedAt", updatedAt);

    JsonNode swarmNode = dockerScanner.getNeoRxClient().execCypher(
            "merge (c:DockerSwarm {swarmClusterId:{id}}) set c+={props},c.updateTs=timestamp() return c", "id",
            swarmClusterId, "props", props).blockingFirst(MissingNode.getInstance());

    if (isUnixDomainScoket(t.getUri().toString())) {
        // Only set managerApiUrl to a unix domain socket if it has not
        // already been set.
        // This is useful for trident
        if (!isUnixDomainScoket(swarmNode.path("managerApiUrl").asText())) {

            String LOCAL_DOCKER_DAEMON_SOCKET_URL = "unix:///var/run/docker.sock";
            logger.info("setting mangerApiUrl to {} for swarm {}", LOCAL_DOCKER_DAEMON_SOCKET_URL,
                    swarmClusterId);/*from  w  ww .  j  a  v a 2 s . co  m*/

            String name = "local";
            dockerScanner.getNeoRxClient()
                    .execCypher("match (c:DockerSwarm {name:{name}}) return c", "name", name).forEach(it -> {
                        String oldSwarmClusterId = it.path("swarmClusterId").asText();
                        if (!swarmClusterId.equals(oldSwarmClusterId)) {
                            dockerScanner.getNeoRxClient().execCypher(
                                    "match (c:DockerSwarm {swarmClusterId:{swarmClusterId}}) detach delete c",
                                    "swarmClusterId", oldSwarmClusterId);
                        }
                    });

            dockerScanner.getNeoRxClient().execCypher(
                    "match (c:DockerSwarm {swarmClusterId:{id}}) set c.managerApiUrl={managerApiUrl},c.name={name},c.tridentClusterId={name} return c",
                    "id", swarmClusterId, "managerApiUrl", LOCAL_DOCKER_DAEMON_SOCKET_URL, "name", name);

        }
    }

    AtomicBoolean fail = new AtomicBoolean(false);
    response = t.path("/nodes").request().buildGet().invoke(JsonNode.class);
    AtomicLong earliestTimestamp = new AtomicLong(Long.MAX_VALUE);
    response.elements().forEachRemaining(it -> {
        try {
            earliestTimestamp.set(
                    Math.min(earliestTimestamp.get(), saveDockerNode(swarmClusterId, flattenSwarmNode(it))));
        } catch (RuntimeException e) {
            logger.warn("problem", e);
            fail.set(true);
        }
    });

    if (!fail.get()) {
        if (earliestTimestamp.get() < System.currentTimeMillis()) {
            logger.info("deleting DockerHost nodes before with updateTs<{}", earliestTimestamp.get());
            dockerScanner.getNeoRxClient().execCypher(
                    "match (s:DockerSwarm {swarmClusterId:{id}})--(x:DockerHost) where s.updateTs>x.updateTs detach delete x",
                    "id", swarmClusterId);
        }
    }
    scanServicesForSwarm(swarmClusterId);
    scanTasksForSwarm(swarmClusterId);
}

From source file:org.codice.ddf.catalog.ui.query.monitor.impl.WorkspaceServiceImpl.java

private QueryResponse query(QueryRequest queryRequest) {
    AtomicLong hitCount = new AtomicLong(0);

    QueryFunction queryFunction = qr -> {
        SourceResponse sourceResponse = catalogFramework.query(qr);
        hitCount.compareAndSet(0, sourceResponse.getHits());
        return sourceResponse;
    };

    ResultIterable results = ResultIterable.resultIterable(queryFunction, queryRequest, maxSubscriptions);

    List<Result> resultList = results.stream().collect(Collectors.toList());

    long totalHits = hitCount.get() != 0 ? hitCount.get() : resultList.size();

    return new QueryResponseImpl(queryRequest, resultList, totalHits);
}

From source file:org.apache.hadoop.hbase.quotas.TestSpaceQuotasWithSnapshots.java

void waitForStableQuotaSize(Connection conn, TableName tn, String ns) throws Exception {
    // Wait for some stability in the value before proceeding;
    // helps make sure that we get the actual last value, not some in-between one
    AtomicLong lastValue = new AtomicLong(-1);
    AtomicInteger counter = new AtomicInteger(0);
    TEST_UTIL.waitFor(15_000, 500, new SpaceQuotaSnapshotPredicate(conn, tn, ns) {
        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            LOG.debug("Last observed size=" + lastValue.get());
            if (snapshot.getUsage() == lastValue.get()) {
                int numMatches = counter.incrementAndGet();
                if (numMatches >= 5) {
                    return true;
                }
                // Not yet..
                return false;
            }
            counter.set(0);
            lastValue.set(snapshot.getUsage());
            return false;
        }
    });
}