Example usage for java.util.concurrent.atomic AtomicLong get

List of usage examples for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usages for java.util.concurrent.atomic AtomicLong get.

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
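
Before the project examples below, here is a minimal self-contained sketch (class and field names are illustrative) of the volatile-read guarantee described above: a value written with set() in one thread is visible to a later get() in another.

import java.util.concurrent.atomic.AtomicLong;

public class GetVisibilityDemo {

    private static final AtomicLong counter = new AtomicLong(0);

    public static void main(String[] args) throws InterruptedException {
        Thread writer = new Thread(() -> counter.set(42));
        writer.start();
        writer.join();
        // get() performs a volatile read, so the writer's set(42) is visible here
        System.out.println(counter.get()); // prints 42
    }
}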

Usage

From source file:fi.luontola.cqrshotel.framework.EventStoreContract.java

private Runnable createRuntimeInvariantChecker(int batchSize) {
    long initialPosition = eventStore.getCurrentPosition();
    AtomicLong position = new AtomicLong(initialPosition);
    return () -> {
        long pos = position.get();
        List<Event> events = eventStore.getAllEvents(pos);
        assertAtomicBatches(batchSize, events);
        position.set(pos + events.size());
    };
}
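
Note that the get()/set() pair in this checker is not atomic; it is presumably safe because the returned Runnable is executed by one thread at a time. A hedged sketch (illustrative names) of the same bookkeeping with compareAndSet, for the case where checks could run concurrently:

import java.util.concurrent.atomic.AtomicLong;

public class PositionChecker {

    private final AtomicLong position = new AtomicLong(0);

    // Advance the shared position only if no other thread moved it first.
    void advanceBy(long processed) {
        long pos = position.get();
        // ... process events starting at pos ...
        if (!position.compareAndSet(pos, pos + processed)) {
            // another checker advanced the position concurrently; skip this update
        }
    }
}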

From source file:com.pinterest.pinlater.client.PinLaterQueryIssuer.java

private void issueEnqueueRequests(PinLater.ServiceIface iface) throws InterruptedException {
    Preconditions.checkNotNull(queueName, "Queue was not specified.");
    final AtomicLong queriesIssued = new AtomicLong(0);
    final Semaphore permits = new Semaphore(concurrency);
    while (numQueries == -1 || queriesIssued.get() < numQueries) {
        final PinLaterEnqueueRequest request = new PinLaterEnqueueRequest();
        request.setQueueName(queueName);
        for (int i = 0; i < batchSize; i++) {
            PinLaterJob job = new PinLaterJob(
                    ByteBuffer.wrap(("task_" + random.nextInt(Integer.MAX_VALUE)).getBytes()));
            job.setPriority(priority);
            request.addToJobs(job);
        }
        final long startTimeNanos = System.nanoTime();
        queriesIssued.incrementAndGet();
        permits.acquire();
        iface.enqueueJobs(REQUEST_CONTEXT, request)
                .respond(new Function<Try<PinLaterEnqueueResponse>, BoxedUnit>() {
                    @Override
                    public BoxedUnit apply(Try<PinLaterEnqueueResponse> responseTry) {
                        permits.release();
                        statsLogger
                                .requestComplete(Duration.fromNanoseconds(System.nanoTime() - startTimeNanos));
                        if (responseTry.isThrow()) {
                            LOG.info("Exception for request: " + request + " : " + ((Throw) responseTry).e());
                        }
                        return BoxedUnit.UNIT;
                    }
                });
    }
    permits.acquire(concurrency);
    LOG.info("Enqueue queries issued: " + queriesIssued);
}
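
A distilled, hedged sketch of the throttling pattern used above (all names and values are illustrative): an AtomicLong caps the total number of requests, read with get() in the loop condition, while a Semaphore caps the number in flight.

import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicLong;

public class BoundedIssuer {

    public static void main(String[] args) throws InterruptedException {
        final long maxRequests = 100;
        final int concurrency = 4;
        final AtomicLong issued = new AtomicLong(0);
        final Semaphore permits = new Semaphore(concurrency);

        while (issued.get() < maxRequests) { // get() reads the running total
            issued.incrementAndGet();
            permits.acquire(); // blocks while 'concurrency' requests are in flight
            new Thread(() -> {
                try {
                    // ... issue one request ...
                } finally {
                    permits.release();
                }
            }).start();
        }
        permits.acquire(concurrency); // wait for every outstanding request to finish
        System.out.println("requests issued: " + issued.get());
    }
}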

From source file:com.pinterest.pinlater.client.PinLaterQueryIssuer.java

private void issueDequeueAckRequests(final PinLater.ServiceIface iface) throws InterruptedException {
    Preconditions.checkNotNull(queueName, "Queue was not specified.");
    final AtomicLong queriesIssued = new AtomicLong(0);
    final Semaphore permits = new Semaphore(concurrency);
    while (numQueries == -1 || queriesIssued.get() < numQueries) {
        final PinLaterDequeueRequest request = new PinLaterDequeueRequest();
        request.setQueueName(queueName);
        request.setLimit(batchSize);
        final long startTimeNanos = System.nanoTime();
        queriesIssued.incrementAndGet();
        permits.acquire();
        iface.dequeueJobs(REQUEST_CONTEXT, request)
                .flatMap(new Function<PinLaterDequeueResponse, Future<Void>>() {
                    @Override
                    public Future<Void> apply(PinLaterDequeueResponse response) {
                        if (response.getJobsSize() == 0) {
                            return Future.Void();
                        }

                        PinLaterJobAckRequest jobAckRequest = new PinLaterJobAckRequest(queueName);
                        for (String job : response.getJobs().keySet()) {
                            if (random.nextInt(100) < dequeueSuccessPercent) {
                                jobAckRequest.addToJobsSucceeded(new PinLaterJobAckInfo(job));
                            } else {
                                jobAckRequest.addToJobsFailed(new PinLaterJobAckInfo(job));
                            }
                        }
                        return iface.ackDequeuedJobs(REQUEST_CONTEXT, jobAckRequest);
                    }
                }).respond(new Function<Try<Void>, BoxedUnit>() {
                    @Override
                    public BoxedUnit apply(Try<Void> voidTry) {
                        permits.release();
                        statsLogger
                                .requestComplete(Duration.fromNanoseconds(System.nanoTime() - startTimeNanos));
                        if (voidTry.isThrow()) {
                            LOG.info("Exception for request: " + request + " : " + ((Throw) voidTry).e());
                        }
                        return BoxedUnit.UNIT;
                    }
                });
    }
    permits.acquire(concurrency);
    LOG.info("Dequeue/ack queries issued: " + queriesIssued);
}

From source file:org.apache.activemq.store.kahadb.MultiKahaDBMessageStoreSizeStatTest.java

@Test(timeout = 60000)
public void testMessageSizeAfterRestartAndPublishMultiQueue() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();
    AtomicLong publishedMessageSize2 = new AtomicLong();

    Destination dest = publishTestQueueMessages(200, publishedMessageSize);

    // verify the count and size
    verifyStats(dest, 200, publishedMessageSize.get());
    assertTrue(broker.getPersistenceAdapter().size() > publishedMessageSize.get());

    Destination dest2 = publishTestQueueMessages(200, "test.queue2", publishedMessageSize2);

    // verify the count and size
    verifyStats(dest2, 200, publishedMessageSize2.get());
    assertTrue(
            broker.getPersistenceAdapter().size() > publishedMessageSize.get() + publishedMessageSize2.get());

    // stop, restart broker and publish more messages
    stopBroker();
    this.setUpBroker(false);
    dest = publishTestQueueMessages(200, publishedMessageSize);
    dest2 = publishTestQueueMessages(200, "test.queue2", publishedMessageSize2);

    // verify the count and size after publishing messages
    verifyStats(dest, 400, publishedMessageSize.get());
    verifyStats(dest2, 400, publishedMessageSize2.get());

    assertTrue(
            broker.getPersistenceAdapter().size() > publishedMessageSize.get() + publishedMessageSize2.get());
    assertTrue(broker.getPersistenceAdapter()
            .size() >= (dest.getMessageStore().getMessageSize() + dest2.getMessageStore().getMessageSize()));

}
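
The AtomicLong here serves as a mutable out-parameter: the publish helper accumulates sizes with addAndGet, and the test reads the total with get(). A minimal sketch of that pattern (illustrative names):

import java.util.concurrent.atomic.AtomicLong;

public class SizeAccumulator {

    // The helper accumulates into the caller-supplied AtomicLong.
    static void publish(int count, AtomicLong totalSize) {
        for (int i = 0; i < count; i++) {
            byte[] payload = ("message-" + i).getBytes();
            totalSize.addAndGet(payload.length);
            // ... send payload ...
        }
    }

    public static void main(String[] args) {
        AtomicLong publishedSize = new AtomicLong();
        publish(200, publishedSize);
        System.out.println("published bytes: " + publishedSize.get());
    }
}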

From source file:org.apache.hadoop.hbase.replication.regionserver.TestGlobalThrottler.java

@Test
public void testQuota() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
    table.addFamily(fam);
    utility1.getAdmin().createTable(table);
    utility2.getAdmin().createTable(table);

    Thread watcher = new Thread(() -> {
        Replication replication = (Replication) utility1.getMiniHBaseCluster().getRegionServer(0)
                .getReplicationSourceService();
        AtomicLong bufferUsed = replication.getReplicationManager().getTotalBufferUsed();
        testQuotaPass = true;
        while (!Thread.interrupted()) {
            long size = bufferUsed.get();
            if (size > 0) {
                testQuotaNonZero = true;
            }
            if (size > 600) {
                // We read logs first then check throttler, so if the buffer quota limiter doesn't
                // take effect, it will push many logs and exceed the quota.
                testQuotaPass = false;
            }
            Threads.sleep(50);
        }
    });
    watcher.start();

    try (Table t1 = utility1.getConnection().getTable(tableName);
            Table t2 = utility2.getConnection().getTable(tableName)) {
        for (int i = 0; i < 50; i++) {
            Put put = new Put(ROWS[i]);
            put.addColumn(famName, VALUE, VALUE);
            t1.put(put);
        }
        long start = EnvironmentEdgeManager.currentTime();
        while (EnvironmentEdgeManager.currentTime() - start < 180000) {
            Scan scan = new Scan();
            scan.setCaching(50);
            int count = 0;
            try (ResultScanner results = t2.getScanner(scan)) {
                for (Result result : results) {
                    count++;
                }
            }
            if (count < 50) {
                LOG.info("Waiting all logs pushed to slave. Expected 50 , actual " + count);
                Threads.sleep(200);
                continue;
            }
            break;
        }
    }

    watcher.interrupt();
    Assert.assertTrue(testQuotaPass);
    Assert.assertTrue(testQuotaNonZero);
}
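
A reduced sketch of the watcher pattern above (illustrative names): a monitor thread repeatedly samples a shared AtomicLong gauge with get() until it is interrupted.

import java.util.concurrent.atomic.AtomicLong;

public class BufferWatcher {

    public static void main(String[] args) throws InterruptedException {
        final AtomicLong bufferUsed = new AtomicLong(0);
        final AtomicLong maxSeen = new AtomicLong(0);

        Thread watcher = new Thread(() -> {
            while (!Thread.interrupted()) {
                long size = bufferUsed.get(); // volatile read of the shared gauge
                maxSeen.accumulateAndGet(size, Math::max);
                try {
                    Thread.sleep(50);
                } catch (InterruptedException e) {
                    return; // interrupted while sleeping: stop watching
                }
            }
        });
        watcher.start();

        bufferUsed.addAndGet(512); // simulated producer activity
        Thread.sleep(200);
        watcher.interrupt();
        watcher.join();
        System.out.println("max buffer observed: " + maxSeen.get());
    }
}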

From source file:org.petalslink.dsb.federation.core.server.DefaultPropagationStrategy.java

/**
 * {@inheritDoc}
 */
public void lookup(EndpointQuery query, String clientId, String id) throws FederationException {
    if (logger.isDebugEnabled()) {
        logger.debug("Got lookup call from client '" + clientId + "'");
    }

    // FIXME: what should we do if there are no clients?
    // get all the connected clients
    Set<org.petalslink.dsb.federation.core.api.FederationClient> clients = this.federationServer.getClients();

    // store the number of expected replies (all clients minus the requester)...
    AtomicLong counter = new AtomicLong(clients.size() - 1);

    if (counter.get() <= 0) {
        // one-way call: if there are not enough clients, we must send the
        // reply ourselves
        this.submitEmptyReply(clientId, id);
    } else {
        // TODO : Define a global timeout !
        this.idClientMap.put(id, clientId);
        this.latches.put(id, counter);
        this.endpoints.put(id, new HashSet<ServiceEndpoint>());

        // naive implementation: call every client, then aggregate the results...
        for (org.petalslink.dsb.federation.core.api.FederationClient federationClient : clients) {
            // submit lookup in separate threads
            if (!clientId.equals(federationClient.getName())) {
                this.submitLookup(query, id, counter, federationClient);
            } else {
                if (logger.isInfoEnabled()) {
                    logger.info("Do not call the client which did the request");
                }
            }
        }
    }
}
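
The counter stored in this.latches is effectively a countdown: here get() checks whether any replies are expected at all, and the reply path presumably decrements it. A hedged sketch of that countdown idiom (illustrative names):

import java.util.concurrent.atomic.AtomicLong;

public class ReplyCountdown {

    private final AtomicLong remaining;

    ReplyCountdown(int expectedReplies) {
        this.remaining = new AtomicLong(expectedReplies);
    }

    void onReply() {
        if (remaining.decrementAndGet() == 0) {
            // the last expected reply arrived: aggregate and forward the result
            System.out.println("all replies received");
        }
    }

    public static void main(String[] args) {
        ReplyCountdown countdown = new ReplyCountdown(3);
        countdown.onReply();
        countdown.onReply();
        countdown.onReply(); // prints "all replies received"
    }
}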

From source file:org.apache.pulsar.client.impl.BinaryProtoLookupService.java

@Override
public CompletableFuture<List<String>> getTopicsUnderNamespace(NamespaceName namespace, Mode mode) {
    CompletableFuture<List<String>> topicsFuture = new CompletableFuture<List<String>>();

    AtomicLong opTimeoutMs = new AtomicLong(client.getConfiguration().getOperationTimeoutMs());
    Backoff backoff = new Backoff(100, TimeUnit.MILLISECONDS, opTimeoutMs.get() * 2, TimeUnit.MILLISECONDS, 0,
            TimeUnit.MILLISECONDS);
    getTopicsUnderNamespace(serviceNameResolver.resolveHost(), namespace, backoff, opTimeoutMs, topicsFuture,
            mode);
    return topicsFuture;
}
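
Here the AtomicLong acts as a mutable timeout budget threaded through the retry chain; get() reads the remaining budget when sizing the backoff. A simplified sketch of that idea (illustrative names and values):

import java.util.concurrent.atomic.AtomicLong;

public class RetryBudget {

    public static void main(String[] args) throws InterruptedException {
        AtomicLong opTimeoutMs = new AtomicLong(1_000);
        long delayMs = 100;

        while (opTimeoutMs.get() > 0) { // get() reads the remaining budget
            if (attempt()) {
                return;
            }
            Thread.sleep(delayMs); // back off before retrying
            opTimeoutMs.addAndGet(-delayMs); // deduct the delay from the budget
            delayMs = Math.min(delayMs * 2, 5_000); // exponential backoff, capped
        }
        System.out.println("operation timed out");
    }

    static boolean attempt() {
        return false; // stand-in for the real lookup call
    }
}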

From source file:de.ufinke.cubaja.sort.SortManager.java

private void initTimer(final Log logger, final String prefix, final String key, final AtomicLong counter) {

    TimerTask task = new TimerTask() {

        public void run() {

            logger.trace(prefix + text.get(key, counter.get()));
        }
    };

    timer = new Timer();
    timer.schedule(task, logInterval, logInterval);
}
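
A compact sketch of the same periodic-sampling pattern (illustrative names): a TimerTask reads a shared counter with get() on a fixed schedule while worker code increments it.

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicLong;

public class ProgressLogger {

    public static void main(String[] args) throws InterruptedException {
        final AtomicLong processed = new AtomicLong(0);

        Timer timer = new Timer(true); // daemon timer, will not block JVM exit
        timer.schedule(new TimerTask() {
            @Override
            public void run() {
                System.out.println("processed so far: " + processed.get());
            }
        }, 100, 100); // sample every 100 ms

        for (int i = 0; i < 1_000; i++) {
            processed.incrementAndGet(); // worker making progress
            Thread.sleep(1);
        }
        timer.cancel();
    }
}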

From source file:ubicrypt.core.Utils.java

public static Observable<Long> write(final Path fullPath, final InputStream inputStream) {
    return Observable.create(subscriber -> {
        try {
            final AtomicLong offset = new AtomicLong(0);
            final AsynchronousFileChannel afc = AsynchronousFileChannel.open(fullPath, StandardOpenOption.WRITE,
                    StandardOpenOption.CREATE);
            afc.lock(new Object(), new CompletionHandler<FileLock, Object>() {
                @Override
                public void completed(final FileLock lock, final Object attachment) {
                    //acquired lock
                    final byte[] buf = new byte[1 << 16];
                    try {
                        final int len = inputStream.read(buf);
                        if (len == -1) {
                            unsubscribe(subscriber, inputStream, lock);
                            return;
                        }
                        afc.write(ByteBuffer.wrap(Arrays.copyOfRange(buf, 0, len)), offset.get(), null,
                                new CompletionHandler<Integer, Object>() {
                                    @Override
                                    public void completed(final Integer result, final Object attachment) {
                                        //written chunk of bytes
                                        subscriber.onNext(offset.addAndGet(result));
                                        final byte[] buf = new byte[1 << 16];
                                        int len;
                                        try {
                                            len = inputStream.read(buf);
                                            if (len == -1) {
                                                unsubscribe(subscriber, inputStream, lock);
                                                log.debug("written:{}", fullPath);
                                                return;
                                            }
                                        } catch (final IOException e) {
                                            subscriber.onError(e);
                                            return;
                                        }
                                        afc.write(ByteBuffer.wrap(Arrays.copyOfRange(buf, 0, len)),
                                                offset.get(), null, this);
                                    }

                                    @Override
                                    public void failed(final Throwable exc, final Object attachment) {
                                        subscriber.onError(exc);
                                    }
                                });
                    } catch (final Exception e) {
                        close(inputStream, lock);
                        subscriber.onError(e);
                    }
                }

                @Override
                public void failed(final Throwable exc, final Object attachment) {
                    log.error("error on getting lock for:{}, error:{}", fullPath, exc.getMessage());
                    try {
                        inputStream.close();
                    } catch (final IOException e) {
                        // best-effort close; nothing else to do on failure
                    }
                    subscriber.onError(exc);
                }
            });

        } catch (final Exception e) {
            log.error("error on file:{}", fullPath);
            subscriber.onError(e);
        }
    });
}
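
A much-reduced sketch of the offset bookkeeping above (illustrative names): the AtomicLong holds the current file position, get() supplies the position for each write, and addAndGet advances it by the bytes written. Here the asynchronous result is awaited via the returned Future instead of a CompletionHandler:

import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.atomic.AtomicLong;

public class OffsetTrackingWrite {

    public static void main(String[] args) throws Exception {
        Path path = Paths.get("out.bin");
        AtomicLong offset = new AtomicLong(0);
        try (AsynchronousFileChannel afc = AsynchronousFileChannel.open(path,
                StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            byte[] chunk = "hello".getBytes();
            // write at the current offset, awaiting the Future for simplicity,
            // then advance the offset by the number of bytes written
            int written = afc.write(ByteBuffer.wrap(chunk), offset.get()).get();
            long newOffset = offset.addAndGet(written);
            System.out.println("file position now: " + newOffset);
        }
    }
}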

From source file:io.warp10.script.functions.URLFETCH.java

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {

    if (!stack.isAuthenticated()) {
        throw new WarpScriptException(getName() + " requires the stack to be authenticated.");
    }

    Object o = stack.pop();

    if (!(o instanceof String) && !(o instanceof List)) {
        throw new WarpScriptException(getName() + " expects a URL or list thereof on top of the stack.");
    }

    List<URL> urls = new ArrayList<URL>();

    try {
        if (o instanceof String) {
            urls.add(new URL(o.toString()));
        } else {
            for (Object oo : (List) o) {
                urls.add(new URL(oo.toString()));
            }
        }
    } catch (MalformedURLException mue) {
        throw new WarpScriptException(getName() + " encountered an invalid URL.");
    }

    //
    // Check URLs
    //

    for (URL url : urls) {
        if (!StandaloneWebCallService.checkURL(url)) {
            throw new WarpScriptException(getName() + " encountered an invalid URL '" + url + "'");
        }
    }

    //
    // Check that we do not exceed the maxurlfetch limit
    //

    AtomicLong urlfetchCount = (AtomicLong) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_COUNT);
    AtomicLong urlfetchSize = (AtomicLong) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_SIZE);

    if (urlfetchCount.get()
            + urls.size() > (long) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_LIMIT)) {
        throw new WarpScriptException(getName() + " is limited to "
                + stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_LIMIT) + " calls.");
    }

    List<Object> results = new ArrayList<Object>();

    for (URL url : urls) {
        urlfetchCount.addAndGet(1);

        HttpURLConnection conn = null;

        try {
            conn = (HttpURLConnection) url.openConnection();
            conn.setDoInput(true);
            conn.setDoOutput(false);
            conn.setRequestMethod("GET");

            byte[] buf = new byte[8192];

            ByteArrayOutputStream baos = new ByteArrayOutputStream();

            InputStream in = conn.getInputStream();

            while (true) {
                int len = in.read(buf);

                if (len < 0) {
                    break;
                }

                if (urlfetchSize.get() + baos.size()
                        + len > (long) stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_MAXSIZE)) {
                    throw new WarpScriptException(getName()
                            + " would exceed maximum size of content which can be retrieved via URLFETCH ("
                            + stack.getAttribute(WarpScriptStack.ATTRIBUTE_URLFETCH_MAXSIZE) + " bytes)");
                }

                baos.write(buf, 0, len);
            }

            urlfetchSize.addAndGet(baos.size());

            List<Object> res = new ArrayList<Object>();

            res.add(conn.getResponseCode());
            Map<String, List<String>> hdrs = conn.getHeaderFields();

            if (hdrs.containsKey(null)) {
                List<String> statusMsg = hdrs.get(null);
                if (statusMsg.size() > 0) {
                    res.add(statusMsg.get(0));
                } else {
                    res.add("");
                }
            } else {
                res.add("");
            }
            hdrs.remove(null);
            res.add(hdrs);
            res.add(Base64.encodeBase64String(baos.toByteArray()));

            results.add(res);
        } catch (IOException ioe) {
            throw new WarpScriptException(getName() + " encountered an error while fetching '" + url + "'");
        } finally {
            if (null != conn) {
                conn.disconnect();
            }
        }
    }

    stack.push(results);

    return stack;
}
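
One caveat worth noting about the snippet above: the get() checks and the later addAndGet updates form a check-then-act sequence, which is fine for a single-threaded stack but racy under concurrent use. A hedged sketch (illustrative names and limit) of reserving quota atomically with a compareAndSet loop:

import java.util.concurrent.atomic.AtomicLong;

public class QuotaGuard {

    private static final long LIMIT = 64;
    private final AtomicLong used = new AtomicLong(0);

    boolean tryReserve(long amount) {
        while (true) {
            long current = used.get();
            if (current + amount > LIMIT) {
                return false; // would exceed the quota
            }
            if (used.compareAndSet(current, current + amount)) {
                return true; // quota reserved atomically
            }
            // another thread raced us between get() and compareAndSet(); retry
        }
    }

    public static void main(String[] args) {
        QuotaGuard guard = new QuotaGuard();
        System.out.println(guard.tryReserve(50)); // true
        System.out.println(guard.tryReserve(20)); // false: 50 + 20 > 64
    }
}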