Example usage for java.util.concurrent.atomic AtomicLong get

List of usage examples for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usage of java.util.concurrent.atomic.AtomicLong.get().

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
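
Before the project examples below, here is a minimal self-contained sketch (not taken from any of the listed sources) of the typical pattern: write through incrementAndGet() or set(), then read the value back with get():

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) {
        AtomicLong counter = new AtomicLong(0);
        counter.incrementAndGet();              // counter is now 1
        long snapshot = counter.get();          // volatile read of the current value
        System.out.println("current value = " + snapshot);
    }
}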

Usage

From source file:org.apache.hadoop.hdfs.TestAutoEditRollWhenAvatarFailover.java

/**
 * Test if we can get block locations after killing primary avatar,
 * failing over to standby avatar (making it the new primary),
 * restarting a new standby avatar, killing the new primary avatar and
 * failing over to the restarted standby.
 * Write logs for a while to make sure automatic rolls are triggered.
 */
@Test
public void testDoubleFailOverWithAutomaticRoll() throws Exception {
    setUp(false, "testDoubleFailOverWithAutomaticRoll");

    // To make sure it's never the case that both primary and standby
    // issue rolling, we use an injection handler.
    final AtomicBoolean startKeepThread = new AtomicBoolean(true);
    final AtomicInteger countAutoRolled = new AtomicInteger(0);
    final AtomicBoolean needFail = new AtomicBoolean(false);
    final AtomicLong currentThreadId = new AtomicLong(-1);
    final Object waitFor10Rolls = new Object();
    InjectionHandler.set(new InjectionHandler() {
        @Override
        protected void _processEvent(InjectionEventI event, Object... args) {
            if (event == InjectionEvent.FSEDIT_AFTER_AUTOMATIC_ROLL) {
                countAutoRolled.incrementAndGet();
                if (countAutoRolled.get() >= 10) {
                    synchronized (waitFor10Rolls) {
                        waitFor10Rolls.notifyAll();
                    }
                }

                if (!startKeepThread.get()) {
                    currentThreadId.set(-1);
                } else if (currentThreadId.get() == -1) {
                    currentThreadId.set(Thread.currentThread().getId());
                } else if (currentThreadId.get() != Thread.currentThread().getId()) {
                    LOG.warn("[Thread " + Thread.currentThread().getId() + "] expected: " + currentThreadId);
                    needFail.set(true);
                }

                LOG.info("[Thread " + Thread.currentThread().getId() + "] finish automatic log rolling, count "
                        + countAutoRolled.get());

                // Increase the rolling time a little bit once every 7 auto rolls
                if (countAutoRolled.get() % 7 == 3) {
                    DFSTestUtil.waitNMilliSecond(75);
                }
            }
        }
    });

    FileSystem fs = cluster.getFileSystem();

    // Add some transactions during a period of time before failing over.
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(100);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() >= 10) {
            LOG.info("Automatic rolled 10 times.");
            long duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 4500);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    // Tune the rolling timeout temporarily to avoid race conditions
    // only triggered in tests
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(5000);

    LOG.info("================== killing primary 1");

    cluster.killPrimary();

    // Fail over and make sure that automatic edit rolls still happen
    // after the failover.
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);
    LOG.info("================== failing over 1");
    cluster.failOver();
    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== restarting standby");
    cluster.restartStandby();
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);
    LOG.info("================== Finish restarting standby");

    // Wait for automatic rolling to happen even when there are no new transactions.
    startKeepThread.set(true);

    startTime = System.currentTimeMillis();
    long waitDeadLine = startTime + 20000;
    synchronized (waitFor10Rolls) {
        while (System.currentTimeMillis() < waitDeadLine && countAutoRolled.get() < 10) {
            waitFor10Rolls.wait(waitDeadLine - System.currentTimeMillis());
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);
    long duration = System.currentTimeMillis() - startTime;
    TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs", duration > 9000);

    // failover back 
    countAutoRolled.set(0);
    startKeepThread.set(false);
    currentThreadId.set(-1);

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);
    cluster.getStandbyAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(6000);

    LOG.info("================== killing primary 2");
    cluster.killPrimary();
    LOG.info("================== failing over 2");
    cluster.failOver();

    cluster.getPrimaryAvatar(0).avatar.namesystem.getFSImage().getEditLog().setTimeoutRollEdits(1000);

    // Make sure that after failing back, automatic rolling can still happen.
    startKeepThread.set(true);

    for (int i = 0; i < 100; i++) {
        fs.setTimes(new Path("/"), 0, 0);
        DFSTestUtil.waitNMilliSecond(200);
        if (i % 10 == 0) {
            LOG.info("================== executed " + i + " queries");
        }
        if (countAutoRolled.get() > 10) {
            LOG.info("Automatic rolled 10 times.");
            duration = System.currentTimeMillis() - startTime;
            TestCase.assertTrue("Automatic rolled 10 times in just " + duration + " msecs, which is too short",
                    duration > 9000);
            break;
        }
    }
    TestCase.assertTrue("Only " + countAutoRolled + " automatic rolls triggered, which is lower than expected.",
            countAutoRolled.get() >= 10);

    InjectionHandler.clear();

    if (needFail.get()) {
        TestCase.fail("Automatic rolling doesn't happen in the same thread when it should.");
    }
}

From source file:eu.fthevenet.binjr.data.codec.CsvDecoder.java

@Override
public Map<TimeSeriesInfo<T>, TimeSeriesProcessor<T>> decode(InputStream in, List<TimeSeriesInfo<T>> seriesInfo)
        throws IOException, DecodingDataFromAdapterException {
    try (Profiler ignored = Profiler.start("Building time series from csv data", logger::trace)) {
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, encoding))) {
            CSVFormat csvFormat = CSVFormat.DEFAULT.withAllowMissingColumnNames(false).withFirstRecordAsHeader()
                    .withSkipHeaderRecord().withDelimiter(delimiter);
            Iterable<CSVRecord> records = csvFormat.parse(reader);
            Map<TimeSeriesInfo<T>, TimeSeriesProcessor<T>> series = new HashMap<>();
            final AtomicLong nbpoints = new AtomicLong(0);
            for (CSVRecord csvRecord : records) {
                nbpoints.incrementAndGet();
                ZonedDateTime timeStamp = dateParser.apply(csvRecord.get(0));
                for (TimeSeriesInfo<T> info : seriesInfo) {
                    T val = numberParser.apply(csvRecord.get(info.getBinding().getLabel()));
                    XYChart.Data<ZonedDateTime, T> point = new XYChart.Data<>(timeStamp, val);
                    TimeSeriesProcessor<T> l = series.computeIfAbsent(info, k -> timeSeriesFactory.create());
                    l.addSample(point);
                }
            }
            logger.trace(() -> String.format("Built %d series with %d samples each (%d total samples)",
                    seriesInfo.size(), nbpoints.get(), seriesInfo.size() * nbpoints.get()));
            return series;
        }
    }
}

From source file:com.ning.arecibo.collector.persistent.TestTimelineAggregator.java

private void checkSamplesForATimeline(final Integer startTimeMinutesAgo, final Integer endTimeMinutesAgo,
        final long expectedChunks) throws InterruptedException {
    final AtomicLong timelineChunkSeen = new AtomicLong(0);

    timelineDAO.getSamplesByHostIdsAndSampleKindIds(ImmutableList.<Integer>of(hostId),
            ImmutableList.<Integer>of(minHeapUsedKindId, maxHeapUsedKindId),
            START_TIME.minusMinutes(startTimeMinutesAgo), START_TIME.minusMinutes(endTimeMinutesAgo),
            new TimelineChunkConsumer() {

                @Override
                public void processTimelineChunk(final TimelineChunk chunk) {
                    Assert.assertEquals((Integer) chunk.getHostId(), hostId);
                    Assert.assertTrue(chunk.getSampleKindId() == minHeapUsedKindId
                            || chunk.getSampleKindId() == maxHeapUsedKindId);
                    timelineChunkSeen.incrementAndGet();
                }
            });

    Assert.assertEquals(timelineChunkSeen.get(), expectedChunks);
}

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, as well as
 * retrieving existing attributes.
 */
@Test
public void testGetOrAssignStreamSegmentId() {
    final int segmentCount = 10;
    final int transactionsPerSegment = 5;
    final long noSegmentId = ContainerMetadata.NO_STREAM_SEGMENT_ID;
    AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
    Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? noSegmentId
            : currentSegmentId.get();

    @Cleanup
    TestContext context = new TestContext();

    HashSet<String> storageSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        storageSegments.add(segmentName);
        setAttributes(segmentName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT, context);

        for (int j = 0; j < transactionsPerSegment; j++) {
            // There is a small chance of a name conflict here, but we don't care. As long as we get at least one
            // Transaction per segment, we should be fine.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
                    UUID.randomUUID());
            storageSegments.add(transactionName);
            setAttributes(transactionName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT,
                    context);
        }
    }

    // We setup all necessary handlers, except the one for create. We do not need to create new Segments here.
    setupOperationLog(context);
    Predicate<String> isSealed = segmentName -> segmentName.hashCode() % 2 == 0;
    Function<String, Long> getInitialLength = segmentName -> (long) Math.abs(segmentName.hashCode());
    setupStorageGetHandler(context, storageSegments, segmentName -> new StreamSegmentInformation(segmentName,
            getInitialLength.apply(segmentName), isSealed.test(segmentName), false, new ImmutableDate()));

    // First, map all the parents (stand-alone segments).
    for (String name : storageSegments) {
        if (StreamSegmentNameUtils.getParentStreamSegmentName(name) == null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for StreamSegment " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for StreamSegment " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
        }
    }

    // Now, map all the Transactions.
    for (String name : storageSegments) {
        String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(name);
        if (parentName != null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for Transaction " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for Transaction " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for Transaction " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for Transaction " + name, expectedAttributes, sm);

            // Check parenthood.
            Assert.assertNotEquals("No parent defined in metadata for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, sm.getParentId());
            long parentId = context.metadata.getStreamSegmentId(parentName, false);
            Assert.assertEquals("Unexpected parent defined in metadata for Transaction " + name, parentId,
                    sm.getParentId());
        }
    }
}

From source file:org.apache.hadoop.ipc.DecayRpcScheduler.java

/**
 * Decay the stored counts for each user and clean as necessary.
 * This method should be called periodically in order to keep
 * counts current.
 */
private void decayCurrentCounts() {
    try {
        long totalDecayedCount = 0;
        long totalRawCount = 0;
        Iterator<Map.Entry<Object, List<AtomicLong>>> it = callCounts.entrySet().iterator();

        while (it.hasNext()) {
            Map.Entry<Object, List<AtomicLong>> entry = it.next();
            AtomicLong decayedCount = entry.getValue().get(0);
            AtomicLong rawCount = entry.getValue().get(1);

            // Compute the next value by reducing it by the decayFactor
            totalRawCount += rawCount.get();
            long currentValue = decayedCount.get();
            long nextValue = (long) (currentValue * decayFactor);
            totalDecayedCount += nextValue;
            decayedCount.set(nextValue);

            if (nextValue == 0) {
                // We will clean up unused keys here. An interesting optimization
                // might be to have an upper bound on keyspace in callCounts and only
                // clean once we pass it.
                it.remove();
            }
        }

        // Update the total so that we remain in sync
        totalDecayedCallCount.set(totalDecayedCount);
        totalRawCallCount.set(totalRawCount);

        // Now refresh the cache of scheduling decisions
        recomputeScheduleCache();

        // Update average response time with decay
        updateAverageResponseTime(true);
    } catch (Exception ex) {
        LOG.error("decayCurrentCounts exception: " + ExceptionUtils.getFullStackTrace(ex));
        throw ex;
    }
}

From source file:org.apache.hadoop.hbase.client.TestClientPushback.java

@Test(timeout = 60000)
public void testClientTracksServerPushback() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
    HTable table = (HTable) conn.getTable(tableName);

    HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0);
    Region region = rs.getOnlineRegions(tableName).get(0);

    LOG.debug("Writing some data to " + tableName);
    // write some data
    Put p = new Put(Bytes.toBytes("row"));
    p.addColumn(family, qualifier, Bytes.toBytes("value1"));
    table.put(p);

    // get the current load on RS. Hopefully memstore isn't flushed since we wrote the data
    int load = (int) ((((HRegion) region).addAndGetGlobalMemstoreSize(0) * 100) / flushSizeBytes);
    LOG.debug("Done writing some data to " + tableName);

    // get the stats for the region hosting our table
    ClientBackoffPolicy backoffPolicy = conn.getBackoffPolicy();
    assertTrue("Backoff policy is not correctly configured",
            backoffPolicy instanceof ExponentialClientBackoffPolicy);

    ServerStatisticTracker stats = conn.getStatisticsTracker();
    assertNotNull("No stats configured for the client!", stats);
    // get the names so we can query the stats
    ServerName server = rs.getServerName();
    byte[] regionName = region.getRegionInfo().getRegionName();

    // check to see we found some load on the memstore
    ServerStatistics serverStats = stats.getServerStatsForTesting(server);
    ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
    assertEquals("We did not find some load on the memstore", load, regionStats.getMemstoreLoadPercent());

    // check that the load reported produces a nonzero delay
    long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats);
    assertNotEquals("Reported load does not produce a backoff", backoffTime, 0);
    LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + server
            + " is " + backoffTime);

    // Reach into the connection and submit work directly to AsyncProcess so we can
    // monitor how long the submission was delayed via a callback
    List<Row> ops = new ArrayList<Row>(1);
    ops.add(p);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicLong endTime = new AtomicLong();
    long startTime = EnvironmentEdgeManager.currentTime();

    table.mutator.ap.submit(tableName, ops, true, new Batch.Callback<Result>() {
        @Override
        public void update(byte[] region, byte[] row, Result result) {
            endTime.set(EnvironmentEdgeManager.currentTime());
            latch.countDown();
        }
    }, true);
    // Currently the ExponentialClientBackoffPolicy under these test conditions
    // produces a backoffTime of 151 milliseconds. This is long enough so the
    // wait and related checks below are reasonable. Revisit if the backoff
    // time reported by above debug logging has significantly deviated.
    latch.await(backoffTime * 2, TimeUnit.MILLISECONDS);
    assertNotEquals("AsyncProcess did not submit the work in time", endTime.get(), 0);
    assertTrue("AsyncProcess did not delay long enough", endTime.get() - startTime >= backoffTime);
}

From source file:be.dataminded.nifi.plugins.ExecuteOracleSQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).asInteger();

    final StopWatch stopWatch = new StopWatch(true);
    final String selectQuery;
    if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset())));
        selectQuery = queryContents.toString();
    }

    try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        st.setFetchSize(fetchSize); // hint fetch size
        final AtomicLong nrOfRows = new AtomicLong(0L);
        if (fileToProcess == null) {
            fileToProcess = session.create();
        }
        fileToProcess = session.write(fileToProcess, out -> {
            try {
                logger.debug("Executing query {}", new Object[] { selectQuery });
                final ResultSet resultSet = st.executeQuery(selectQuery);
                nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, convertNamesForAvro));
            } catch (final SQLException e) {
                throw new ProcessException(e);
            }
        });

        // set attribute how many rows were selected
        fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { fileToProcess, nrOfRows.get() });
        session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(fileToProcess, SUCCESS);
    } catch (final ProcessException | SQLException e) {
        if (fileToProcess == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, fileToProcess, e });
                fileToProcess = session.penalize(fileToProcess);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(fileToProcess, FAILURE);
        }
    }
}

From source file:org.lol.reddit.reddit.api.RedditAPIIndividualSubredditDataRequester.java

public void performRequest(final Collection<String> subredditCanonicalIds, final TimestampBound timestampBound,
        final RequestResponseHandler<HashMap<String, RedditSubreddit>, SubredditRequestFailure> handler) {

    // TODO if there's a bulk API to do this, that would be good... :)

    final HashMap<String, RedditSubreddit> result = new HashMap<String, RedditSubreddit>();
    final AtomicBoolean stillOkay = new AtomicBoolean(true);
    final AtomicInteger requestsToGo = new AtomicInteger(subredditCanonicalIds.size());
    final AtomicLong oldestResult = new AtomicLong(Long.MAX_VALUE);

    final RequestResponseHandler<RedditSubreddit, SubredditRequestFailure> innerHandler = new RequestResponseHandler<RedditSubreddit, SubredditRequestFailure>() {
        @Override
        public void onRequestFailed(SubredditRequestFailure failureReason) {
            synchronized (result) {
                if (stillOkay.get()) {
                    stillOkay.set(false);
                    handler.onRequestFailed(failureReason);
                }
            }
        }

        @Override
        public void onRequestSuccess(RedditSubreddit innerResult, long timeCached) {
            synchronized (result) {
                if (stillOkay.get()) {

                    result.put(innerResult.getKey(), innerResult);
                    oldestResult.set(Math.min(oldestResult.get(), timeCached));

                    if (requestsToGo.decrementAndGet() == 0) {
                        handler.onRequestSuccess(result, oldestResult.get());
                    }
                }
            }
        }
    };

    for (String subredditCanonicalId : subredditCanonicalIds) {
        performRequest(subredditCanonicalId, timestampBound, innerHandler);
    }
}

From source file:org.lendingclub.mercator.aws.ELBScanner.java

protected void mapElbToInstance(JsonNode instances, String elbArn, String region) {

    AtomicLong oldestRelationshipTs = new AtomicLong(Long.MAX_VALUE);
    for (JsonNode i : instances) {

        String instanceName = i.path("instanceId").asText();
        String instanceArn = String.format("arn:aws:ec2:%s:%s:instance/%s", region, getAccountId(),
                instanceName);
        // logger.info("{} instanceArn: {}",elbArn,instanceArn);
        String cypher = "match (x:AwsElb {aws_arn:{elbArn}}), (y:AwsEc2Instance {aws_arn:{instanceArn}}) "
                + "merge (x)-[r:DISTRIBUTES_TRAFFIC_TO]->(y) set r.updateTs=timestamp() return x,r,y";
        getNeoRxClient().execCypher(cypher, "elbArn", elbArn, "instanceArn", instanceArn).forEach(r -> {
            oldestRelationshipTs
                    .set(Math.min(r.path("r").path("updateTs").asLong(), oldestRelationshipTs.get()));
        });

        if (oldestRelationshipTs.get() > 0 && oldestRelationshipTs.get() < Long.MAX_VALUE) {
            cypher = "match (x:AwsElb {aws_arn:{elbArn}})-[r:DISTRIBUTES_TRAFFIC_TO]-(y:AwsEc2Instance) where r.updateTs<{oldest}  delete r";
            getNeoRxClient().execCypher(cypher, "elbArn", elbArn, "oldest", oldestRelationshipTs.get());
        }
    }
}

From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java

@Test
public void testDeliveringStats() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.start();
    Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
    MessageProducer producer = session.createProducer(session.createQueue(defaultQueueName));
    producer.send(session.createTextMessage("test"));

    verifyPendingStats(defaultQueueName, 1, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 1, publishedMessageSize.get());
    verifyDeliveringStats(defaultQueueName, 0, 0);

    MessageConsumer consumer = session.createConsumer(session.createQueue(defaultQueueName));
    Message msg = consumer.receive();
    verifyDeliveringStats(defaultQueueName, 1, publishedMessageSize.get());
    msg.acknowledge();

    verifyPendingStats(defaultQueueName, 0, 0);
    verifyPendingDurableStats(defaultQueueName, 0, 0);
    verifyDeliveringStats(defaultQueueName, 0, 0);

    connection.close();
}