Example usage for java.util.concurrent ConcurrentMap entrySet

Introduction

This page shows example usage of the java.util.concurrent ConcurrentMap entrySet() method, collected from real open source projects.

Prototype

Set<Map.Entry<K, V>> entrySet();

Document

Returns a Set view of the mappings contained in this map. The set is backed by the map, so changes to the map are reflected in the set and vice versa; in implementations such as ConcurrentHashMap, the view's iterators are weakly consistent and never throw ConcurrentModificationException.
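
Before the project examples under Usage below, here is a minimal, self-contained sketch (not taken from any of them) showing what the entrySet() view provides on a ConcurrentHashMap: iteration tolerates concurrent updates, and removals through the view write through to the map.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class EntrySetDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        // Weakly consistent iteration: safe even while other threads mutate the map.
        for (Map.Entry<String, Integer> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // The view is backed by the map: removing through it removes from the map.
        map.entrySet().removeIf(entry -> entry.getValue() > 1);
        System.out.println(map); // prints {a=1}
    }
}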

Usage

From source file: org.opendaylight.ovsdb.plugin.impl.ConfigurationServiceImpl.java

private UUID getCurrentControllerUuid(Node node, final String controllerTableName, final String target) {
    ConcurrentMap<String, Row> rows = this.getRows(node, controllerTableName);

    if (rows != null) {
        for (Map.Entry<String, Row> entry : rows.entrySet()) {
            Controller currController = this.getTypedRow(node, Controller.class, entry.getValue());
            Column<GenericTableSchema, String> column = currController.getTargetColumn();
            String currTarget = column.getData();
            if (currTarget != null && currTarget.equalsIgnoreCase(target)) {
                return currController.getUuid();
            }
        }
    }
    return null;
}
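
This method is a linear scan of the entrySet() view for the first row whose target column matches. The same scan-and-match shape, reduced to a generic, runnable sketch (hypothetical String types, not the OVSDB API):

import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public final class EntryScan {
    // Returns the key of the first entry whose value matches the target,
    // ignoring case. Iteration over entrySet() is weakly consistent, so
    // concurrent writers never cause a ConcurrentModificationException.
    public static Optional<String> findKeyByValue(ConcurrentMap<String, String> map, String target) {
        for (Map.Entry<String, String> entry : map.entrySet()) {
            if (entry.getValue().equalsIgnoreCase(target)) {
                return Optional.of(entry.getKey());
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        ConcurrentMap<String, String> rows = new ConcurrentHashMap<>();
        rows.put("uuid-1", "tcp:127.0.0.1:6633");
        System.out.println(findKeyByValue(rows, "TCP:127.0.0.1:6633")); // Optional[uuid-1]
    }
}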

From source file: io.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }

            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

        // always use compression
        transcoder.setCompressionThreshold(0);

        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }

        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All", "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)", "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure", "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };

        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));

                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };

        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
                .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT).setDaemon(true)
                .setFailureMode(FailureMode.Cancel).setTranscoder(transcoder).setShouldOptimize(true)
                .setOpQueueMaxBlockTime(config.getTimeout()).setOpTimeout(config.getTimeout())
                .setReadBufferSize(config.getReadBufferSize()).setOpQueueFactory(opQueueFactory)
                .setMetricCollector(metricCollector).setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();

        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

        if (config.getNumConnections() > 1) {
            clientSupplier = new LoadBalancingPool<MemcachedClientIF>(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(StupidResourceHolder
                    .<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
        }

        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}
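
A side note on the counter bookkeeping above: the putIfAbsent-then-get sequence in incrementCounter and its siblings is the pre-Java-8 idiom for lazily creating a shared counter without races. On Java 8 and later the same logic collapses into a single computeIfAbsent call; a sketch with illustrative names, not Druid's:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class CounterDemo {
    private final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();

    public void incrementCounter(String name) {
        // Atomically create-if-missing, then increment; at most one AtomicLong
        // is ever published per name, even under contention.
        counters.computeIfAbsent(name, k -> new AtomicLong(0L)).incrementAndGet();
    }
}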

From source file: com.github.podd.example.ExamplePoddClient.java

public Map<Path, String> uploadToStorage(final List<Path> bagsToUpload, final String sshServerFingerprint,
        final String sshHost, final int portNo, final String username, final Path pathToPublicKey,
        final Path localRootPath, final Path remoteRootPath, final PasswordFinder keyExtractor)
        throws PoddClientException, NoSuchAlgorithmException, IOException {
    final Map<Path, String> results = new ConcurrentHashMap<>();

    final ConcurrentMap<Path, ConcurrentMap<PoddDigestUtils.Algorithm, String>> digests = PoddDigestUtils
            .getDigests(bagsToUpload);

    try (SSHClient sshClient = new SSHClient(ExamplePoddClient.DEFAULT_CONFIG);) {
        sshClient.useCompression();
        sshClient.addHostKeyVerifier(sshServerFingerprint);
        sshClient.connect(sshHost, portNo);
        if (!Files.exists(pathToPublicKey)) {
            throw new PoddClientException("Could not find public key: " + pathToPublicKey);
        }
        if (!SecurityUtils.isBouncyCastleRegistered()) {
            throw new PoddClientException("Bouncy castle needed");
        }
        final FileKeyProvider rsa = new PKCS8KeyFile();
        rsa.init(pathToPublicKey.toFile(), keyExtractor);
        sshClient.authPublickey(username, rsa);
        // Session session = sshClient.startSession();
        try (SFTPClient sftp = sshClient.newSFTPClient();) {
            for (final Path nextBag : bagsToUpload) {
                // Check to make sure that the bag was under the local root path
                final Path localPath = nextBag.toAbsolutePath();
                if (!localPath.startsWith(localRootPath)) {
                    this.log.error(
                            "Local bag path was not a direct descendant of the local root path: {} {} {}",
                            localRootPath, nextBag, localPath);
                    throw new PoddClientException(
                            "Local bag path was not a direct descendant of the local root path: " + localPath
                                    + " " + localRootPath);
                }

                // Take the local root path out to get the subpath to use on the remote
                final Path remoteSubPath = localPath.subpath(localRootPath.getNameCount(),
                        nextBag.getNameCount() - 1);

                this.log.info("Remote sub path: {}", remoteSubPath);

                final Path remoteDirPath = remoteRootPath.resolve(remoteSubPath);
                this.log.info("Remote dir path: {}", remoteDirPath);

                final Path remoteBagPath = remoteDirPath.resolve(nextBag.getFileName());

                this.log.info("Remote bag path: {}", remoteBagPath);

                boolean fileFound = false;
                boolean sizeCorrect = false;
                try {
                    // check details of a remote bag
                    final FileAttributes attribs = sftp.lstat(remoteBagPath.toAbsolutePath().toString());
                    final long localSize = Files.size(nextBag);
                    final long remoteSize = attribs.getSize();

                    if (localSize <= 0) {
                        this.log.error("Local bag was empty: {}", nextBag);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (remoteSize <= 0) {
                        this.log.warn("Remote bag was empty: {} {}", nextBag, attribs);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (localSize == remoteSize) {
                        this.log.info("Found file on remote already with same size as local: {} {}", nextBag,
                                remoteBagPath);
                        sizeCorrect = true;
                        fileFound = true;
                    } else {
                        sizeCorrect = false;
                        fileFound = true;
                        // We always assume that a non-zero local file is correct
                        // The bags contain time-stamps that will be modified when they are
                        // regenerated, likely changing the file-size, and hopefully changing
                        // the digest checksums
                        // throw new PoddClientException(
                        // "Could not automatically compare file sizes (need manual intervention to delete one) : "
                        // + nextBag + " " + remoteBagPath + " localSize=" + localSize
                        // + " remoteSize=" + remoteSize);
                    }
                } catch (final IOException e) {
                    // lstat() throws an IOException if the file does not exist
                    // Ignore
                    sizeCorrect = false;
                    fileFound = false;
                }

                final ConcurrentMap<Algorithm, String> bagDigests = digests.get(nextBag);
                if (bagDigests.isEmpty()) {
                    this.log.error("No bag digests were generated for bag: {}", nextBag);
                }
                for (final Entry<Algorithm, String> entry : bagDigests.entrySet()) {
                    final Path localDigestPath = localPath
                            .resolveSibling(localPath.getFileName() + entry.getKey().getExtension());
                    // Create the local digest file
                    Files.copy(
                            new ReaderInputStream(new StringReader(entry.getValue()), StandardCharsets.UTF_8),
                            localDigestPath);
                    final Path remoteDigestPath = remoteBagPath
                            .resolveSibling(remoteBagPath.getFileName() + entry.getKey().getExtension());
                    boolean nextDigestFileFound = false;
                    boolean nextDigestCorrect = false;
                    try {
                        final Path tempFile = Files.createTempFile("podd-digest-",
                                entry.getKey().getExtension());
                        final SFTPFileTransfer sftpFileTransfer = new SFTPFileTransfer(sftp.getSFTPEngine());
                        // download the remote digest file (not the bag itself) so its
                        // content can be compared against the locally computed digest
                        sftpFileTransfer.download(remoteDigestPath.toAbsolutePath().toString(),
                                tempFile.toAbsolutePath().toString());
                        nextDigestFileFound = true;

                        final List<String> allLines = Files.readAllLines(tempFile, StandardCharsets.UTF_8);
                        if (allLines.isEmpty()) {
                            nextDigestCorrect = false;
                        } else if (allLines.size() > 1) {
                            nextDigestCorrect = false;
                        }
                        // Check if the digests match exactly
                        else if (allLines.get(0).equals(entry.getValue())) {
                            nextDigestCorrect = true;
                        } else {
                            nextDigestCorrect = false;
                        }
                    } catch (final IOException e) {
                        nextDigestFileFound = false;
                        nextDigestCorrect = false;
                    }
                    if (nextDigestFileFound && nextDigestCorrect) {
                        this.log.info(
                                "Not copying digest to remote as it exists and contains the same content as the local digest");
                    } else if (nextDigestFileFound && !nextDigestCorrect) {
                        this.log.error("Found remote digest but content was not correct: {} {}",
                                localDigestPath, remoteDigestPath);
                        sftp.rm(remoteDigestPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    } else if (!nextDigestFileFound) {
                        this.log.info("About to make directories on remote: {}", remoteDirPath);
                        sftp.mkdirs(remoteDirPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    }
                }

                if (fileFound && sizeCorrect) {
                    this.log.info("Not copying bag to remote as it exists and is the same size as local bag");
                } else if (fileFound && !sizeCorrect) {
                    this.log.error("Found remote bag but size was not correct: {} {}", nextBag, remoteBagPath);
                    sftp.rm(remoteBagPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                } else if (!fileFound) {
                    this.log.info("About to make directories on remote: {}", remoteDirPath);
                    sftp.mkdirs(remoteDirPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                }

            }
        }
    } catch (final IOException e) {
        throw new PoddClientException("Could not copy a bag to the remote location", e);
    }

    return results;
}
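
PoddDigestUtils.getDigests is not shown in this excerpt. Purely to illustrate the nested ConcurrentMap shape it returns (one inner map of algorithm to digest per file), a hypothetical stand-in using only the JDK might look like the following; the real class may well differ.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public final class DigestSketch {
    // Computes MD5 and SHA-1 hex digests for each file and collects them into
    // a ConcurrentMap keyed by path, mirroring the structure used above.
    public static ConcurrentMap<Path, ConcurrentMap<String, String>> getDigests(List<Path> paths)
            throws IOException, NoSuchAlgorithmException {
        ConcurrentMap<Path, ConcurrentMap<String, String>> result = new ConcurrentHashMap<>();
        for (Path path : paths) {
            ConcurrentMap<String, String> perFile = new ConcurrentHashMap<>();
            for (String algorithm : new String[] { "MD5", "SHA-1" }) {
                MessageDigest md = MessageDigest.getInstance(algorithm);
                try (InputStream in = Files.newInputStream(path)) {
                    byte[] buffer = new byte[8192];
                    int read;
                    while ((read = in.read(buffer)) != -1) {
                        md.update(buffer, 0, read);
                    }
                }
                StringBuilder hex = new StringBuilder();
                for (byte b : md.digest()) {
                    hex.append(String.format("%02x", b));
                }
                perFile.put(algorithm, hex.toString());
            }
            result.put(path, perFile);
        }
        return result;
    }
}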

From source file: com.weibo.api.motan.util.StatsUtil.java

public static void logAccessStatistic(boolean clear) {
    DecimalFormat mbFormat = new DecimalFormat("#0.00");
    long currentTimeMillis = System.currentTimeMillis();

    ConcurrentMap<String, AccessStatisticResult> totalResults = new ConcurrentHashMap<String, AccessStatisticResult>();

    for (Map.Entry<String, AccessStatisticItem> entry : accessStatistics.entrySet()) {
        AccessStatisticItem item = entry.getValue();

        AccessStatisticResult result = item.getStatisticResult(currentTimeMillis,
                MotanConstants.STATISTIC_PEROID);

        if (clear) {
            item.clearStatistic(currentTimeMillis, MotanConstants.STATISTIC_PEROID);
        }

        String key = entry.getKey();
        String[] keys = key.split(SEPARATE);
        if (keys.length != 3) {
            continue;
        }
        String application = keys[1];
        String module = keys[2];
        key = application + "|" + module;
        AccessStatisticResult appResult = totalResults.get(key);
        if (appResult == null) {
            totalResults.putIfAbsent(key, new AccessStatisticResult());
            appResult = totalResults.get(key);
        }

        appResult.totalCount += result.totalCount;
        appResult.bizExceptionCount += result.bizExceptionCount;
        appResult.slowCount += result.slowCount;
        appResult.costTime += result.costTime;
        appResult.bizTime += result.bizTime;
        appResult.otherExceptionCount += result.otherExceptionCount;

        Snapshot snapshot = InternalMetricsFactory.getRegistryInstance(entry.getKey()).histogram(HISTOGRAM_NAME)
                .getSnapshot();

        if (application.equals(APPLICATION_STATISTIC)) {
            continue;
        }
        if (result.totalCount == 0) {
            LoggerUtil.accessStatsLog("[motan-accessStatistic] app: " + application + " module: " + module
                    + " item: " + keys[0]
                    + " total_count: 0 slow_count: 0 biz_excp: 0 other_excp: 0 avg_time: 0.00ms biz_time: 0.00ms avg_tps: 0 max_tps: 0 min_tps: 0");
        } else {
            LoggerUtil.accessStatsLog(
                    "[motan-accessStatistic] app: {} module: {} item: {} total_count: {} slow_count: {} p75: {} p95: {} p98: {} p99: {} p999: {} biz_excp: {} other_excp: {} avg_time: {}ms biz_time: {}ms avg_tps: {} max_tps: {} min_tps: {} ",
                    application, module, keys[0], result.totalCount, result.slowCount,
                    mbFormat.format(snapshot.get75thPercentile()),
                    mbFormat.format(snapshot.get95thPercentile()),
                    mbFormat.format(snapshot.get98thPercentile()),
                    mbFormat.format(snapshot.get99thPercentile()),
                    mbFormat.format(snapshot.get999thPercentile()), result.bizExceptionCount,
                    result.otherExceptionCount, mbFormat.format(result.costTime / result.totalCount),
                    mbFormat.format(result.bizTime / result.totalCount),
                    (result.totalCount / MotanConstants.STATISTIC_PEROID), result.maxCount, result.minCount);
        }

    }

    if (!totalResults.isEmpty()) {
        for (Map.Entry<String, AccessStatisticResult> entry : totalResults.entrySet()) {
            String application = entry.getKey().split(SEPARATE)[0];
            String module = entry.getKey().split(SEPARATE)[1];
            AccessStatisticResult totalResult = entry.getValue();
            Snapshot snapshot = InternalMetricsFactory.getRegistryInstance(entry.getKey())
                    .histogram(HISTOGRAM_NAME).getSnapshot();
            if (totalResult.totalCount > 0) {
                LoggerUtil.accessStatsLog(
                        "[motan-totalAccessStatistic] app: {} module: {} total_count: {} slow_count: {} p75: {} p95: {} p98: {} p99: {} p999: {} biz_excp: {} other_excp: {} avg_time: {}ms biz_time: {}ms avg_tps: {}",
                        application, module, totalResult.totalCount, totalResult.slowCount,
                        mbFormat.format(snapshot.get75thPercentile()),
                        mbFormat.format(snapshot.get95thPercentile()),
                        mbFormat.format(snapshot.get98thPercentile()),
                        mbFormat.format(snapshot.get99thPercentile()),
                        mbFormat.format(snapshot.get999thPercentile()), totalResult.bizExceptionCount,
                        totalResult.otherExceptionCount,
                        mbFormat.format(totalResult.costTime / totalResult.totalCount),
                        mbFormat.format(totalResult.bizTime / totalResult.totalCount),
                        (totalResult.totalCount / MotanConstants.STATISTIC_PEROID));
            } else {
                LoggerUtil.accessStatsLog("[motan-totalAccessStatistic] app: " + application + " module: "
                        + module
                        + " total_count: 0 slow_count: 0 biz_excp: 0 other_excp: 0 avg_time: 0.00ms biz_time: 0.00ms avg_tps: 0");
            }

        }
    } else {
        LoggerUtil.accessStatsLog("[motan-totalAccessStatistic] app: " + URLParamType.application.getValue()
                + " module: " + URLParamType.module.getValue()
                + " total_count: 0 slow_count: 0 biz_excp: 0 other_excp: 0 avg_time: 0.00ms biz_time: 0.00ms avg_tps: 0");
    }

}
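
The totalResults accumulation above uses the same get/putIfAbsent/get idiom seen in the Druid example. On Java 8 and later it can be written as one computeIfAbsent call; a sketch with LongAdder standing in for the mutable per-key totals (illustrative names, not Motan code):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.LongAdder;

public class AggregationDemo {
    private final ConcurrentMap<String, LongAdder> totals = new ConcurrentHashMap<>();

    public void add(String key, long count) {
        // Create the accumulator on first use, then add; both steps are
        // race-free without any explicit locking.
        totals.computeIfAbsent(key, k -> new LongAdder()).add(count);
    }
}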

From source file: org.apache.ambari.server.orm.dao.AlertsDAO.java

/**
 * Writes all cached {@link AlertCurrentEntity} instances to the database and
 * clears the cache.
 */
@Transactional
public void flushCachedEntitiesToJPA() {
    if (!m_configuration.isAlertCacheEnabled()) {
        LOG.warn("Unable to flush cached alerts to JPA because caching is not enabled");
        return;
    }

    // capture for logging purposes
    long cachedEntityCount = m_currentAlertCache.size();

    ConcurrentMap<AlertCacheKey, AlertCurrentEntity> map = m_currentAlertCache.asMap();
    Set<Entry<AlertCacheKey, AlertCurrentEntity>> entries = map.entrySet();
    for (Entry<AlertCacheKey, AlertCurrentEntity> entry : entries) {
        merge(entry.getValue());
    }

    m_currentAlertCache.invalidateAll();

    LOG.info("Flushed {} cached alerts to the database", cachedEntityCount);
}
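
The flush works because Guava's Cache.asMap() exposes the live cache contents as a ConcurrentMap, so persisting is just an entrySet() walk followed by invalidateAll(). A minimal sketch of the same pattern, assuming Guava on the classpath and with the merge() call replaced by a println:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;

public class CacheFlushDemo {
    public static void main(String[] args) {
        Cache<String, String> cache = CacheBuilder.newBuilder().build();
        cache.put("alert-1", "CRITICAL");

        // asMap() is a live ConcurrentMap view of the cache.
        ConcurrentMap<String, String> view = cache.asMap();
        for (Map.Entry<String, String> entry : view.entrySet()) {
            System.out.println("persisting " + entry.getKey() + " -> " + entry.getValue());
        }
        cache.invalidateAll();
    }
}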

From source file: org.apache.blur.command.BaseCommandManager.java

private void cancelAllExecuting(String commandExecutionId, ConcurrentMap<Long, ResponseFuture<?>> runningMap) {
    for (Entry<Long, ResponseFuture<?>> e : runningMap.entrySet()) {
        Long instanceExecutionId = e.getKey();
        ResponseFuture<?> future = e.getValue();
        Command<?> commandExecuting = future.getCommandExecuting();
        if (commandExecuting.getCommandExecutionId().equals(commandExecutionId)) {
            LOG.info("Canceling Command with executing id [{0}] command [{1}]", instanceExecutionId,
                    commandExecuting);
            future.cancel(true);
        }
    }
}
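
Cancelling during iteration is safe here because the entrySet() iterators of a ConcurrentMap are weakly consistent. If the loop also had to evict entries, the conditional remove(key, value) form avoids deleting a mapping that another thread has since replaced; a simplified sketch (String values stand in for ResponseFuture):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class CancelDemo {
    public static void cancelMatching(ConcurrentMap<Long, String> running, String commandId) {
        for (Map.Entry<Long, String> entry : running.entrySet()) {
            if (entry.getValue().equals(commandId)) {
                // Conditional remove: only succeeds if the mapping is still
                // the exact entry this iteration observed.
                running.remove(entry.getKey(), entry.getValue());
            }
        }
    }
}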

From source file: org.apache.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }

            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

        // always use compression
        transcoder.setCompressionThreshold(0);

        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }

        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All", "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)", "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure", "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };

        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));

                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };

        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol
                        .valueOf(StringUtils.toUpperCase(config.getProtocol())))
                .setLocatorType(
                        ConnectionFactoryBuilder.Locator.valueOf(StringUtils.toUpperCase(config.getLocator())))
                .setDaemon(true).setFailureMode(FailureMode.Cancel).setTranscoder(transcoder)
                .setShouldOptimize(true).setOpQueueMaxBlockTime(config.getTimeout())
                .setOpTimeout(config.getTimeout()).setReadBufferSize(config.getReadBufferSize())
                .setOpQueueFactory(opQueueFactory).setMetricCollector(metricCollector)
                .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();

        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

        if (config.getNumConnections() > 1) {
            clientSupplier = new MemcacheClientPool(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers
                    .ofInstance(StupidResourceHolder.create(new MemcachedClient(connectionFactory, hosts)));
        }

        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file: org.apache.flink.yarn.YARNHighAvailabilityITCase.java

/**
 * Stops a container running {@link YarnTaskExecutorRunner}.
 */
private void stopTaskManagerContainer() throws Exception {
    // find container id of taskManager:
    ContainerId taskManagerContainer = null;
    NodeManager nodeManager = null;
    NMTokenIdentifier nmIdent = null;
    UserGroupInformation remoteUgi = UserGroupInformation.getCurrentUser();

    for (int nmId = 0; nmId < NUM_NODEMANAGERS; nmId++) {
        NodeManager nm = yarnCluster.getNodeManager(nmId);
        ConcurrentMap<ContainerId, Container> containers = nm.getNMContext().getContainers();
        for (Map.Entry<ContainerId, Container> entry : containers.entrySet()) {
            String command = StringUtils.join(entry.getValue().getLaunchContext().getCommands(), " ");
            if (command.contains(YarnTaskExecutorRunner.class.getSimpleName())) {
                taskManagerContainer = entry.getKey();
                nodeManager = nm;
                nmIdent = new NMTokenIdentifier(taskManagerContainer.getApplicationAttemptId(), null, "", 0);
                // allow myself to do stuff with the container
                // remoteUgi.addCredentials(entry.getValue().getCredentials());
                remoteUgi.addTokenIdentifier(nmIdent);
            }
        }
    }

    assertNotNull("Unable to find container with TaskManager", taskManagerContainer);
    assertNotNull("Illegal state", nodeManager);

    StopContainersRequest scr = StopContainersRequest
            .newInstance(Collections.singletonList(taskManagerContainer));

    nodeManager.getNMContext().getContainerManager().stopContainers(scr);

    // cleanup auth for the subsequent tests.
    remoteUgi.getTokenIdentifiers().remove(nmIdent);
}

From source file: org.apache.flink.yarn.YARNSessionCapacitySchedulerITCase.java

/**
 * Test TaskManager failure and also if the vcores are set correctly (see issue FLINK-2213).
 */
@Test(timeout = 100000) // timeout after 100 seconds
public void testTaskManagerFailure() {
    LOG.info("Starting testTaskManagerFailure()");
    Runner runner = startWithArgs(
            new String[] { "-j", flinkUberjar.getAbsolutePath(), "-t", flinkLibFolder.getAbsolutePath(), "-n",
                    "1", "-jm", "768", "-tm", "1024", "-s", "3", // set the slots 3 to check if the vCores are set properly!
                    "-nm", "customName", "-Dfancy-configuration-value=veryFancy",
                    "-Dyarn.maximum-failed-containers=3", "-D" + ConfigConstants.YARN_VCORES + "=2" },
            "Number of connected TaskManagers changed to 1. Slots available: 3", RunTypes.YARN_SESSION);

    Assert.assertEquals(2, getRunningContainers());

    // ------------------------ Test if JobManager web interface is accessible -------

    YarnClient yc = null;
    try {
        yc = YarnClient.createYarnClient();
        yc.init(yarnConfiguration);
        yc.start();

        List<ApplicationReport> apps = yc.getApplications(EnumSet.of(YarnApplicationState.RUNNING));
        Assert.assertEquals(1, apps.size()); // Only one running
        ApplicationReport app = apps.get(0);
        Assert.assertEquals("customName", app.getName());
        String url = app.getTrackingUrl();
        if (!url.endsWith("/")) {
            url += "/";
        }
        if (!url.startsWith("http://")) {
            url = "http://" + url;
        }
        LOG.info("Got application URL from YARN {}", url);

        String response = TestBaseUtils.getFromHTTP(url + "taskmanagers/");

        JsonNode parsedTMs = new ObjectMapper().readTree(response);
        ArrayNode taskManagers = (ArrayNode) parsedTMs.get("taskmanagers");
        Assert.assertNotNull(taskManagers);
        Assert.assertEquals(1, taskManagers.size());
        Assert.assertEquals(3, taskManagers.get(0).get("slotsNumber").asInt());

        // get the configuration from webinterface & check if the dynamic properties from YARN show up there.
        String jsonConfig = TestBaseUtils.getFromHTTP(url + "jobmanager/config");
        Map<String, String> parsedConfig = WebMonitorUtils.fromKeyValueJsonArray(jsonConfig);

        Assert.assertEquals("veryFancy", parsedConfig.get("fancy-configuration-value"));
        Assert.assertEquals("3", parsedConfig.get("yarn.maximum-failed-containers"));
        Assert.assertEquals("2", parsedConfig.get(ConfigConstants.YARN_VCORES));

        // -------------- FLINK-1902: check if jobmanager hostname/port are shown in web interface
        // first, get the hostname/port
        String oC = outContent.toString();
        Pattern p = Pattern.compile("Flink JobManager is now running on ([a-zA-Z0-9.-]+):([0-9]+)");
        Matcher matches = p.matcher(oC);
        String hostname = null;
        String port = null;
        while (matches.find()) {
            hostname = matches.group(1).toLowerCase();
            port = matches.group(2);
        }
        LOG.info("Extracted hostname:port: {} {}", hostname, port);

        Assert.assertEquals("unable to find hostname in " + jsonConfig, hostname,
                parsedConfig.get(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY));
        Assert.assertEquals("unable to find port in " + jsonConfig, port,
                parsedConfig.get(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY));

        // test logfile access
        String logs = TestBaseUtils.getFromHTTP(url + "jobmanager/log");
        Assert.assertTrue(logs.contains("Starting YARN ApplicationMaster"));
        Assert.assertTrue(logs.contains("Starting JobManager"));
        Assert.assertTrue(logs.contains("Starting JobManager Web Frontend"));
    } catch (Throwable e) {
        LOG.warn("Error while running test", e);
        Assert.fail(e.getMessage());
    }

    // ------------------------ Kill container with TaskManager and check if vcores are set correctly -------

    // find container id of taskManager:
    ContainerId taskManagerContainer = null;
    NodeManager nodeManager = null;
    UserGroupInformation remoteUgi = null;
    NMTokenIdentifier nmIdent = null;
    try {
        remoteUgi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        LOG.warn("Unable to get curr user", e);
        Assert.fail();
    }
    for (int nmId = 0; nmId < NUM_NODEMANAGERS; nmId++) {
        NodeManager nm = yarnCluster.getNodeManager(nmId);
        ConcurrentMap<ContainerId, Container> containers = nm.getNMContext().getContainers();
        for (Map.Entry<ContainerId, Container> entry : containers.entrySet()) {
            String command = Joiner.on(" ").join(entry.getValue().getLaunchContext().getCommands());
            if (command.contains(YarnTaskManager.class.getSimpleName())) {
                taskManagerContainer = entry.getKey();
                nodeManager = nm;
                nmIdent = new NMTokenIdentifier(taskManagerContainer.getApplicationAttemptId(), null, "", 0);
                // allow myself to do stuff with the container
                // remoteUgi.addCredentials(entry.getValue().getCredentials());
                remoteUgi.addTokenIdentifier(nmIdent);
            }
        }
        sleep(500);
    }

    Assert.assertNotNull("Unable to find container with TaskManager", taskManagerContainer);
    Assert.assertNotNull("Illegal state", nodeManager);

    yc.stop();

    List<ContainerId> toStop = new LinkedList<ContainerId>();
    toStop.add(taskManagerContainer);
    StopContainersRequest scr = StopContainersRequest.newInstance(toStop);

    try {
        nodeManager.getNMContext().getContainerManager().stopContainers(scr);
    } catch (Throwable e) {
        LOG.warn("Error stopping container", e);
        Assert.fail("Error stopping container: " + e.getMessage());
    }

    // stateful termination check:
    // wait until we saw a container being killed and AFTERWARDS a new one launched
    boolean ok = false;
    do {
        LOG.debug("Waiting for correct order of events. Output: {}", errContent.toString());

        String o = errContent.toString();
        int killedOff = o.indexOf("Container killed by the ApplicationMaster");
        if (killedOff != -1) {
            o = o.substring(killedOff);
            ok = o.indexOf("Launching TaskManager") > 0;
        }
        sleep(1000);
    } while (!ok);

    // send "stop" command to command line interface
    runner.sendStop();
    // wait for the thread to stop
    try {
        runner.join(1000);
    } catch (InterruptedException e) {
        LOG.warn("Interrupted while stopping runner", e);
    }
    LOG.warn("stopped");

    // ----------- Send output to logger
    System.setOut(originalStdout);
    System.setErr(originalStderr);
    String oC = outContent.toString();
    String eC = errContent.toString();
    LOG.info("Sending stdout content through logger: \n\n{}\n\n", oC);
    LOG.info("Sending stderr content through logger: \n\n{}\n\n", eC);

    // ------ Check if everything happened correctly
    Assert.assertTrue("Expect to see failed container", eC.contains("New messages from the YARN cluster"));

    Assert.assertTrue("Expect to see failed container",
            eC.contains("Container killed by the ApplicationMaster"));

    Assert.assertTrue("Expect to see new container started",
            eC.contains("Launching TaskManager") && eC.contains("on host"));

    // cleanup auth for the subsequent tests.
    remoteUgi.getTokenIdentifiers().remove(nmIdent);

    LOG.info("Finished testTaskManagerFailure()");
}

From source file: org.apache.hadoop.hbase.client.MetaCache.java

/**
 * Delete all cached entries of a server.
 */
public void clearCache(final ServerName serverName) {
    if (!this.cachedServers.contains(serverName)) {
        return;
    }

    boolean deletedSomething = false;
    synchronized (this.cachedServers) {
        // We block here, because if there is an error on a server, it's likely that multiple
        //  threads will get the error  simultaneously. If there are hundreds of thousand of
        //  region location to check, it's better to do this only once. A better pattern would
        //  be to check if the server is dead when we get the region location.
        if (!this.cachedServers.contains(serverName)) {
            return;
        }
        for (ConcurrentMap<byte[], RegionLocations> tableLocations : cachedRegionLocations.values()) {
            for (Entry<byte[], RegionLocations> e : tableLocations.entrySet()) {
                RegionLocations regionLocations = e.getValue();
                if (regionLocations != null) {
                    RegionLocations updatedLocations = regionLocations.removeByServer(serverName);
                    if (updatedLocations != regionLocations) {
                        if (updatedLocations.isEmpty()) {
                            deletedSomething |= tableLocations.remove(e.getKey(), regionLocations);
                        } else {
                            deletedSomething |= tableLocations.replace(e.getKey(), regionLocations,
                                    updatedLocations);
                        }
                    }
                }
            }
        }
        this.cachedServers.remove(serverName);
    }
    if (deletedSomething && LOG.isTraceEnabled()) {
        LOG.trace("Removed all cached region locations that map to " + serverName);
    }
}
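
The loop above leans on the ConcurrentMap compare-and-swap primitives: remove(key, expected) and replace(key, expected, updated) only take effect if the mapping still holds the value that was read, so a concurrent update by another thread wins rather than being silently overwritten. A reduced sketch of that pattern:

import java.util.concurrent.ConcurrentMap;

public class CasUpdateDemo {
    // Trims a value in place; returns true only if this thread's update won.
    public static boolean shrink(ConcurrentMap<String, String> map, String key) {
        String current = map.get(key);
        if (current == null) {
            return false;
        }
        String updated = current.trim();
        if (updated.isEmpty()) {
            return map.remove(key, current);       // succeeds only if unchanged
        }
        return map.replace(key, current, updated); // succeeds only if unchanged
    }
}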