Example usage for java.util.concurrent ConcurrentMap putIfAbsent

List of usage examples for java.util.concurrent ConcurrentMap putIfAbsent

Introduction

On this page you can find example usage for java.util.concurrent ConcurrentMap putIfAbsent.

Prototype

V putIfAbsent(K key, V value);

Document

If the specified key is not already associated with a value, associates it with the given value and returns null; otherwise leaves the map unchanged and returns the value currently associated with the key.
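
Because putIfAbsent returns the previous value for the key (or null if the key was absent), the examples below all follow the same idiom: create a candidate value, call putIfAbsent, and if a non-null previous value comes back, discard the candidate and use the value that was already in the map. A minimal, self-contained sketch of that behaviour (the class and variable names here are illustrative only):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutIfAbsentIdiom {
    public static void main(String[] args) {
        final ConcurrentMap<String, StringBuilder> buffers = new ConcurrentHashMap<>();

        // Key is absent: the candidate is stored atomically and null is returned.
        StringBuilder candidate = new StringBuilder();
        StringBuilder previous = buffers.putIfAbsent("request-1", candidate);
        StringBuilder buffer = (previous != null) ? previous : candidate;
        buffer.append("first");

        // Key is now present: the map is left unchanged and the stored value is returned,
        // so both callers end up appending to the same StringBuilder.
        StringBuilder another = new StringBuilder();
        StringBuilder existing = buffers.putIfAbsent("request-1", another);
        StringBuilder sameBuffer = (existing != null) ? existing : another;
        sameBuffer.append(", second");

        System.out.println(buffers.get("request-1")); // first, second
    }
}

Unlike a plain get followed by put, the check and the insertion happen as a single atomic operation, which is why the snippets below can safely populate shared caches from multiple threads without overwriting each other's entries.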

Usage

From source file:org.rifidi.edge.rest.SensorManagerServiceRestletImpl.java

static Series<Header> getMessageHeaders(Message message) {
    ConcurrentMap<String, Object> attrs = message.getAttributes();
    Series<Header> headers = (Series<Header>) attrs.get(HEADERS_KEY);
    if (headers == null) {
        headers = new Series<Header>(Header.class);
        Series<Header> prev = (Series<Header>) attrs.putIfAbsent(HEADERS_KEY, headers);
        if (prev != null) {
            headers = prev;
        }
    }
    return headers;
}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * Generates the RDF triples necessary for the given TrayScan parameters and adds the details to
 * the relevant model in the upload queue.
 *
 * @param projectUriMap
 *            A map of relevant project URIs and their artifact identifiers using their
 *            standardised labels.
 * @param experimentUriMap
 *            A map of relevant experiment URIs and their project URIs using their standardised
 *            labels.
 * @param trayUriMap
 *            A map of relevant tray URIs and their experiment URIs using their standardised
 *            labels.
 * @param potUriMap
 *            A map of relevant pot URIs and their tray URIs using their standardised labels.
 * @param uploadQueue
 *            The upload queue containing all of the models to be uploaded.
 * @param projectYear
 *            The TrayScan parameter detailing the project year for the next tray.
 * @param projectNumber
 *            The TrayScan parameter detailing the project number for the next tray.
 * @param experimentNumber
 *            The TrayScan parameter detailing the experiment number for the next tray.
 * @param plantName
 *            The name of this plant
 * @param plantNotes
 *            Specific notes about this plant
 * @param species
 *            The species for the current line.
 * @param genus
 *            The genus for the current line.
 * @throws PoddClientException
 *             If there is a PODD Client exception.
 * @throws GraphUtilException
 *             If there was an ill-formed graph.
 */
public void generateTrayRDF(
        final ConcurrentMap<String, ConcurrentMap<URI, InferredOWLOntologyID>> projectUriMap,
        final ConcurrentMap<String, ConcurrentMap<URI, URI>> experimentUriMap,
        final ConcurrentMap<String, ConcurrentMap<URI, URI>> trayUriMap,
        final ConcurrentMap<String, ConcurrentMap<URI, URI>> potUriMap,
        final ConcurrentMap<URI, ConcurrentMap<URI, Model>> materialUriMap,
        final ConcurrentMap<URI, ConcurrentMap<URI, Model>> genotypeUriMap,
        final ConcurrentMap<InferredOWLOntologyID, Model> uploadQueue, final ExampleCSVLine nextLine)
        throws PoddClientException, GraphUtilException {
    Objects.requireNonNull(nextLine, "Line was null");
    Objects.requireNonNull(nextLine.projectID, "ProjectID in line was null");

    final Map<URI, InferredOWLOntologyID> projectDetails = this.getProjectDetails(projectUriMap,
            nextLine.projectID);

    final URI nextProjectUri = projectDetails.keySet().iterator().next();
    final InferredOWLOntologyID nextProjectID = projectDetails.get(nextProjectUri);

    this.log.debug("Found PODD Project name to URI mapping: {} {}", nextLine.projectID, projectDetails);

    final Map<URI, URI> experimentDetails = this.getExperimentDetails(nextLine.experimentID);

    final URI nextExperimentUri = experimentDetails.keySet().iterator().next();

    // Create or find an existing model for the necessary modifications to this
    // project/artifact
    Model nextResult = new LinkedHashModel();
    final Model putIfAbsent = uploadQueue.putIfAbsent(nextProjectID, nextResult);
    if (putIfAbsent != null) {
        nextResult = putIfAbsent;
    }

    final URI nextTrayUri = this.getTrayUri(trayUriMap, nextLine.trayID, nextProjectID, nextExperimentUri);

    // Check whether plantId already has an assigned URI
    final URI nextPotUri = this.getPotUri(potUriMap, nextLine.plantID, nextProjectID, nextTrayUri);

    // Check whether genus/species/plantName already has an assigned URI (and automatically
    // assign a temporary URI if it does not)
    final URI nextGenotypeUri = this.getGenotypeUri(genotypeUriMap, nextLine.genus, nextLine.species,
            nextLine.plantName, nextLine.plantLineNumber, nextLine.control, nextProjectID, nextProjectUri);

    // Check whether the material for the given genotype for the given pot already has an
    // assigned URI (and automatically assign a temporary URI if it does not)
    final URI nextMaterialUri = this.getMaterialUri(materialUriMap, nextGenotypeUri, nextProjectID, nextPotUri,
            nextLine.potNumber, nextLine.plantLineNumber, nextLine.control);

    // Add new poddScience:Container for tray
    nextResult.add(nextTrayUri, RDF.TYPE, PODD.PODD_SCIENCE_TRAY);
    // Link tray to experiment
    nextResult.add(nextExperimentUri, PODD.PODD_SCIENCE_HAS_TRAY, nextTrayUri);
    // TrayID => Add poddScience:hasBarcode to tray
    nextResult.add(nextTrayUri, PODD.PODD_SCIENCE_HAS_BARCODE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayID));
    // TrayNotes => Add rdfs:label to tray
    nextResult.add(nextTrayUri, RDFS.LABEL, RestletPoddClientImpl.vf.createLiteral(nextLine.trayNotes));
    // TrayTypeName => Add poddScience:hasTrayType to tray
    nextResult.add(nextTrayUri, PODD.PODD_SCIENCE_HAS_TRAY_TYPE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayTypeName));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_TRAY_NUMBER,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_TRAY_ROW_NUMBER,
            RestletPoddClientImpl.vf.createLiteral(nextLine.trayRowNumber, XMLSchema.STRING));

    // Add new poddScience:Container for pot
    nextResult.add(nextPotUri, RDF.TYPE, PODD.PODD_SCIENCE_POT);
    // Link pot to tray
    nextResult.add(nextTrayUri, PODD.PODD_SCIENCE_HAS_POT, nextPotUri);

    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_MATERIAL, nextMaterialUri);

    // PlantID => Add poddScience:hasBarcode to pot
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_BARCODE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.plantID));

    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_NUMBER,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_TYPE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potType, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_NUMBER_OVERALL,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_NUMBER_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnNumberRep, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_NUMBER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnNumberTray, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_COLUMN_LETTER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.columnLetter));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_POSITION_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.position));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_NUMBER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potNumberTray, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_NUMBER_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.potReplicateNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.replicateNumber, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_ROW_NUMBER_REPLICATE,
            RestletPoddClientImpl.vf.createLiteral(nextLine.rowNumberRep, XMLSchema.STRING));
    nextResult.add(nextPotUri, PODD.PODD_SCIENCE_HAS_POT_ROW_NUMBER_TRAY,
            RestletPoddClientImpl.vf.createLiteral(nextLine.rowNumberTray, XMLSchema.STRING));

    if (nextGenotypeUri.stringValue().startsWith(RestletPoddClientImpl.TEMP_UUID_PREFIX)) {
        // Add all of the statements for the genotype to the update to make sure that temporary
        // descriptions are added
        nextResult.addAll(genotypeUriMap.get(nextProjectUri).get(nextGenotypeUri));
    }

    if (nextMaterialUri.stringValue().startsWith(RestletPoddClientImpl.TEMP_UUID_PREFIX)) {
        // Add all of the statements for the material to the update to make sure that temporary
        // descriptions are added
        nextResult.addAll(materialUriMap.get(nextPotUri).get(nextMaterialUri));
    }

    String potLabel;
    // PlantNotes => Add rdfs:label to pot
    if (nextLine.plantNotes == null || nextLine.plantNotes.isEmpty()) {
        potLabel = "Pot " + nextLine.plantName;
    } else {
        potLabel = "Pot " + nextLine.plantName + " : " + nextLine.plantNotes;
    }
    nextResult.add(nextPotUri, RDFS.LABEL, RestletPoddClientImpl.vf.createLiteral(potLabel));

}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * @param potUriMap
 * @param plantId
 * @param nextProjectID
 * @param nextTrayURI
 * @return
 * @throws PoddClientException
 * @throws GraphUtilException
 */
private URI getPotUri(final ConcurrentMap<String, ConcurrentMap<URI, URI>> potUriMap, final String plantId,
        final InferredOWLOntologyID nextProjectID, final URI nextTrayURI)
        throws PoddClientException, GraphUtilException {
    URI nextPotURI;
    if (potUriMap.containsKey(plantId)) {
        nextPotURI = potUriMap.get(plantId).keySet().iterator().next();
    } else {
        final Model plantIdSparqlResults = this.doSPARQL(
                String.format(ExampleSpreadsheetConstants.TEMPLATE_SPARQL_BY_TYPE_LABEL_STRSTARTS,
                        RenderUtils.escape(plantId), RenderUtils.getSPARQLQueryString(PODD.PODD_SCIENCE_POT)),
                Arrays.asList(nextProjectID));

        if (plantIdSparqlResults.isEmpty()) {
            this.log.debug(
                    "Could not find an existing container for pot barcode, assigning a temporary URI: {} {}",
                    plantId, nextProjectID);

            nextPotURI = RestletPoddClientImpl.vf
                    .createURI(RestletPoddClientImpl.TEMP_UUID_PREFIX + "pot:" + UUID.randomUUID().toString());
        } else {
            nextPotURI = GraphUtil.getUniqueSubjectURI(plantIdSparqlResults, RDF.TYPE, PODD.PODD_SCIENCE_POT);
        }

        ConcurrentMap<URI, URI> nextPotUriMap = new ConcurrentHashMap<>();
        final ConcurrentMap<URI, URI> putIfAbsent2 = potUriMap.putIfAbsent(plantId, nextPotUriMap);
        if (putIfAbsent2 != null) {
            nextPotUriMap = putIfAbsent2;
        }
        nextPotUriMap.put(nextPotURI, nextTrayURI);
    }
    return nextPotURI;
}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * @param trayUriMap
 * @param trayId
 * @param nextProjectID
 * @param nextExperimentUri
 * @return
 * @throws PoddClientException
 * @throws GraphUtilException
 */
private URI getTrayUri(final ConcurrentMap<String, ConcurrentMap<URI, URI>> trayUriMap, final String trayId,
        final InferredOWLOntologyID nextProjectID, final URI nextExperimentUri)
        throws PoddClientException, GraphUtilException {
    // Check whether trayId already has an assigned URI
    URI nextTrayURI;
    if (trayUriMap.containsKey(trayId)) {
        nextTrayURI = trayUriMap.get(trayId).keySet().iterator().next();
    } else {
        final Model trayIdSparqlResults = this.doSPARQL(
                String.format(ExampleSpreadsheetConstants.TEMPLATE_SPARQL_BY_TYPE_LABEL_STRSTARTS,
                        RenderUtils.escape(trayId), RenderUtils.getSPARQLQueryString(PODD.PODD_SCIENCE_TRAY)),
                Arrays.asList(nextProjectID));

        if (trayIdSparqlResults.isEmpty()) {
            this.log.debug(
                    "Could not find an existing container for tray barcode, assigning a temporary URI: {} {}",
                    trayId, nextProjectID);

            nextTrayURI = RestletPoddClientImpl.vf
                    .createURI(RestletPoddClientImpl.TEMP_UUID_PREFIX + "tray:" + UUID.randomUUID().toString());
        } else {
            nextTrayURI = GraphUtil.getUniqueSubjectURI(trayIdSparqlResults, RDF.TYPE, PODD.PODD_SCIENCE_TRAY);
        }

        ConcurrentMap<URI, URI> nextTrayUriMap = new ConcurrentHashMap<>();
        final ConcurrentMap<URI, URI> putIfAbsent2 = trayUriMap.putIfAbsent(trayId, nextTrayUriMap);
        if (putIfAbsent2 != null) {
            nextTrayUriMap = putIfAbsent2;
        }
        nextTrayUriMap.put(nextTrayURI, nextExperimentUri);
    }
    return nextTrayURI;
}

From source file:org.danann.cernunnos.DynamicCacheHelper.java

public V getCachedObject(TaskRequest req, TaskResponse res, K key, Factory<K, V> factory) {
    final CacheMode cacheMode = CacheMode.valueOf((String) this.cacheModelPhrase.evaluate(req, res));

    if (this.logger.isDebugEnabled()) {
        this.logger.debug("Getting cached object for '" + key + "' using cache mode " + cacheMode
                + " and factory " + factory);
    }

    //Load the cache only if cache-all is enabled
    final ConcurrentMap<Tuple<Serializable, K>, Object> cache;
    final Tuple<Serializable, K> compoundCacheKey;
    switch (cacheMode) {
    case NONE: {
        return factory.createObject(key);
    }

    default:
    case ONE: {
        cache = null;
        compoundCacheKey = null;
    }
        break;

    case ALL: {
        cache = (ConcurrentMap<Tuple<Serializable, K>, Object>) this.cachePhrase.evaluate(req, res);
        final Serializable cacheNamespace = factory.getCacheNamespace(key);
        compoundCacheKey = new Tuple<Serializable, K>(cacheNamespace, key);
    }
        break;
    }

    //Determine the object to synchronize around
    final Object syncTarget = factory.getMutex(key);

    //get or create & cache the target object
    V instance = null;
    synchronized (syncTarget) {
        //Get the object from the local variables if no cache is available
        if (cache == null) {
            //Try for a thread-local instance first
            if (this.compareKeys(key, this.threadKeyHolder.get())) {
                instance = this.threadInstanceHolder.get();
            }
            //Next try for a singleton instance
            else if (this.compareKeys(key, this.key)) {
                instance = this.instance;
            }
        }
        //Look in the passed cache for the instance
        else {
            final Object object = cache.get(compoundCacheKey);

            //If the cached object is a ThreadLocal use it for the instance
            if (object instanceof ThreadLocal<?>) {
                instance = ((ThreadLocal<V>) object).get();
            }
            //If not, assume it is the instance
            else {
                instance = (V) object;
            }
        }

        //If no instance was found create and cache one
        if (instance == null) {
            instance = factory.createObject(key);
            final boolean threadSafe = factory.isThreadSafe(key, instance);

            if (this.logger.isDebugEnabled()) {
                this.logger.debug(
                        "Cache miss for '" + key + "' created '" + instance + "' threadSafe=" + threadSafe);
            }

            //If no cache is available store the instance in the local variables
            if (cache == null) {
                if (threadSafe) {
                    this.instance = instance;
                    this.key = key;
                } else {
                    this.threadInstanceHolder.set(instance);
                    this.threadKeyHolder.set(key);
                }
            }
            //Cache available store there
            else {
                if (threadSafe) {
                    cache.put(compoundCacheKey, instance);
                } else {
                    ThreadLocal<V> threadInstanceHolder = (ThreadLocal<V>) cache.get(compoundCacheKey);
                    if (threadInstanceHolder == null) {
                        threadInstanceHolder = new ThreadLocal<V>();

                        while (true) {
                            Object existing = cache.putIfAbsent(compoundCacheKey, threadInstanceHolder);
                            if (existing == null) {
                                //nothing existed for that key, put was successful
                                break;
                            }

                            if (existing instanceof ThreadLocal) {
                                //Existing ThreadLocal, just use it
                                threadInstanceHolder = (ThreadLocal) existing;
                                break;
                            }

                            //something other than a ThreadLocal already exists, try replacing with the ThreadLocal
                            final boolean replaced = cache.replace(compoundCacheKey, existing,
                                    threadInstanceHolder);
                            if (replaced) {
                                //Replace worked!
                                break;
                            }

                            //Replace didn't work, try the whole process again, yay non-blocking!
                        }

                        if (cache instanceof EvictionAwareCache) {
                            ((EvictionAwareCache) cache)
                                    .registerCacheEvictionListener(ThreadLocalCacheEvictionListener.INSTANCE);
                        }
                    }

                    threadInstanceHolder.set(instance);
                }
            }
        } else if (this.logger.isDebugEnabled()) {
            this.logger.debug("Cache hit for '" + key + "' using '" + instance + "'");
        }
    }

    return instance;
}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * Parses the mapping of line numbers to the line names used to identify lines in the
 * randomisation process.
 * 
 * @param in
 *            An {@link InputStream} containing the CSV file with the mapping of line numbers to
 *            line names
 * @return A map from line numbers to line names.
 * @throws IOException
 *             If there is an {@link IOException}.
 * @throws PoddClientException
 *             If there is a problem communicating with the PODD server.
 */
public ConcurrentMap<String, String> processLineNameMappingList(final InputStream in)
        throws IOException, PoddClientException {
    // -----------------------------------------------------------------------------------------
    // Now process the CSV file line by line using the caches to reduce multiple queries to the
    // server where possible
    // -----------------------------------------------------------------------------------------

    List<String> headers = null;
    final ConcurrentMap<String, String> result = new ConcurrentHashMap<>();
    // Suppressing try-with-resources warning generated erroneously by Eclipse:
    // https://bugs.eclipse.org/bugs/show_bug.cgi?id=371614
    try (@SuppressWarnings("resource")
    final InputStreamReader inputStreamReader = new InputStreamReader(in, StandardCharsets.UTF_8);
            final CSVReader reader = new CSVReader(inputStreamReader);) {
        String[] nextLine;
        while ((nextLine = reader.readNext()) != null) {
            if (headers == null) {
                // header line is mandatory in PODD CSV
                headers = Arrays.asList(nextLine);
                try {
                    if (headers.size() != 2) {
                        throw new IllegalArgumentException("Did not find required number of headers");
                    }

                    if (!headers.get(0).equals(ExampleLineMappingConstants.RAND_LINE_NUMBER)) {
                        throw new IllegalArgumentException(
                                "Missing " + ExampleLineMappingConstants.RAND_LINE_NUMBER + " header");
                    }

                    if (!headers.get(1).equals(ExampleLineMappingConstants.RAND_CLIENT_LINE_NAME)) {
                        throw new IllegalArgumentException(
                                "Missing " + ExampleLineMappingConstants.RAND_CLIENT_LINE_NAME + " header");
                    }
                } catch (final IllegalArgumentException e) {
                    this.log.error("Could not verify headers for line name mappings file: {}", e.getMessage());
                    throw new PoddClientException("Could not verify headers for line name mappings file", e);
                }
            } else {
                if (nextLine.length != headers.size()) {
                    this.log.error("Line and header sizes were different: {} {}", headers, nextLine);
                }

                final String putIfAbsent = result.putIfAbsent(nextLine[0], nextLine[1]);
                if (putIfAbsent != null) {
                    this.log.error(
                            "Found multiple mappings for line name and number: linenumber={} duplicate={} original={}",
                            nextLine[0], nextLine[1], putIfAbsent);
                }
            }
        }
    }

    if (headers == null) {
        this.log.error("Document did not contain a valid header line");
    }

    if (result.isEmpty()) {
        this.log.error("Document did not contain any valid rows");
    }

    return result;
}

From source file:io.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }

            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

        // always use compression
        transcoder.setCompressionThreshold(0);

        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }

        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All", "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)", "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure", "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };

        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));

                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };

        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
                .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT).setDaemon(true)
                .setFailureMode(FailureMode.Cancel).setTranscoder(transcoder).setShouldOptimize(true)
                .setOpQueueMaxBlockTime(config.getTimeout()).setOpTimeout(config.getTimeout())
                .setReadBufferSize(config.getReadBufferSize()).setOpQueueFactory(opQueueFactory)
                .setMetricCollector(metricCollector).setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();

        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

        if (config.getNumConnections() > 1) {
            clientSupplier = new LoadBalancingPool<MemcachedClientIF>(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(StupidResourceHolder
                    .<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
        }

        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file:org.apache.druid.client.cache.MemcachedCache.java

public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }

            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

        // always use compression
        transcoder.setCompressionThreshold(0);

        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }

        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All", "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)", "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure", "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };

        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));

                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };

        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol
                        .valueOf(StringUtils.toUpperCase(config.getProtocol())))
                .setLocatorType(
                        ConnectionFactoryBuilder.Locator.valueOf(StringUtils.toUpperCase(config.getLocator())))
                .setDaemon(true).setFailureMode(FailureMode.Cancel).setTranscoder(transcoder)
                .setShouldOptimize(true).setOpQueueMaxBlockTime(config.getTimeout())
                .setOpTimeout(config.getTimeout()).setReadBufferSize(config.getReadBufferSize())
                .setOpQueueFactory(opQueueFactory).setMetricCollector(metricCollector)
                .setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();

        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

        if (config.getNumConnections() > 1) {
            clientSupplier = new MemcacheClientPool(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers
                    .ofInstance(StupidResourceHolder.create(new MemcachedClient(connectionFactory, hosts)));
        }

        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * Gets a material URI matching the given pot and genotype URIs, creating a new entry if
 * necessary and giving it a temporary URI.
 *
 * @param materialUriMap
 * @param nextProjectID
 * @param nextPotUri
 * @return
 */
private URI getMaterialUri(final ConcurrentMap<URI, ConcurrentMap<URI, Model>> materialUriMap,
        final URI nextGenotypeUri, final InferredOWLOntologyID nextProjectID, final URI nextPotUri,
        final String potNumber, final String lineNumber, final String control) {
    URI nextMaterialURI = null;
    if (materialUriMap.containsKey(nextPotUri)) {
        final ConcurrentMap<URI, Model> nextPotMaterialMap = materialUriMap.get(nextPotUri);

        for (final URI existingMaterialURI : nextPotMaterialMap.keySet()) {
            final Model nextModel = nextPotMaterialMap.get(existingMaterialURI);

            if (nextModel.contains(existingMaterialURI, PODD.PODD_SCIENCE_REFERS_TO_GENOTYPE,
                    nextGenotypeUri)) {
                nextMaterialURI = existingMaterialURI;
            } else {
                this.log.debug("Did not find any materials with the given genotype in this pot: {} {}",
                        nextPotUri, nextGenotypeUri);
            }
        }
    }

    // If no material was found, then create a new description and assign it a temporary URI
    if (nextMaterialURI == null) {
        this.log.debug(
                "Could not find an existing material for description provided, assigning a temporary URI: {} {} {}",
                nextProjectID, nextPotUri, nextGenotypeUri);

        nextMaterialURI = RestletPoddClientImpl.vf
                .createURI(RestletPoddClientImpl.TEMP_UUID_PREFIX + "material:" + UUID.randomUUID().toString());

        final Model newModel = new LinkedHashModel();
        newModel.add(nextPotUri, PODD.PODD_SCIENCE_HAS_MATERIAL, nextMaterialURI);
        newModel.add(nextMaterialURI, RDF.TYPE, PODD.PODD_SCIENCE_MATERIAL);

        newModel.add(nextMaterialURI, RDFS.LABEL, RestletPoddClientImpl.vf
                .createLiteral("Material for pot " + potNumber + " containing line " + lineNumber));
        newModel.add(nextMaterialURI, PODD.PODD_SCIENCE_REFERS_TO_GENOTYPE, nextGenotypeUri);
        if (control.equalsIgnoreCase("Yes")) {
            newModel.add(nextMaterialURI, PODD.PODD_SCIENCE_HAS_CONTROL, PODD.PODD_SCIENCE_HAS_CONTROL_YES);
        } else if (control.equalsIgnoreCase("No")) {
            newModel.add(nextMaterialURI, PODD.PODD_SCIENCE_HAS_CONTROL, PODD.PODD_SCIENCE_HAS_CONTROL_NO);
        } else {
            this.log.warn("Did not recognise control label: {} (should be Yes or No", control);
            newModel.add(nextMaterialURI, PODD.PODD_SCIENCE_HAS_CONTROL, PODD.PODD_SCIENCE_HAS_CONTROL_UNKNOWN);
        }

        ConcurrentMap<URI, Model> nextGenotypeUriMap = new ConcurrentHashMap<>();
        final ConcurrentMap<URI, Model> putIfAbsent = materialUriMap.putIfAbsent(nextPotUri,
                nextGenotypeUriMap);
        if (putIfAbsent != null) {
            nextGenotypeUriMap = putIfAbsent;
        }
        final Model putIfAbsent2 = nextGenotypeUriMap.putIfAbsent(nextMaterialURI, newModel);
        if (putIfAbsent2 != null) {
            this.log.error("ERROR: Generated two temporary Material URIs that were identical! : {} {}",
                    nextPotUri, nextMaterialURI);
        }
    }
    return nextMaterialURI;

}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * Gets a genotype URI matching the given genus, species, and plantName (line) from the given
 * cache, creating a new entry if necessary and giving it a temporary URI.
 *
 * @param genotypeUriMap
 * @param genus
 * @param species
 * @param plantName
 * @param control
 * @param nextProjectID
 * @param nextProjectUri
 * @return
 */
private URI getGenotypeUri(final ConcurrentMap<URI, ConcurrentMap<URI, Model>> genotypeUriMap,
        final String genus, final String species, final String plantName, final String plantLineNumber,
        final String control, final InferredOWLOntologyID nextProjectID, final URI nextProjectUri) {
    URI nextGenotypeURI = null;
    if (genotypeUriMap.containsKey(nextProjectUri)) {
        final ConcurrentMap<URI, Model> nextProjectGenotypeMap = genotypeUriMap.get(nextProjectUri);

        for (final URI existingGenotypeURI : nextProjectGenotypeMap.keySet()) {
            final Model nextModel = nextProjectGenotypeMap.get(existingGenotypeURI);

            if (nextModel.contains(existingGenotypeURI, PODD.PODD_SCIENCE_HAS_GENUS,
                    RestletPoddClientImpl.vf.createLiteral(genus))) {
                if (nextModel.contains(existingGenotypeURI, PODD.PODD_SCIENCE_HAS_SPECIES,
                        RestletPoddClientImpl.vf.createLiteral(species))) {
                    if (nextModel.contains(existingGenotypeURI, PODD.PODD_SCIENCE_HAS_LINE,
                            RestletPoddClientImpl.vf.createLiteral(plantName))) {
                        nextGenotypeURI = existingGenotypeURI;
                        break;
                    } else {
                        this.log.debug(
                                "Did not find any genotypes with the given genus and species and line in this project: {} {} {} {}",
                                nextProjectUri, genus, species, plantName);
                    }
                } else {
                    this.log.debug(
                            "Did not find any genotypes with the given genus and species in this project: {} {} {}",
                            nextProjectUri, genus, species);
                }
            } else {
                this.log.debug("Did not find any genotypes with the given genus in this project: {} {}",
                        nextProjectUri, genus);
            }
        }
    }

    // If no genotype was found, then create a new description and assign it a temporary URI
    if (nextGenotypeURI == null) {
        this.log.debug(
                "Could not find an existing genotype for description provided, assigning a temporary URI: {} {} {} {}",
                nextProjectID, genus, species, plantName);

        nextGenotypeURI = RestletPoddClientImpl.vf.createURI(RestletPoddClientImpl.TEMP_UUID_PREFIX
                + "genotype:" + plantLineNumber + ":" + UUID.randomUUID().toString());

        final Model newModel = new LinkedHashModel();
        newModel.add(nextProjectUri, PODD.PODD_SCIENCE_HAS_GENOTYPE, nextGenotypeURI);
        newModel.add(nextGenotypeURI, RDF.TYPE, PODD.PODD_SCIENCE_GENOTYPE);
        newModel.add(nextGenotypeURI, RDFS.LABEL,
                RestletPoddClientImpl.vf.createLiteral(genus + " " + species + " (" + plantName + ")"));
        newModel.add(nextGenotypeURI, RDFS.COMMENT, RestletPoddClientImpl.vf.createLiteral("Plant line in : "
                + genus + " " + species + " named, " + plantName + " : labelled as number " + plantLineNumber));
        newModel.add(nextGenotypeURI, PODD.PODD_SCIENCE_HAS_GENUS,
                RestletPoddClientImpl.vf.createLiteral(genus));
        newModel.add(nextGenotypeURI, PODD.PODD_SCIENCE_HAS_SPECIES,
                RestletPoddClientImpl.vf.createLiteral(species));
        newModel.add(nextGenotypeURI, PODD.PODD_SCIENCE_HAS_LINE,
                RestletPoddClientImpl.vf.createLiteral(plantName));
        newModel.add(nextGenotypeURI, PODD.PODD_SCIENCE_HAS_LINE_NUMBER,
                RestletPoddClientImpl.vf.createLiteral(plantLineNumber));

        ConcurrentMap<URI, Model> nextGenotypeUriMap = new ConcurrentHashMap<>();
        final ConcurrentMap<URI, Model> putIfAbsent = genotypeUriMap.putIfAbsent(nextProjectUri,
                nextGenotypeUriMap);
        if (putIfAbsent != null) {
            nextGenotypeUriMap = putIfAbsent;
        }
        final Model putIfAbsent2 = nextGenotypeUriMap.putIfAbsent(nextGenotypeURI, newModel);
        if (putIfAbsent2 != null) {
            this.log.error("ERROR: Generated two temporary Genotype URIs that were identical! : {} {}",
                    nextProjectUri, nextGenotypeURI);
        }
    }
    return nextGenotypeURI;

}