Example usage for java.util.concurrent ConcurrentMap put

Introduction

On this page you can find example usage for the java.util.concurrent ConcurrentMap put method.

Prototype

V put(K key, V value);

Document

Associates the specified value with the specified key in this map (optional operation). If the map previously contained a mapping for the key, the old value is replaced, and put returns that previous value (or null if there was no mapping).
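
As a minimal, self-contained sketch of that contract (the class and key names here are invented for illustration):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PutReturnValueExample {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();

        // put returns null when the key had no previous mapping
        Integer previous = counts.put("requests", 1);
        System.out.println(previous); // null

        // and returns the replaced value otherwise
        previous = counts.put("requests", 2);
        System.out.println(previous); // 1
    }
}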

Usage

From source file:com.edgenius.wiki.service.impl.PageServiceImpl.java

@SuppressWarnings("unchecked")
public void startEditing(String pageUuid, User user) {

    ConcurrentMap<String, Long> map = null;

    Element ele = pageEditingCache.get(pageUuid);
    if (ele != null) {
        //if ele is not null, keep the existing list of editing users
        //this may cause users not to expire within 1 hour (ehcache.xml setting), but it is harmless
        pageEditingCache.remove(pageUuid);
        map = (ConcurrentMap<String, Long>) ele.getValue();
    }
    if (map == null)
        map = new ConcurrentHashMap<String, Long>();

    map.put(user.getUsername(), System.currentTimeMillis());

    ele = new Element(pageUuid, map);
    pageEditingCache.put(ele);
}
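
The remove/get/put sequence above is not atomic, so two users starting to edit the same page at the same time can race on the cache. When the bookkeeping lives directly in a ConcurrentMap, the usual way to close that race is putIfAbsent; a sketch with hypothetical names standing in for the cache used above:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class EditingRegistry {
    private final ConcurrentMap<String, ConcurrentMap<String, Long>> editorsByPage = new ConcurrentHashMap<>();

    void startEditing(String pageUuid, String username) {
        ConcurrentMap<String, Long> editors = new ConcurrentHashMap<>();
        ConcurrentMap<String, Long> existing = editorsByPage.putIfAbsent(pageUuid, editors);
        if (existing != null) {
            editors = existing; // another thread installed a map first; use it
        }
        editors.put(username, System.currentTimeMillis());
    }
}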

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

/**
 * Offload message data of a particular type of store (current or incoming) to
 * disk.
 *
 * @param messageStore The message store to write to disk
 * @param messagesOnDisk Map to update and let others know that this message
 *                       store is on disk
 * @param partitionId Id of the partition we want to offload the message store
 *                    of
 * @param superstep Superstep for which we want to offload message data.
 *                  It is equal to the current superstep number when offloading
 *                  currentMessageStore, and to the next superstep number when
 *                  offloading incomingMessageStore
 * @throws IOException
 */
private void writeMessageData(MessageStore<I, Writable> messageStore,
        ConcurrentMap<Integer, Boolean> messagesOnDisk, int partitionId, long superstep) throws IOException {
    File file = new File(getMessagesPath(partitionId, superstep));
    checkState(!file.exists(), "writeMessageData: message store file for partition " + partitionId
            + " for messages in superstep " + superstep + " already exists (impossible).");

    checkState(file.createNewFile(),
            "writeMessageData: cannot create message store file for partition " + partitionId);

    // try-with-resources closes the streams even if writing fails
    try (FileOutputStream fileout = new FileOutputStream(file);
            BufferedOutputStream bufferout = new BufferedOutputStream(fileout);
            DataOutputStream outputStream = new DataOutputStream(bufferout)) {
        messageStore.writePartition(outputStream, partitionId);
        messageStore.clearPartition(partitionId);
    }
    messagesOnDisk.put(partitionId, true);
}

From source file:org.dkpro.lab.engine.impl.MultiThreadBatchTaskEngine.java

@Override
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext, Map<String, Object> aConfig,
        Set<String> aExecutedSubtasks) throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        // Now the setup is complete
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<>(aConfiguration.getTasks());
    // keeps track of the execution threads; 
    // TODO MW: do we really need this or can we work with the futures list only?
    Map<Task, ExecutionThread> threads = new HashMap<>();
    // keeps track of submitted Futures and their associated tasks
    Map<Future<?>, Task> futures = new HashMap<Future<?>, Task>();
    // re-created at the start of each loop iteration with the exceptions from the previous pass
    ConcurrentMap<Task, Throwable> exceptionsFromLastLoop = null;
    ConcurrentMap<Task, Throwable> exceptionsFromCurrentLoop = new ConcurrentHashMap<>();

    int outerLoopCounter = 0;

    // main loop
    do {
        outerLoopCounter++;

        threads.clear();
        futures.clear();
        ExecutorService executor = Executors.newFixedThreadPool(maxThreads);

        // set the exceptions from the last loop
        exceptionsFromLastLoop = new ConcurrentHashMap<>(exceptionsFromCurrentLoop);

        // Fix MW: Clear exceptionsFromCurrentLoop; otherwise the loop will run at most twice.
        exceptionsFromCurrentLoop.clear();

        // process all tasks from the queue
        while (!queue.isEmpty()) {
            Task task = queue.poll();

            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task, aConfig,
                    aExecutedSubtasks);

            // Check whether a subtask execution compatible with the present
            // configuration already exists ...
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set scope here so that the inherited scopes are considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                ExecutionThread thread = new ExecutionThread(aContext, task, aConfig, aExecutedSubtasks);
                threads.put(task, thread);

                futures.put(executor.submit(thread), task);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

        // try and get results from all futures to check for failed executions
        for (Map.Entry<Future<?>, Task> entry : futures.entrySet()) {
            try {
                entry.getKey().get();
            } catch (java.util.concurrent.ExecutionException ex) {
                Task task = entry.getValue();
                // TODO MW: add a retry-counter here to prevent endless loops?
                log.info("Task exec failed for [" + task.getType() + "]");
                // record the failed task, so that it can be re-added to the queue
                exceptionsFromCurrentLoop.put(task, ex);
            } catch (InterruptedException ex) {
                // thread interrupted, exit
                throw new RuntimeException(ex);
            }
        }

        log.debug("Calling shutdown");
        executor.shutdown();
        log.debug("All threads finished");

        // collect the results
        for (Map.Entry<Task, ExecutionThread> entry : threads.entrySet()) {
            Task task = entry.getKey();
            ExecutionThread thread = entry.getValue();
            TaskContextMetadata execution = thread.getTaskContextMetadata();

            // probably failed
            if (execution == null) {
                Throwable exception = exceptionsFromCurrentLoop.get(task);
                if (!(exception instanceof UnresolvedImportException)
                        && !(exception instanceof java.util.concurrent.ExecutionException)) {
                    throw new RuntimeException(exception);
                }
                exceptionsFromCurrentLoop.put(task, exception);

                // re-add to the queue
                queue.add(task);
            } else {

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

    }
    // finish if the same tasks failed again
    while (!exceptionsFromCurrentLoop.keySet().equals(exceptionsFromLastLoop.keySet()));

    if (!exceptionsFromCurrentLoop.isEmpty()) {
        // collect all details
        StringBuilder details = new StringBuilder();
        for (Throwable throwable : exceptionsFromCurrentLoop.values()) {
            details.append("\n -");
            details.append(throwable.getMessage());
        }

        // we re-throw the first exception
        Throwable next = exceptionsFromCurrentLoop.values().iterator().next();
        if (next instanceof RuntimeException) {
            throw (RuntimeException) next;
        }

        // otherwise wrap it
        throw new RuntimeException(details.toString(), next);
    }
    log.info("MultiThreadBatchTask completed successfully. Total number of outer loop runs: "
            + outerLoopCounter);
}
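
Distilled to its essentials, the outer loop above resubmits failed tasks until the same set of tasks fails in two consecutive rounds. A self-contained sketch of that pattern, with plain Callable standing in for the framework's Task type:

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class RetryLoopSketch {
    static void runWithRetries(List<Callable<Void>> tasks, int maxThreads) throws InterruptedException {
        Queue<Callable<Void>> queue = new LinkedList<>(tasks);
        ConcurrentMap<Callable<Void>, Throwable> lastFailures;
        ConcurrentMap<Callable<Void>, Throwable> failures = new ConcurrentHashMap<>();
        do {
            lastFailures = new ConcurrentHashMap<>(failures);
            failures.clear();
            ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
            Map<Future<?>, Callable<Void>> futures = new HashMap<>();
            while (!queue.isEmpty()) {
                Callable<Void> task = queue.poll();
                futures.put(executor.submit(task), task);
            }
            for (Map.Entry<Future<?>, Callable<Void>> entry : futures.entrySet()) {
                try {
                    entry.getKey().get();
                } catch (ExecutionException ex) {
                    // record the failure and re-queue the task for the next round
                    failures.put(entry.getValue(), ex);
                    queue.add(entry.getValue());
                }
            }
            executor.shutdown();
        } while (!failures.keySet().equals(lastFailures.keySet()));
    }
}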

From source file:com.github.podd.example.ExamplePoddClient.java

private void populateGenotypeUriMap(
        final ConcurrentMap<String, ConcurrentMap<URI, InferredOWLOntologyID>> projectUriMap,
        final ConcurrentMap<URI, ConcurrentMap<URI, Model>> genotypeUriMap) throws PoddClientException {
    for (final String nextProjectName : projectUriMap.keySet()) {
        final ConcurrentMap<URI, InferredOWLOntologyID> nextProjectNameMapping = projectUriMap
                .get(nextProjectName);
        for (final URI projectUri : nextProjectNameMapping.keySet()) {
            final InferredOWLOntologyID artifactId = nextProjectNameMapping.get(projectUri);
            final Model nextSparqlResults = this.doSPARQL(
                    String.format(ExampleSpreadsheetConstants.TEMPLATE_SPARQL_BY_TYPE_ALL_PROPERTIES,
                            RenderUtils.getSPARQLQueryString(PODD.PODD_SCIENCE_GENOTYPE)),
                    Arrays.asList(artifactId));
            if (nextSparqlResults.isEmpty()) {
                this.log.debug("Could not find any existing genotypes for project: {} {}", nextProjectName,
                        projectUri);
            }

            for (final Resource nextGenotype : nextSparqlResults
                    .filter(null, RDF.TYPE, PODD.PODD_SCIENCE_GENOTYPE).subjects()) {
                if (!(nextGenotype instanceof URI)) {
                    this.log.error("Found genotype that was not assigned a URI: {} artifact={}", nextGenotype,
                            artifactId);
                } else {
                    ConcurrentMap<URI, Model> nextGenotypeMap = new ConcurrentHashMap<>();
                    // putIfAbsent rather than put, so a map installed concurrently
                    // by another thread is not silently replaced
                    final ConcurrentMap<URI, Model> putIfAbsent = genotypeUriMap.putIfAbsent(projectUri,
                            nextGenotypeMap);
                    if (putIfAbsent != null) {
                        nextGenotypeMap = putIfAbsent;
                    }
                    final Model putIfAbsent2 = nextGenotypeMap.putIfAbsent((URI) nextGenotype,
                            nextSparqlResults);
                    if (putIfAbsent2 != null) {
                        this.log.info(
                                "Found existing description for genotype URI within the same project: {} {}",
                                projectUri, nextGenotype);
                    }
                }
            }
        }
    }
}
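
Since Java 8, the putIfAbsent-then-fall-back dance above can be collapsed into a single computeIfAbsent call. A runnable sketch, with String standing in for the URI and Model types used here:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ComputeIfAbsentSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, ConcurrentMap<String, String>> genotypesByProject =
                new ConcurrentHashMap<>();

        // computeIfAbsent creates and installs the inner map atomically,
        // returning whichever map ends up in genotypesByProject
        ConcurrentMap<String, String> genotypes =
                genotypesByProject.computeIfAbsent("project-1", k -> new ConcurrentHashMap<>());
        genotypes.put("genotype-1", "model data");
    }
}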

From source file:org.danann.cernunnos.DynamicCacheHelper.java

public V getCachedObject(TaskRequest req, TaskResponse res, K key, Factory<K, V> factory) {
    final CacheMode cacheMode = CacheMode.valueOf((String) this.cacheModelPhrase.evaluate(req, res));

    if (this.logger.isDebugEnabled()) {
        this.logger.debug("Getting cached object for '" + key + "' using cache mode " + cacheMode
                + " and factory " + factory);
    }

    //Load the cache only if cache-all is enabled
    final ConcurrentMap<Tuple<Serializable, K>, Object> cache;
    final Tuple<Serializable, K> compoundCacheKey;
    switch (cacheMode) {
    case NONE: {
        return factory.createObject(key);
    }

    default:
    case ONE: {
        cache = null;
        compoundCacheKey = null;
    }
        break;

    case ALL: {
        cache = (ConcurrentMap<Tuple<Serializable, K>, Object>) this.cachePhrase.evaluate(req, res);
        final Serializable cacheNamespace = factory.getCacheNamespace(key);
        compoundCacheKey = new Tuple<Serializable, K>(cacheNamespace, key);
    }
        break;
    }

    //Determine the object to synchronize around
    final Object syncTarget = factory.getMutex(key);

    //get or create & cache the target object
    V instance = null;
    synchronized (syncTarget) {
        //Get the object from the local variables if no cache is available
        if (cache == null) {
            //Try for a thread-local instance first
            if (this.compareKeys(key, this.threadKeyHolder.get())) {
                instance = this.threadInstanceHolder.get();
            }
            //Next try for a singleton instance
            else if (this.compareKeys(key, this.key)) {
                instance = this.instance;
            }
        }
        //Look in the passed cache for the instance
        else {
            final Object object = cache.get(compoundCacheKey);

            //If the cached object is a ThreadLocal use it for the instance
            if (object instanceof ThreadLocal<?>) {
                instance = ((ThreadLocal<V>) object).get();
            }
            //If not assume it is the instance 
            else {
                instance = (V) object;
            }
        }

        //If no instance was found create and cache one
        if (instance == null) {
            instance = factory.createObject(key);
            final boolean threadSafe = factory.isThreadSafe(key, instance);

            if (this.logger.isDebugEnabled()) {
                this.logger.debug(
                        "Cache miss for '" + key + "' created '" + instance + "' threadSafe=" + threadSafe);
            }

            //If no cache is available store the instance in the local variables
            if (cache == null) {
                if (threadSafe) {
                    this.instance = instance;
                    this.key = key;
                } else {
                    this.threadInstanceHolder.set(instance);
                    this.threadKeyHolder.set(key);
                }
            }
            //Cache available store there
            else {
                if (threadSafe) {
                    cache.put(compoundCacheKey, instance);
                } else {
                    ThreadLocal<V> threadInstanceHolder = (ThreadLocal<V>) cache.get(compoundCacheKey);
                    if (threadInstanceHolder == null) {
                        threadInstanceHolder = new ThreadLocal<V>();

                        while (true) {
                            Object existing = cache.putIfAbsent(compoundCacheKey, threadInstanceHolder);
                            if (existing == null) {
                                //nothing existed for that key, put was successful
                                break;
                            }

                            if (existing instanceof ThreadLocal) {
                                //Existing ThreadLocal, just use it
                                threadInstanceHolder = (ThreadLocal<V>) existing;
                                break;
                            }

                            //something other than a ThreadLocal already exists; try replacing it with the ThreadLocal
                            //note the argument order: replace(key, expected old value, new value)
                            final boolean replaced = cache.replace(compoundCacheKey, existing,
                                    threadInstanceHolder);
                            if (replaced) {
                                //Replace worked!
                                break;
                            }

                            //Replace didn't work, try the whole process again, yay non-blocking!
                        }

                        if (cache instanceof EvictionAwareCache) {
                            ((EvictionAwareCache) cache)
                                    .registerCacheEvictionListener(ThreadLocalCacheEvictionListener.INSTANCE);
                        }
                    }

                    threadInstanceHolder.set(instance);
                }
            }
        } else if (this.logger.isDebugEnabled()) {
            this.logger.debug("Cache hit for '" + key + "' using '" + instance + "'");
        }
    }

    return instance;
}
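
The retry loop above is the standard non-blocking pattern for swapping a plainly cached value for a ThreadLocal holder. In isolation, the three-argument replace behaves as in this self-contained sketch (names invented for illustration):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ReplaceSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, Object> cache = new ConcurrentHashMap<>();
        cache.put("key", "plainValue");

        ThreadLocal<String> holder = new ThreadLocal<>();
        Object existing = cache.get("key");

        // replace(key, oldValue, newValue) succeeds only if the key is still
        // mapped to oldValue, so a concurrent modification makes it return false
        boolean replaced = cache.replace("key", existing, holder);
        System.out.println(replaced); // true unless another thread won the race
    }
}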

From source file:org.opendaylight.controller.clustering.services_implementation.internal.ClusteringServicesIT.java

@Test
public void clusterContainerAndGlobalTest()
        throws CacheExistException, CacheConfigException, CacheListenerAddException, InterruptedException {
    String cache1 = "Cache1";
    String cache2 = "Cache2";
    // Let's test the case of caches with the same name in different
    // containers (actually the global and container case)
    String cache3 = "Cache2";

    HashSet<cacheMode> cacheModeSet = new HashSet<cacheMode>();
    cacheModeSet.add(cacheMode.NON_TRANSACTIONAL);
    ConcurrentMap cm11 = this.clusterDefaultServices.createCache(cache1, cacheModeSet);
    assertNotNull(cm11);

    assertTrue(this.clusterDefaultServices.existCache(cache1));
    assertEquals(cm11, this.clusterDefaultServices.getCache(cache1));

    ConcurrentMap cm12 = this.clusterDefaultServices.createCache(cache2, cacheModeSet);
    ConcurrentMap cm23 = this.clusterGlobalServices.createCache(cache3, cacheModeSet);

    // Now, given that cache2 and cache3 have the same name, let's make sure
    // they don't return the same reference
    assertNotNull(this.clusterGlobalServices.getCache(cache2));
    // cm12 reference must be different than cm23
    assertTrue(cm12 != cm23);

    HashSet<String> cacheList = (HashSet<String>) this.clusterDefaultServices.getCacheList();
    assertEquals(2, cacheList.size());
    assertTrue(cacheList.contains(cache1));
    assertTrue(cacheList.contains(cache2));

    assertNotNull(this.clusterDefaultServices.getCacheProperties(cache1));

    {
        /***********************************/
        /* Testing cacheAware in Container */
        /***********************************/
        Dictionary<String, Object> props = new Hashtable<String, Object>();
        Set<String> propSet = new HashSet<String>();
        propSet.add(cache1);
        propSet.add(cache2);
        props.put("cachenames", propSet);
        CacheAware listener = new CacheAware();
        CacheAware listenerRepeated = new CacheAware();
        ServiceRegistration updateServiceReg = ServiceHelper.registerServiceWReg(ICacheUpdateAware.class,
                "default", listener, props);
        assertNotNull(updateServiceReg);

        // Register another service for the same caches; this one
        // should not get any updates because an existing registration
        // cannot be overridden until it has been unregistered
        ServiceRegistration updateServiceRegRepeated = ServiceHelper
                .registerServiceWReg(ICacheUpdateAware.class, "default", listenerRepeated, props);
        assertNotNull(updateServiceRegRepeated);
        CountDownLatch res = null;
        List<Update> ups = null;
        Update up = null;
        Integer k1 = new Integer(10);
        Long k2 = new Long(100L);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // Start monitoring the updates
        res = listener.restart(2);
        // modify the cache
        cm11.put(k1, "foo");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 2);
        // Validate that first we get an update (yes, even in the case of a
        // newly added value)
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("foo"));
        assertTrue(up.cacheName.equals(cache1));
        // Validate that we then get a create
        up = ups.get(1);
        assertTrue(up.t.equals(UpdateType.ADDED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache1));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm11.put(k1, "baz");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("baz"));
        assertTrue(up.cacheName.equals(cache1));

        /**********************************/
        /* RE-UPDATE AN EXISTING KEY CASE */
        /**********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm11.put(k1, "baz");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("baz"));
        assertTrue(up.cacheName.equals(cache1));

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm11.remove(k1);
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get a delete with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.REMOVED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache1));

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // Start monitoring the updates
        res = listener.restart(2);
        // modify the cache
        cm12.put(k2, new Short((short) 15));
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 2);
        // Validate that first we get an update (yes, even in the case of a
        // newly added value)
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k2));
        assertTrue(up.value.equals(new Short((short) 15)));
        assertTrue(up.cacheName.equals(cache2));
        // Validate that we then get a create
        up = ups.get(1);
        assertTrue(up.t.equals(UpdateType.ADDED));
        assertTrue(up.key.equals(k2));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache2));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm12.put(k2, "BAZ");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k2));
        assertTrue(up.value.equals("BAZ"));
        assertTrue(up.cacheName.equals(cache2));

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm12.remove(k2);
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get a delete with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.REMOVED));
        assertTrue(up.key.equals(k2));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache2));

        /******************************************************************/
        /* NOW LETS REMOVE THE REGISTRATION AND MAKE SURE NO UPDATES COME */
        /******************************************************************/
        updateServiceReg.unregister();
        // Start monitoring the updates; none should come in
        res = listener.restart(1);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // modify the cache
        cm11.put(k1, "foo");

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // modify the cache
        cm11.put(k1, "baz");

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // modify the cache
        cm11.remove(k1);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // modify the cache
        cm12.put(k2, new Short((short) 15));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // modify the cache
        cm12.put(k2, "BAZ");

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // modify the cache
        cm12.remove(k2);

        // Wait to make sure no updates came in; admittedly this is
        // error-prone logic, but there is no better way to verify
        // that no updates arrived
        res.await(1L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 0);
    }

    {
        /***********************************/
        /* Testing cacheAware in Global    */
        /***********************************/
        Dictionary<String, Object> props = new Hashtable<String, Object>();
        Set<String> propSet = new HashSet<String>();
        propSet.add(cache3);
        props.put("cachenames", propSet);
        CacheAware listener = new CacheAware();
        ServiceRegistration updateServiceReg = ServiceHelper.registerGlobalServiceWReg(ICacheUpdateAware.class,
                listener, props);
        assertNotNull(updateServiceReg);

        CountDownLatch res = null;
        List<Update> ups = null;
        Update up = null;
        Integer k1 = new Integer(10);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // Start monitoring the updates
        res = listener.restart(2);
        // modify the cache
        cm23.put(k1, "foo");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 2);
        // Validate that first we get an update (yes, even in the case of a
        // newly added value)
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("foo"));
        assertTrue(up.cacheName.equals(cache3));
        // Validate that we then get a create
        up = ups.get(1);
        assertTrue(up.t.equals(UpdateType.ADDED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache3));

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm23.put(k1, "baz");
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get an update with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.CHANGED));
        assertTrue(up.key.equals(k1));
        assertTrue(up.value.equals("baz"));
        assertTrue(up.cacheName.equals(cache3));

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // Start monitoring the updates
        res = listener.restart(1);
        // modify the cache
        cm23.remove(k1);
        // Wait
        res.await(100L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 1);
        // Validate we get a delete with expected fields
        up = ups.get(0);
        assertTrue(up.t.equals(UpdateType.REMOVED));
        assertTrue(up.key.equals(k1));
        assertNull(up.value);
        assertTrue(up.cacheName.equals(cache3));

        /******************************************************************/
        /* NOW LETS REMOVE THE REGISTRATION AND MAKE SURE NO UPDATES COME */
        /******************************************************************/
        updateServiceReg.unregister();
        // Start monitoring the updates; none should come in
        res = listener.restart(1);

        /***********************/
        /* CREATE NEW KEY CASE */
        /***********************/
        // modify the cache
        cm23.put(k1, "foo");

        /*******************************/
        /* UPDATE AN EXISTING KEY CASE */
        /*******************************/
        // modify the cache
        cm23.put(k1, "baz");

        /********************************/
        /* REMOVAL OF EXISTING KEY CASE */
        /********************************/
        // modify the cache
        cm23.remove(k1);

        // Wait to make sure no updates came in; admittedly this is
        // error-prone logic, but there is no better way to verify
        // that no updates arrived
        res.await(1L, TimeUnit.SECONDS);
        // Analyze the updates
        ups = listener.getUpdates();
        assertTrue(ups.size() == 0);
    }

    InetAddress addr = this.clusterDefaultServices.getMyAddress();
    assertNotNull(addr);

    List<InetAddress> addrList = this.clusterDefaultServices.getClusteredControllers();

    this.clusterDefaultServices.destroyCache(cache1);
    assertFalse(this.clusterDefaultServices.existCache(cache1));
}

From source file:org.tomitribe.tribestream.registryng.resources.ClientResource.java

@GET
@Path("invoke/stream")
@Produces("text/event-stream") // will be part of JAX-RS 2.1, for now just making it working
public void invokeScenario(@Suspended final AsyncResponse asyncResponse, @Context final Providers providers,
        @Context final HttpServletRequest httpServletRequest,
        // base64-encoded JSON with the request and identity since EventSource doesn't handle it very well
        // TODO: use ciphering with a POST endpoint to avoid having it readable (or other)
        @QueryParam("request") final String requestBytes) {
    final SseRequest in = loadPayload(SseRequest.class, providers, requestBytes);

    final String auth = in.getIdentity();
    security.check(auth, httpServletRequest, () -> {
    }, () -> {
        throw new WebApplicationException(Response.Status.FORBIDDEN);
    });

    final GenericClientService.Request req = toRequest(in.getHttp());
    final Scenario scenario = in.getHttp().getScenario();

    final MultivaluedHashMap<String, Object> fakeHttpHeaders = new MultivaluedHashMap<>();
    final ConcurrentMap<Future<?>, Boolean> computations = new ConcurrentHashMap<>();
    final MessageBodyWriter<LightHttpResponse> writerResponse = providers.getMessageBodyWriter(
            LightHttpResponse.class, LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE);
    final MessageBodyWriter<ScenarioEnd> writerEnd = providers.getMessageBodyWriter(ScenarioEnd.class,
            ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE);

    // not the JAX-RS response because CXF wraps it and prevents flush() from working
    final HttpServletResponse httpServletResponse = HttpServletResponse.class
            .cast(httpServletRequest.getAttribute("tribe.registry.response"));
    httpServletResponse.setHeader("Content-Type", "text/event-stream");
    try {
        httpServletResponse.flushBuffer();
    } catch (final IOException e) {
        throw new IllegalStateException(e);
    }

    final ServletOutputStream out;
    try {
        out = httpServletResponse.getOutputStream();
    } catch (final IOException e) {
        throw new IllegalStateException(e);
    }

    mes.submit(() -> {
        final AtomicReference<Invoker.Handle> handleRef = new AtomicReference<>();

        try {
            // we compute some easy stats asynchronously
            final Map<Integer, AtomicInteger> sumPerResponse = new ConcurrentHashMap<>(); // updated from several worker threads
            final AtomicInteger total = new AtomicInteger();
            final AtomicLong min = new AtomicLong(Long.MAX_VALUE); // so the first sample always wins
            final AtomicLong max = new AtomicLong();
            final AtomicLong sum = new AtomicLong();

            final AtomicInteger writeErrors = new AtomicInteger(0);

            final long start = System.currentTimeMillis();
            handleRef.set(invoker.invoke(scenario.getThreads(), scenario.getInvocations(),
                    scenario.getDuration(), timeout, () -> {
                        if (handleRef.get().isCancelled()) {
                            return;
                        }

                        LightHttpResponse resp;
                        try {
                            final GenericClientService.Response invoke = service.invoke(req);
                            resp = new LightHttpResponse(invoke.getStatus(), null,
                                    invoke.getClientExecutionDurationMs());
                        } catch (final RuntimeException e) {
                            resp = new LightHttpResponse(-1, e.getMessage(), -1);
                        }

                        // let's process it in an environment where synchronisation is fine
                        final LightHttpResponse respRef = resp;
                        computations.put(mes.submit(() -> {
                            synchronized (out) {
                                try {
                                    out.write(dataStart);
                                    writerResponse.writeTo(respRef, LightHttpResponse.class,
                                            LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE,
                                            fakeHttpHeaders, out);
                                    out.write(dataEnd);
                                    out.flush();
                                } catch (final IOException e) {
                                    if (writeErrors.incrementAndGet() > toleratedWriteErrors) {
                                        handleRef.get().cancel();
                                    }
                                    throw new IllegalStateException(e);
                                }
                            }

                            if (handleRef.get().isCancelled()) {
                                return;
                            }

                            final long clientExecutionDurationMs = respRef.getClientExecutionDurationMs();

                            total.incrementAndGet();
                            sumPerResponse.computeIfAbsent(respRef.getStatus(), k -> new AtomicInteger())
                                    .incrementAndGet();
                            sum.addAndGet(clientExecutionDurationMs);
                            {
                                // atomically lower min, but only when the new
                                // sample is actually smaller
                                long m = min.get();
                                while (clientExecutionDurationMs < m
                                        && !min.compareAndSet(m, clientExecutionDurationMs)) {
                                    m = min.get();
                                }
                            }

                            {
                                // atomically raise max, but only when the new
                                // sample is actually larger
                                long m = max.get();
                                while (clientExecutionDurationMs > m
                                        && !max.compareAndSet(m, clientExecutionDurationMs)) {
                                    m = max.get();
                                }
                            }
                        }), true);
                    }));

            handleRef.get().await();

            final long end = System.currentTimeMillis();

            do { // wait for all threads to finish before computing the stats
                final Iterator<Future<?>> iterator = computations.keySet().iterator();
                while (iterator.hasNext()) {
                    try {
                        iterator.next().get(timeout, TimeUnit.MILLISECONDS);
                    } catch (final InterruptedException e) {
                        Thread.interrupted(); // clear the flag and keep waiting
                    } catch (final ExecutionException | TimeoutException e) {
                        throw new IllegalStateException(e.getCause());
                    } finally {
                        iterator.remove();
                    }
                }
            } while (!computations.isEmpty());

            if (handleRef.get().isCancelled()) {
                return;
            }

            try {
                out.write(dataStart);
                writerEnd.writeTo(
                        new ScenarioEnd(
                                sumPerResponse.entrySet().stream()
                                        .collect(toMap(Map.Entry::getKey, t -> t.getValue().get())),
                                end - start, total.get(), min.get(), max.get(), sum.get() * 1. / total.get()),
                        ScenarioEnd.class, ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE,
                        new MultivaluedHashMap<>(), out);
                out.write(dataEnd);
                out.flush();
            } catch (final IOException e) {
                throw new IllegalStateException(e);
            }
        } finally {
            try {
                // cxf will skip it since we already write ourself
                asyncResponse.resume("");
            } catch (final RuntimeException re) {
                // no-op: not that important
            }
        }
    });
}
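
On Java 8 and later, compare-and-set loops like the min/max tracking above can be written more directly with AtomicLong.accumulateAndGet, which retries the CAS internally. A minimal sketch:

import java.util.concurrent.atomic.AtomicLong;

public class MinMaxSketch {
    public static void main(String[] args) {
        AtomicLong min = new AtomicLong(Long.MAX_VALUE);
        AtomicLong max = new AtomicLong(Long.MIN_VALUE);

        for (long sample : new long[] { 42, 7, 99 }) {
            // accumulateAndGet retries the CAS until the update sticks
            min.accumulateAndGet(sample, Math::min);
            max.accumulateAndGet(sample, Math::max);
        }
        System.out.println(min.get() + " " + max.get()); // 7 99
    }
}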

From source file:org.jasig.portlet.proxy.service.proxy.document.URLRewritingFilter.java

protected void updateUrls(final Document document, final IContentResponse proxyResponse,
        final Map<String, Set<String>> elementSet, final RenderRequest request, final RenderResponse response,
        boolean action) {

    // attempt to retrieve the list of rewritten URLs from the session
    final PortletSession session = request.getPortletSession();
    ConcurrentMap<String, String> rewrittenUrls;
    synchronized (PortletUtils.getSessionMutex(session)) {
        rewrittenUrls = (ConcurrentMap<String, String>) session.getAttribute(REWRITTEN_URLS_KEY);

        // if the rewritten URLs list doesn't exist yet, create it
        if (rewrittenUrls == null) {
            rewrittenUrls = new ConcurrentHashMap<String, String>();
            session.setAttribute(REWRITTEN_URLS_KEY, rewrittenUrls);
        }
    }

    // get the list of configured whitelist regexes
    final PortletPreferences preferences = request.getPreferences();
    final String[] whitelistRegexes = preferences.getValues("whitelistRegexes", new String[] {});

    // If we're proxying a remote website (as opposed to local file system
    // resources), we'll need to transform any relative URLs.  To do this,
    // we first compute the base and relative URLs for the page.
    String baseUrl = null;
    String relativeUrl = null;
    try {
        baseUrl = getBaseServerUrl(proxyResponse.getProxiedLocation());
        relativeUrl = getRelativePathUrl(proxyResponse.getProxiedLocation());
        LOG.trace("Computed base url {} and relative url {} for proxied url {}", baseUrl, relativeUrl,
                proxyResponse.getProxiedLocation());
    } catch (URISyntaxException e) {
        LOG.error(e.getMessage(), e);
    }

    for (final Map.Entry<String, Set<String>> elementEntry : elementSet.entrySet()) {
        for (final String attributeName : elementEntry.getValue()) {

            // get a list of elements for this element type and iterate through
            // them, updating the relevant URL attribute
            final Elements elements = document.getElementsByTag(elementEntry.getKey());
            for (Element element : elements) {

                String attributeUrl = element.attr(attributeName);
                LOG.trace("Considering element {}  with URL attribute {} of value {}", element, attributeName,
                        attributeUrl);

                // don't adjust or filter javascript url targets
                if (StringUtils.isNotBlank(attributeUrl) && !attributeUrl.startsWith(JAVASCRIPT_PREFIX)
                        && !attributeUrl.startsWith(JAVASCRIPT_PREFIX.toLowerCase())) {

                    // if we're proxying a remote website, adjust any 
                    // relative URLs into absolute URLs
                    if (baseUrl != null) {

                        // (1) do not prefix absolute URLs
                        if (attributeUrl.contains("://") || attributeUrl.startsWith("//")) {
                            // do nothing...
                        }

                        // (2) if the URL is relative to the server base,
                        // prepend the base URL
                        else if (attributeUrl.startsWith("/")) {
                            attributeUrl = baseUrl.concat(attributeUrl);
                        }

                        // (3) otherwise use the full relative path
                        else {
                            attributeUrl = relativeUrl.concat(attributeUrl);
                        }

                    }

                    // if this URL matches our whitelist regex, rewrite it 
                    // to pass through this portlet
                    for (String regex : whitelistRegexes) {

                        if (StringUtils.isNotBlank(regex)) {
                            final Pattern pattern = Pattern.compile(regex); // TODO share compiled regexes
                            if (pattern.matcher(attributeUrl).find()) {

                                // record that we've rewritten this URL
                                rewrittenUrls.put(attributeUrl, attributeUrl);

                                // TODO: the value in the rewritten URLs map needs to 
                                // be a resource URL.  we also want to key URLs by a short
                                // string rather than the full URL

                                if (elementEntry.getKey().equals("form")) {
                                    // the form action needs to be set to POST to
                                    // properly pass through our portlet
                                    boolean isPost = "POST".equalsIgnoreCase(element.attr("method"));
                                    if (!isPost) {
                                        element.attr("method", "POST");
                                    }
                                    attributeUrl = createFormUrl(response, isPost, attributeUrl);
                                }

                                else if (action) {
                                    attributeUrl = createActionUrl(response, attributeUrl);
                                }

                                else {
                                    attributeUrl = createResourceUrl(response, attributeUrl);
                                }
                            }
                        }
                    }

                }

                element.attr(attributeName, attributeUrl.replace("&amp;", "&"));

            }

        }

    }

}
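
rewrittenUrls stores each URL as both key and value, effectively using the ConcurrentMap as a set. Since Java 8 that intent can be stated directly with ConcurrentHashMap.newKeySet(); a minimal sketch:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class RewrittenUrlSetSketch {
    public static void main(String[] args) {
        // a concurrent Set view backed by a ConcurrentHashMap
        Set<String> rewrittenUrls = ConcurrentHashMap.newKeySet();
        rewrittenUrls.add("http://example.com/page");
        System.out.println(rewrittenUrls.contains("http://example.com/page")); // true
    }
}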

From source file:io.hops.metadata.util.RMUtilities.java

/**
 * Recover the inactive-nodes map of RMContextImpl.
 *
 * @param rmContext the RM context whose inactive nodes are being recovered
 * @param state the persisted RM state used to recover each node
 * @param conf the YARN configuration
 * @return the recovered map of host name to RMNode
 * @throws java.lang.Exception
 */
//For testing TODO move to test
public static Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> getRMContextInactiveNodes(
        final RMContext rmContext, final RMState state, final Configuration conf) throws Exception {
    LightWeightRequestHandler getRMContextInactiveNodesHandler = new LightWeightRequestHandler(
            YARNOperationType.TEST) {
        @Override
        public Object performTask() throws StorageException {
            connector.beginTransaction();
            connector.writeLock();
            ConcurrentMap<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> inactiveNodes = new ConcurrentHashMap<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode>();
            //Retrieve rmctxnodes table entries
            RMContextInactiveNodesDataAccess rmctxInactiveNodesDA = (RMContextInactiveNodesDataAccess) RMStorageFactory
                    .getDataAccess(RMContextInactiveNodesDataAccess.class);
            ResourceDataAccess DA = (ResourceDataAccess) YarnAPIStorageFactory
                    .getDataAccess(ResourceDataAccess.class);
            RMNodeDataAccess rmDA = (RMNodeDataAccess) RMStorageFactory.getDataAccess(RMNodeDataAccess.class);
            List<RMContextInactiveNodes> hopRMContextInactiveNodes = rmctxInactiveNodesDA.findAll();
            if (hopRMContextInactiveNodes != null && !hopRMContextInactiveNodes.isEmpty()) {
                for (RMContextInactiveNodes key : hopRMContextInactiveNodes) {

                    NodeId nodeId = ConverterUtils.toNodeId(key.getRmnodeid());
                    //retrieve RMNode in order to create a new FiCaSchedulerNode
                    RMNode hopRMNode = (RMNode) rmDA.findByNodeId(key.getRmnodeid());
                    //Retrieve resource of RMNode
                    Resource res = (Resource) DA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                            Resource.RMNODE);
                    //Retrieve and initialize the NodeBase for the RMNode
                    NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory
                            .getDataAccess(NodeDataAccess.class);
                    org.apache.hadoop.net.Node node = null;
                    if (hopRMNode.getNodeId() != null) {
                        Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                        node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                        if (hopNode.getParent() != null) {
                            node.setParent(new NodeBase(hopNode.getParent()));
                        }
                        node.setLevel(hopNode.getLevel());
                    }
                    //Retrieve nextHeartbeat
                    NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                            .getDataAccess(NextHeartbeatDataAccess.class);
                    boolean nextHeartbeat = nextHBDA.findEntry(key.getRmnodeid());
                    org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = new RMNodeImpl(nodeId,
                            rmContext, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                            hopRMNode.getHttpPort(), node,
                            ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource.newInstance(
                                    res.getMemory(), res.getVirtualCores()), hopRMNode.getOvercommittimeout()),
                            hopRMNode.getNodemanagerVersion(), hopRMNode.getHealthReport(),
                            hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                            conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                    YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));
                    ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                    alreadyRecoveredRMContextInactiveNodes.put(rmNode.getNodeID().getHost(), rmNode);
                    inactiveNodes.put(rmNode.getNodeID().getHost(), rmNode);

                }
            }
            connector.commit();
            return inactiveNodes;
        }
    };
    try {
        if (alreadyRecoveredRMContextInactiveNodes.isEmpty()) {
            Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> result = (Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode>) getRMContextInactiveNodesHandler
                    .handle();
            for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode node : result.values()) {
                node.recover(state);
            }
            return result;
        } else {
            return alreadyRecoveredRMContextInactiveNodes;
        }
    } catch (IOException ex) {
        LOG.error("HOP", ex);
    }
    return null;
}