Example usage for java.util.concurrent ExecutorCompletionService ExecutorCompletionService

List of usage examples for java.util.concurrent ExecutorCompletionService ExecutorCompletionService

Introduction

On this page you can find example usage for java.util.concurrent ExecutorCompletionService ExecutorCompletionService.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.
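
For orientation, here is a minimal sketch of the submit-then-take pattern that all of the examples below follow. The class name, pool size, and tasks are illustrative assumptions, not taken from any of the source files:

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceSketch {
    public static void main(String[] args) throws Exception {
        // The supplied executor runs the tasks; the completion service queues
        // each task's Future on an internal LinkedBlockingQueue as it finishes.
        ExecutorService executor = Executors.newFixedThreadPool(4);
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);

        // Submit a handful of independent Callable tasks.
        int taskCount = 10;
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            completionService.submit(() -> n * n);
        }

        // take() blocks until the next task completes, yielding results in
        // completion order rather than submission order.
        int sum = 0;
        for (int i = 0; i < taskCount; i++) {
            sum += completionService.take().get();
        }
        System.out.println("Sum of squares: " + sum);

        executor.shutdown();
    }
}

Consuming results with take() as they become available, rather than iterating over a list of Futures in submission order, is the main reason the examples below use ExecutorCompletionService instead of collecting Futures from ExecutorService.submit directly.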

Usage

From source file:com.appdynamics.monitors.hadoop.communicator.AmbariCommunicator.java

/**
 * Parses a JSON Reader object as cluster metrics and collects service and host metrics.
 * @see #getServiceMetrics(java.io.Reader, String)
 * @see #getHostMetrics(java.io.Reader, String)
 *
 * @param response the cluster metrics response to parse
 */
private void getClusterMetrics(Reader response) {
    try {
        Map<String, Object> json = (Map<String, Object>) parser.parse(response, simpleContainer);
        try {
            String clusterName = (String) ((Map) json.get("Clusters")).get("cluster_name");
            List<Map> services = (ArrayList<Map>) json.get("services");
            List<Map> hosts = (ArrayList<Map>) json.get("hosts");

            CompletionService<Reader> threadPool = new ExecutorCompletionService<Reader>(executor);
            int count = 0;
            for (Map service : services) {
                if (xmlParser
                        .isIncludeService((String) ((Map) service.get("ServiceInfo")).get("service_name"))) {
                    threadPool.submit(new Response(service.get("href") + SERVICE_FIELDS));
                    count++;
                }
            }
            for (; count > 0; count--) {
                getServiceMetrics(threadPool.take().get(), clusterName + "|services");
            }

            for (Map host : hosts) {
                if (xmlParser.isIncludeHost((String) ((Map) host.get("Hosts")).get("host_name"))) {
                    threadPool.submit(new Response(host.get("href") + HOST_FIELDS));
                    count++;
                }
            }
            for (; count > 0; count--) {
                getHostMetrics(threadPool.take().get(), clusterName + "|hosts");
            }
        } catch (Exception e) {
            logger.error("Failed to parse cluster metrics: " + stackTraceToString(e));
        }
    } catch (Exception e) {
        logger.error("Failed to get response for cluster metrics: " + stackTraceToString(e));
    }
}

From source file:com.facebook.presto.accumulo.index.ColumnCardinalityCache.java

/**
 * Gets the cardinality for each {@link AccumuloColumnConstraint}.
 * Given constraints are expected to be indexed! Who knows what would happen if they weren't!
 *
 * @param schema Schema name
 * @param table Table name
 * @param auths Scan authorizations
 * @param idxConstraintRangePairs Mapping of all ranges for a given constraint
 * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
 * @param pollingDuration Duration for polling the cardinality completion service
 * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
 * @throws TableNotFoundException If the metrics table does not exist
 * @throws ExecutionException If another error occurs; I really don't even know anymore.
 */
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table,
        Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs,
        long earlyReturnThreshold, Duration pollingDuration) throws ExecutionException, TableNotFoundException {
    // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary
    CompletionService<Pair<Long, AccumuloColumnConstraint>> executor = new ExecutorCompletionService<>(
            executorService);
    idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> {
        long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(),
                value);
        LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality);
        return Pair.of(cardinality, key);
    }));

    // Create a multi map sorted by cardinality
    ListMultimap<Long, AccumuloColumnConstraint> cardinalityToConstraints = MultimapBuilder.treeKeys()
            .arrayListValues().build();
    try {
        boolean earlyReturn = false;
        int numTasks = idxConstraintRangePairs.asMap().entrySet().size();
        do {
            // Sleep for the polling duration to allow concurrent tasks to run for this time
            Thread.sleep(pollingDuration.toMillis());

            // Poll each task, retrieving the result if it is done
            for (int i = 0; i < numTasks; ++i) {
                Future<Pair<Long, AccumuloColumnConstraint>> futureCardinality = executor.poll();
                if (futureCardinality != null && futureCardinality.isDone()) {
                    Pair<Long, AccumuloColumnConstraint> columnCardinality = futureCardinality.get();
                    cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight());
                }
            }

            // If the smallest cardinality is present and below the threshold, set the earlyReturn flag
            Optional<Entry<Long, AccumuloColumnConstraint>> smallestCardinality = cardinalityToConstraints
                    .entries().stream().findFirst();
            if (smallestCardinality.isPresent()) {
                if (smallestCardinality.get().getKey() <= earlyReturnThreshold) {
                    LOG.info("Cardinality %s, is below threshold. Returning early while other tasks finish",
                            smallestCardinality);
                    earlyReturn = true;
                }
            }
        } while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks);
    } catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e);
    }

    // Create a copy of the cardinalities
    return ImmutableMultimap.copyOf(cardinalityToConstraints);
}

From source file:com.mgmtp.jfunk.core.JFunk.java

/**
 * Executes the jFunk test. A thread pool ({@link ExecutorService}) is created with the number
 * of configured threads, which handles concurrent script execution.
 */
@Override
protected void doExecute() throws Exception {
    ExecutorService execService = createExecutorService();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<>(execService);

    for (final File script : scripts) {
        completionService.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() {
                boolean success = false;
                StopWatch stopWatch = new StopWatch();
                stopWatch.start();

                RESULT_LOG.info("Thread " + Thread.currentThread().getName() + ": starting execution of script "
                        + script.getName());

                try {
                    success = scriptExecutor.executeScript(script, scriptProperties);
                } catch (Throwable th) {
                    LOG.error(th.getMessage(), th);
                } finally {

                    LOG.info("SCRIPT EXECUTION " + (success ? "SUCCESSFUL" : "FAILED") + " (" + script + ")");

                    RESULT_LOG.info(
                            "Thread " + Thread.currentThread().getName() + ": finished execution of script "
                                    + script.getName() + " (took " + stopWatch + " H:mm:ss.SSS)");
                }
                return success;
            }
        });
    }

    boolean overallResult = true;
    for (int i = 0, size = scripts.size(); i < size; ++i) {
        if (!completionService.take().get()) {
            overallResult = false;
        }
    }

    shutDownExecutorService(execService);

    if (!overallResult) {
        throw new JFunkExecutionException();
    }
}

From source file:org.springframework.integration.groovy.GroovyExpressionTests.java

@Test
public void testScriptFactoryCustomizerThreadSafetyWithNewScript() throws Exception {
    final Customizer customizer = new Customizer(Collections.singletonMap("name", (Object) "foo"));
    final GroovyScriptFactory factory = new GroovyScriptFactory("Groovy Script", customizer);
    CompletionService<String> completionService = new ExecutorCompletionService<String>(
            Executors.newFixedThreadPool(5));
    for (int i = 0; i < 100; i++) {
        final String name = "Bar" + i;
        completionService.submit(new Callable<String>() {
            public String call() throws Exception {
                Object scriptedObject;
                synchronized (customizer) {
                    customizer.setMap(Collections.singletonMap("name", (Object) name));
                    ResourceScriptSource scriptSource = new ResourceScriptSource(
                            new NamedByteArrayResource("\"name=${name}\"".getBytes(), "InlineScript" + name));
                    scriptedObject = factory.getScriptedObject(scriptSource, null);
                }
                String result = scriptedObject.toString();
                logger.debug("Result=" + result + " with name=" + name);
                if (!("name=" + name).equals(result)) {
                    throw new IllegalStateException("Wrong value (" + result + ") for: " + name);
                }
                return name;
            }
        });
    }
    Set<String> set = new HashSet<String>();
    for (int i = 0; i < 100; i++) {
        set.add(completionService.take().get());
    }
    assertEquals(100, set.size());
}

From source file:org.apache.hadoop.hbase.wal.LogRecoveredEditsOutputSink.java

/**
 * Close all of the output streams.
 * @return the list of paths written.
 */
List<Path> close() throws IOException {
    Preconditions.checkState(!closeAndCleanCompleted);

    final List<Path> paths = new ArrayList<>();
    final List<IOException> thrown = Lists.newArrayList();
    ThreadPoolExecutor closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS,
            new ThreadFactory() {
                private int count = 1;

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(r, "split-log-closeStream-" + count++);
                    return t;
                }
            });
    CompletionService<Void> completionService = new ExecutorCompletionService<>(closeThreadPool);
    boolean progress_failed;
    try {
        progress_failed = executeCloseTask(completionService, thrown, paths);
    } catch (InterruptedException e) {
        IOException iie = new InterruptedIOException();
        iie.initCause(e);
        throw iie;
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        closeThreadPool.shutdownNow();
    }
    if (!thrown.isEmpty()) {
        throw MultipleIOException.createIOException(thrown);
    }
    writersClosed = true;
    closeAndCleanCompleted = true;
    if (progress_failed) {
        return null;
    }
    return paths;
}

From source file:com.elixsr.portforwarder.forwarding.ForwardingService.java

/**
 * Starts forwarding based on rules found in database.
 *
 * Acquires an instance of the Forwarding Manager to turn forwarding flag on.
 *
 * Creates a list of callbacks for each forwarding thread, and handles exceptions as they come.
 *
 * If an exception is thrown, the service immediately stops, and the #onDestroy method is
 * called.
 *
 * @param intent
 */
@Override
protected void onHandleIntent(Intent intent) {

    // Gets data from the incoming Intent
    //        String dataString = intent.getDataString();

    Log.i(TAG, "Ran the service");

    ForwardingManager.getInstance().enableForwarding();

    runService = true;

    /*
     * Creates a new Intent containing a Uri object
     * BROADCAST_ACTION is a custom Intent action
     */
    Intent localIntent = new Intent(BROADCAST_ACTION)
            // Puts the status into the Intent
            .putExtra(PORT_FORWARD_SERVICE_STATE, ForwardingManager.getInstance().isEnabled());
    // Broadcasts the Intent to receivers in this app.
    LocalBroadcastManager.getInstance(this).sendBroadcast(localIntent);

    showForwardingEnabledNotification();

    //load the rules from the datastore
    //TODO: inject the rules as extras
    RuleDao ruleDao = new RuleDao(new RuleDbHelper(this));
    List<RuleModel> ruleModels = ruleDao.getAllRuleModels();

    InetSocketAddress from;

    Forwarder forwarder = null;

    /*
     Sourced from: http://stackoverflow.com/questions/19348248/waiting-on-a-list-of-future
     */
    CompletionService<Void> completionService = new ExecutorCompletionService<>(executorService);

    // how many futures there are to check
    int remainingFutures = 0;

    for (RuleModel ruleModel : ruleModels) {

        try {
            from = generateFromIpUsingInterface(ruleModel.getFromInterfaceName(), ruleModel.getFromPort());

            if (ruleModel.isTcp()) {
                completionService.submit(new TcpForwarder(from, ruleModel.getTarget(), ruleModel.getName()));
                remainingFutures++;
            }

            if (ruleModel.isUdp()) {
                completionService.submit(new UdpForwarder(from, ruleModel.getTarget(), ruleModel.getName()));
                remainingFutures++;
            }

        } catch (SocketException | ObjectNotFoundException e) {
            Log.e(TAG, "Error generating IP Address for FROM interface with rule '" + ruleModel.getName() + "'",
                    e);

            // Graceful UI exception handling - broadcast this to the UI, which will handle displaying something to the user, e.g. a Toast
            localIntent = new Intent(BROADCAST_ACTION)
                    // Puts the status into the Intent
                    .putExtra(PORT_FORWARD_SERVICE_ERROR_MESSAGE,
                            "Error while trying to start rule '" + ruleModel.getName() + "'");
            // Broadcasts the Intent to receivers in this app.
            LocalBroadcastManager.getInstance(this).sendBroadcast(localIntent);
        }
    }

    // Build and send an Event.
    tracker.send(new HitBuilders.EventBuilder().setCategory(CATEGORY_FORWARDING)
            .setAction(ACTION_START_FORWARDING).setLabel(ruleModels.size() + " rules").build());

    Future<?> completedFuture;

    // loop through each callback, and handle an exception
    while (remainingFutures > 0) {

        // block until a callable completes
        try {
            completedFuture = completionService.take();
            remainingFutures--;

            completedFuture.get();
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();

            Log.e(TAG, "Error when forwarding port.", e);
            localIntent = new Intent(BROADCAST_ACTION)
                    // Puts the status into the Intent
                    .putExtra(PORT_FORWARD_SERVICE_ERROR_MESSAGE, e.getCause().getMessage());
            // Broadcasts the Intent to receivers in this app.
            LocalBroadcastManager.getInstance(this).sendBroadcast(localIntent);

            break;
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testCluster() throws Exception {
    final int QTY = 20;
    final int OPERATION_TIME_MS = 1000;
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    final Timing timing = new Timing();
    TestingCluster cluster = new TestingCluster(3);
    List<SemaphoreClient> semaphoreClients = Lists.newArrayList();
    try {
        cluster.start();

        final AtomicInteger opCount = new AtomicInteger(0);
        for (int i = 0; i < QTY; ++i) {
            SemaphoreClient semaphoreClient = new SemaphoreClient(cluster.getConnectString(), PATH,
                    new Callable<Void>() {
                        @Override
                        public Void call() throws Exception {
                            opCount.incrementAndGet();
                            Thread.sleep(OPERATION_TIME_MS);
                            return null;
                        }
                    });
            completionService.submit(semaphoreClient);
            semaphoreClients.add(semaphoreClient);
        }

        timing.forWaiting().sleepABit();

        Assert.assertNotNull(SemaphoreClient.getActiveClient());

        final CountDownLatch latch = new CountDownLatch(1);
        CuratorFramework client = CuratorFrameworkFactory.newClient(cluster.getConnectString(),
                timing.session(), timing.connection(), new ExponentialBackoffRetry(100, 3));
        ConnectionStateListener listener = new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if (newState == ConnectionState.LOST) {
                    latch.countDown();
                }
            }
        };
        client.getConnectionStateListenable().addListener(listener);
        client.start();
        try {
            client.getZookeeperClient().blockUntilConnectedOrTimedOut();

            cluster.stop();

            latch.await();
        } finally {
            IOUtils.closeQuietly(client);
        }

        long startTicks = System.currentTimeMillis();
        for (;;) {
            int thisOpCount = opCount.get();
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (thisOpCount == opCount.get()) {
                break; // checking that the op count isn't increasing
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }

        int thisOpCount = opCount.get();

        Iterator<InstanceSpec> iterator = cluster.getInstances().iterator();
        cluster = new TestingCluster(iterator.next(), iterator.next());
        cluster.start();
        timing.forWaiting().sleepABit();

        startTicks = System.currentTimeMillis();
        for (;;) {
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (opCount.get() > thisOpCount) {
                break; // checking that semaphore has started working again
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }
    } finally {
        for (SemaphoreClient semaphoreClient : semaphoreClients) {
            IOUtils.closeQuietly(semaphoreClient);
        }
        IOUtils.closeQuietly(cluster);
        executorService.shutdownNow();
    }
}

From source file:com.github.NearestNeighbors.java

public Map<String, Map<String, Collection<Float>>> evaluate(final Collection<Watcher> test_instances)
        throws IOException, InterruptedException, ExecutionException {
    log.info("knn-evaluate: Loading watchers.");

    log.debug(String.format("knn-evaluate: Total unique test watchers: %d", test_instances.size()));

    final Map<String, Map<String, Collection<Float>>> results = new HashMap<String, Map<String, Collection<Float>>>();

    final ExecutorService pool = Executors.newFixedThreadPool(THREAD_POOL_SIZE);

    // For each watcher in the test set . . .
    log.info("knn-evaluate: Starting evaluations");
    int test_watcher_count = 0;
    for (final Watcher watcher : test_instances) {
        test_watcher_count++;
        log.info(String.format("Processing watcher (%d/%d)", test_watcher_count, test_instances.size()));

        results.put(watcher.id, new HashMap<String, Collection<Float>>());

        // See if we have any training instances for the watcher.  If not, we really can't guess anything.
        final Watcher training_watcher = training_watchers.get(watcher.id);
        if (training_watcher == null) {
            continue;
        }

        /***********************************
         *** Handling repository regions ***
         ***********************************/

        // Calculate the distance between the repository regions we know the test watcher is in, to every other
        // region in the training data.
        final Set<NeighborRegion> test_regions = watchers_to_regions.get(watcher.id);

        /*
        final List<NeighborRegion> related_regions = find_regions_with_most_cutpoints(watcher, test_regions);
        for (final NeighborRegion related_region : related_regions)
        {
          storeDistance(results, watcher, related_region.most_popular, 0.0f);
          storeDistance(results, watcher, related_region.most_forked, 0.0f);
        }
        */

        /*
          also_owned_counts = {}
          training_watcher.repositories.each do |repo_id|
            repo = @training_repositories[repo_id]
                
            also_owned_counts[repo.owner] ||= 0
            also_owned_counts[repo.owner] += 1
          end
                
          also_owned_counts.each do |owner, count|
            # If 5% or more of the test watcher's repositories are owned by the same person, look at the owner's other repositories.
            if (also_owned_repos.size.to_f / training_watcher.repositories.size) > 0.05 || (also_owned_repos.size.to_f / @owners_to_repositories[owner].size) > 0.3
              repositories_to_check.merge(@owners_to_repositories[owner].collect {|r| r.id})
            end
          end
          */

        // Add in the most forked regions from similar watchers.
        /*
        final Set<NeighborRegion> related_regions = find_regions_containing_fellow_watchers(test_regions);
        for (final NeighborRegion region : related_regions)
        {
          repositories_to_check.add(region.most_forked);
        }
        */

        /*************************************
         **** Begin distance calculations ****
         *************************************/
        int test_region_count = 0;

        for (final NeighborRegion test_region : test_regions) {
            test_region_count++;

            final CompletionService<Map<Repository, Float>> cs = new ExecutorCompletionService<Map<Repository, Float>>(
                    pool);
            int training_region_count = 0;

            final Set<Repository> repositories_to_check = new HashSet<Repository>();

            // Add in the most forked repositories from each region we know the test watcher is in.
            for (final NeighborRegion region : test_regions) {
                repositories_to_check.add(region.most_forked);
            }

            for (final Repository repo : training_watcher.repositories) {
                if (repo.parent != null) {
                    repositories_to_check.add(repo.parent);
                }
            }

            /********************************************************************
             *** Handling repositories owned by owners we're already watching ***
             ********************************************************************/
            if (training_watcher.owner_counts.get(test_region.most_forked.owner) != null
                    && (((training_watcher.owner_counts.get(test_region.most_forked.owner).floatValue()
                            / owners_to_repositories.get(test_region.most_forked.owner).size()) > 0.25)
                            || (training_watcher.owner_distribution(test_region.most_forked.owner) > 0.25))) {
                for (final Repository also_owned : owners_to_repositories.get(test_region.most_forked.owner)) {
                    {
                        // Only add repos that are the most forked in their respective regions.
                        if (also_owned.region.most_forked.equals(also_owned)) {
                            repositories_to_check.add(also_owned);
                        }
                    }
                }
            }

            for (final Repository training_repository : repositories_to_check) {
                training_region_count++;

                if (log.isDebugEnabled()) {
                    log.debug(String.format("Processing watcher (%d/%d) - (%d/%d):(%d/%d)", test_watcher_count,
                            test_instances.size(), test_region_count, test_regions.size(),
                            training_region_count, repositories_to_check.size()));
                }

                // Submit distance calculation task if the test watcher isn't already watching the repository.
                cs.submit(new Callable<Map<Repository, Float>>() {

                    public Map<Repository, Float> call() throws Exception {
                        final Map<Repository, Float> ret = new HashMap<Repository, Float>();

                        if (!training_repository.watchers.contains(training_watcher)) {
                            float distance = euclidian_distance(training_watcher, test_region.most_forked,
                                    training_repository);

                            ret.put(training_repository, Float.valueOf(distance));
                        }

                        return ret;
                    }

                });
            }

            // Process the distance calculation results.
            for (int i = 0; i < repositories_to_check.size(); i++) {
                final Map<Repository, Float> distance = cs.take().get();

                for (final Map.Entry<Repository, Float> pair : distance.entrySet()) {
                    storeDistance(results, watcher, pair.getKey(), pair.getValue().floatValue());
                }
            }
        }
    }

    /*
            
            
    =begin
      # Find a set of repositories from fellow watchers that happen to watch a lot of same repositories as the test watcher.
      repositories_to_check.merge find_repositories_containing_fellow_watchers(test_regions)
            
      # Add in the most popular and most forked regions we know the test watcher is in.
      related_regions = find_regions_containing_fellow_watchers(test_regions)
      related_regions.each do |region|
        repositories_to_check << region.most_popular.id
        repositories_to_check << region.most_forked.id
      end
            
      $LOG.info "Added regions from fellow watchers for watcher #{watcher.id} -- new size #{repositories_to_check.size} (+ #{repositories_to_check.size - old_size})"
      old_size = repositories_to_check.size
            
      $LOG.info "Added similarly owned for watcher #{watcher.id} -- new size #{repositories_to_check.size} (+ #{repositories_to_check.size - old_size})"
      old_size = repositories_to_check.size
    =end
            
            
            
            
    =begin
            
    end
            
    results
     */

    return results;
}

From source file:com.palantir.atlasdb.transaction.impl.SnapshotTransactionTest.java

@Test
public void testConcurrentWriteChangedConflicts() throws InterruptedException, ExecutionException {
    conflictDetectionManager.setConflictDetectionMode(TABLE, ConflictHandler.RETRY_ON_VALUE_CHANGED);
    CompletionService<Void> executor = new ExecutorCompletionService<Void>(PTExecutors.newFixedThreadPool(8));
    final Cell cell = Cell.create("row1".getBytes(), "column1".getBytes());
    Transaction t1 = txManager.createNewTransaction();
    t1.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(0L)));
    t1.commit();
    for (int i = 0; i < 1000; i++) {
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                txManager.runTaskWithRetry(new TxTask() {
                    @Override
                    public Void execute(Transaction t) throws RuntimeException {
                        long prev = EncodingUtils
                                .decodeVarLong(t.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
                        t.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(prev + 1)));
                        return null;
                    }
                });
                return null;
            }
        });
    }
    for (int i = 0; i < 1000; i++) {
        Future<Void> future = executor.take();
        future.get();
    }
    t1 = txManager.createNewTransaction();
    long val = EncodingUtils.decodeVarLong(t1.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
    assertEquals(1000, val);
}

From source file:org.apache.phoenix.execute.UpsertSelectOverlappingBatchesIT.java

@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
    try (Connection conn = driver.connect(url, props)) {
        int numUpsertSelectRunners = 5;
        ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
        CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
        List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
        // run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
        futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
        // run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
        for (int i = 0; i < 100; i += 25) {
            futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i + 25, 5)));
        }
        int received = 0;
        while (received < futures.size()) {
            Future<Boolean> resultFuture = completionService.take();
            Boolean result = resultFuture.get();
            received++;
            assertTrue(result);
        }
        exec.shutdownNow();
    }
}