Example usage for java.util.concurrent Future isDone

List of usage examples for java.util.concurrent Future isDone

Introduction

On this page you can find usage examples for java.util.concurrent Future.isDone().

Prototype

boolean isDone();

Document

Returns true if this task completed. Completion may be due to normal termination, an exception, or cancellation; in all of these cases, this method returns true.
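Before the real-world examples below, here is a minimal, self-contained sketch of the typical polling pattern: submit a task to an ExecutorService, check isDone() periodically, and call get() only once the future has completed. The class name, task, and 100 ms polling interval are illustrative choices, not taken from the examples that follow.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class FutureIsDoneExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // A task that takes a little while to finish
        Callable<String> task = () -> {
            TimeUnit.MILLISECONDS.sleep(500);
            return "finished";
        };
        Future<String> future = executor.submit(task);

        // Poll isDone() instead of blocking immediately on get()
        while (!future.isDone()) {
            System.out.println("Task still running...");
            TimeUnit.MILLISECONDS.sleep(100);
        }
        System.out.println("Result: " + future.get()); // no longer blocks here
        executor.shutdown();
    }
}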

Usage

From source file:controllers.GWT2Controller.java

private static Object chainDo(GWT2ChainRuntime chain) {
    Future<?> f = chain.getFuture();
    // Poll until the future has either completed or been cancelled
    while (!f.isDone() && !f.isCancelled()) {
        await(100);
    }
    try {
        return f.get();
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}

From source file:org.opentox.toxotis.factory.PropertyFactory.java

public static Feature createAndPublishProperty(String title, String units, SubstanceDataset ds,
        VRI featureService, AuthenticationToken token) throws ServiceInvocationException {
    Feature brandNewProperty = new Feature();
    MetaInfo mi = new MetaInfoImpl();
    mi.addTitle(title);
    brandNewProperty.setMeta(mi);
    brandNewProperty.setUnits(units);

    //TODO custom enanomapper
    // Publish the property. Inside the substanceDataset there is a csv containing 
    // the header info of a property and a dummy substance

    Future<VRI> predictedFeatureUri = ds.publish(featureService, token);
    /* Wait for remote server to respond */
    try {
        while (!predictedFeatureUri.isDone()) {
            Thread.sleep(1000);
        }
        // Publishing a property is not available. Thus we post to /substance, which returns a substance
        // From that substance we get the substanceOwner and then get the dataset (created only for publishing properties)

        VRI resultUri = predictedFeatureUri.get();

        String host = SubstanceDataset.getHostFromVRI(ds.getUri().toString());
        String ownerUUID = Substance.getSubstanceKey(token, resultUri.getUri(), "ownerUUID");

        //TODO custom enanomapper
        // Get the dataset of the substance
        String datasetUri = SubstanceDataset.getDatasetFromUUIDOwner(ownerUUID, host);

        VRI input = new VRI(datasetUri);
        DatasetJsonDownloader jsn = new DatasetJsonDownloader(input);
        JSONObject obj = jsn.getJSON(token);

        // Get the info from the first (and only one) property of the dataset 
        // There the property's URI is returned.

        String property = jsn.getFirstProperty(obj, host);

        brandNewProperty.setUri(new VRI(property));
        return brandNewProperty;
    } catch (InterruptedException ex) {
        throw new IllegalArgumentException("Interrupted", ex);
    } catch (URISyntaxException ex) {
        throw new IllegalArgumentException("Invalid URI", ex);
    } catch (ExecutionException ex) {
        if (ex.getCause() != null && ex.getCause() instanceof ServiceInvocationException) {
            throw (ServiceInvocationException) ex.getCause();
        }
        throw new ServiceInvocationException(ex);
    }
}

From source file:com.curecomp.primefaces.migrator.PrimefacesMigration.java

private static void awaitAll(List<Future<?>> futures) throws InterruptedException {
    Iterator<Future<?>> iter = futures.iterator();

    while (iter.hasNext()) {
        Future<?> f = iter.next();

        while (!f.isDone()) {
            TimeUnit.SECONDS.sleep(1);
        }
    }
}

From source file:com.twitter.graphjet.bipartite.edgepool.EdgePoolConcurrentTestHelper.java

/**
 * This helper method sets up a concurrent read-write situation with a single writer and multiple
 * readers that access the same underlying edgePool, and tests for correct edge access during
 * simultaneous edge writes. This helps test read consistency during arbitrary points of
 * inserting edges. Note that the exact read-write sequence here is non-deterministic and would
 * vary depending on the machine, but the hope is that given the large number of readers the reads
 * would be done at many different points of edge insertion. The test itself checks only for
 * partial correctness (it could have false positives) so this should only be used as a supplement
 * to other testing.
 *
 * @param edgePool           is the underlying
 *                           {@link com.twitter.graphjet.bipartite.edgepool.EdgePool}
 * @param numReadersPerNode  is the number of reader threads to use per node
 * @param leftSize           is the number of left nodes
 * @param rightSize          is the number of right nodes
 * @param edgeProbability    is the probability of an edge between a left-right node pair
 * @param random             is the random number generator to use for generating a random graph
 */
public static void testRandomConcurrentReadWriteThreads(EdgePool edgePool, int numReadersPerNode, int leftSize,
        int rightSize, double edgeProbability, Random random) {
    int maxWaitingTimeForThreads = 20; // in milliseconds
    int numReaders = leftSize * numReadersPerNode;
    CountDownLatch readersDoneLatch = new CountDownLatch(numReaders);
    // First, construct a random set of edges to insert in the graph
    Set<Pair<Integer, Integer>> edges = Sets
            .newHashSetWithExpectedSize((int) (leftSize * rightSize * edgeProbability));
    List<EdgePoolReader> readers = Lists.newArrayListWithCapacity(numReaders);
    Int2ObjectMap<IntSet> leftSideGraph = new Int2ObjectOpenHashMap<IntSet>(leftSize);
    int averageLeftDegree = (int) (rightSize * edgeProbability);
    for (int i = 0; i < leftSize; i++) {
        IntSet nodeEdges = new IntOpenHashSet(averageLeftDegree);
        for (int j = 0; j < rightSize; j++) {
            if (random.nextDouble() < edgeProbability) {
                nodeEdges.add(j);
                edges.add(Pair.of(i, j));
            }
        }
        leftSideGraph.put(i, nodeEdges);
    }

    // Create a bunch of leftReaders per node that'll read from the graph at random
    for (int i = 0; i < leftSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            readers.add(new EdgePoolReader(edgePool, new CountDownLatch(0), readersDoneLatch, i,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a single writer that will insert these edges in random order
    List<WriterInfo> writerInfo = Lists.newArrayListWithCapacity(edges.size());
    List<Pair<Integer, Integer>> edgesList = Lists.newArrayList(edges);
    Collections.shuffle(edgesList);
    CountDownLatch writerDoneLatch = new CountDownLatch(edgesList.size());
    for (Pair<Integer, Integer> edge : edgesList) {
        writerInfo.add(new WriterInfo(edge.getLeft(), edge.getRight(), new CountDownLatch(0), writerDoneLatch));
    }

    ExecutorService executor = Executors.newFixedThreadPool(numReaders + 1); // single writer
    List<Callable<Integer>> allThreads = Lists.newArrayListWithCapacity(numReaders + 1);
    // First, we add the writer
    allThreads.add(Executors.callable(new EdgePoolWriter(edgePool, writerInfo), 1));
    // then the readers
    for (int i = 0; i < numReaders; i++) {
        allThreads.add(Executors.callable(readers.get(i), 1));
    }
    // these will execute in some non-deterministic order
    Collections.shuffle(allThreads, random);

    // Wait for all the processes to finish
    try {
        List<Future<Integer>> results = executor.invokeAll(allThreads, 10, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            assertTrue(result.isDone());
            assertEquals(1, result.get().intValue());
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a thread was interrupted: ", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Execution issue in an executor thread: ", e);
    }

    // confirm that these worked as expected
    try {
        readersDoneLatch.await();
        writerDoneLatch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for last reader was interrupted: ", e);
    }

    // Check that all readers' read info is consistent with the graph
    for (EdgePoolReader reader : readers) {
        IntSet expectedEdges = leftSideGraph.get(reader.queryNode);
        assertTrue(reader.getQueryNodeDegree() <= expectedEdges.size());
        if (reader.getQueryNodeDegree() == 0) {
            assertNull(reader.getQueryNodeEdges());
        } else {
            for (int edge : reader.getQueryNodeEdges()) {
                assertTrue(expectedEdges.contains(edge));
            }
        }
    }
}
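
In the test above, invokeAll(tasks, timeout, unit) returns only after every task has completed or the timeout has expired, and isDone() is guaranteed to be true for each returned future; tasks that did not finish in time are cancelled. A minimal sketch of that pattern, with illustrative placeholder tasks rather than the graph readers and writer used above:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class InvokeAllIsDoneExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        List<Callable<Integer>> tasks = Arrays.asList(
                () -> 1,                                           // finishes immediately
                () -> { TimeUnit.SECONDS.sleep(10); return 2; });  // will hit the timeout
        // invokeAll waits until all tasks finish or the timeout expires;
        // every returned future is done (completed or cancelled) at this point
        List<Future<Integer>> results = executor.invokeAll(tasks, 1, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            System.out.println("isDone=" + result.isDone() + ", cancelled=" + result.isCancelled());
        }
        executor.shutdownNow();
    }
}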

From source file:UnitTest4.java

public static void execute()
        throws ClientProtocolException, IOException, InterruptedException, ExecutionException {
    /*
      CloseableHttpAsyncClient httpclient = HttpAsyncClients.createDefault();
      try {
    httpclient.start();
    HttpGet request = new HttpGet("http://www.apache.org/");
    Future<HttpResponse> future = httpclient.execute(request, null);
    HttpResponse response = future.get();
    System.out.println("Response: " + response.getStatusLine());
    System.out.println("Shutting down");
      } finally {
    httpclient.close();
      }
      System.out.println("Done");
    */

    /*
    try (CloseableHttpAsyncClient httpclient = HttpAsyncClients.createDefault()) {
        httpclient.start();
        HttpPost request = new HttpPost(addr);
        StringEntity entity = new StringEntity(event, ContentType.create("application/json", Consts.UTF_8));
        request.setEntity(entity);
        httpclient.execute(request, null);
    } catch (Exception e) {
        LOG.error("Failed to sending event", e);
    }
    */
    //Asserts a;
    CloseableHttpAsyncClient m_httpClient = HttpAsyncClients.createDefault();

    m_httpClient.start();

    HttpHost m_target = new HttpHost("localhost", 5000, "http");
    //HttpPost postRequest = new HttpPost("http://localhost:5000/hello");
    HttpPost postRequest = new HttpPost("/");

    StringEntity params = new StringEntity("");

    postRequest.addHeader("content-type", "application/json");
    postRequest.setEntity(params);

    log.debug("execute() executing request to " + m_target);

    //HttpAsyncRequestConsumer<HttpRequest> gh;

    // works HttpResponse httpResponse = httpClient.execute(target, getRequest);
    Future<HttpResponse> future = m_httpClient.execute(m_target, postRequest, null);
    //Future<HttpResponse> future = m_httpClient.execute(postRequest, null);
    //HttpResponse httpResponse = future.get();
    while (!future.isDone()) {
        log.debug("Inside while");
    }
    HttpResponse httpResponse = null;
    try {
        httpResponse = future.get(100, TimeUnit.NANOSECONDS);
    } catch (TimeoutException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    HttpEntity entity = httpResponse.getEntity();

    log.debug("execute()----------------------------------------");
    log.debug("execute() {}", httpResponse.getStatusLine());
    Header[] headers = httpResponse.getAllHeaders();
    for (int i = 0; i < headers.length; i++) {
        log.debug("execute() {}", headers[i]);
    }
    log.debug("execute()----------------------------------------");

    String jsonString = null;
    if (entity != null) {
        jsonString = EntityUtils.toString(entity);
        log.debug("execute() {}", jsonString);
    }

}

From source file:com.twitter.graphjet.bipartite.GraphConcurrentTestHelper.java

/**
 * This helper method sets up a concurrent read-write situation with a single writer and multiple
 * readers that access the same underlying bipartiteGraph, and tests for correct edge access during
 * simultaneous edge writes. This helps test read consistency during arbitrary points of
 * inserting edges. Note that the exact read-write sequence here is non-deterministic and would
 * vary depending on the machine, but the hope is that given the large number of readers the reads
 * would be done at many different points of edge insertion. The test itself checks only for
 * partial correctness (it could have false positives) so this should only be used as a supplement
 * to other testing.
 *
 * @param graph              is the underlying
 *                           {@link BipartiteGraph}
 * @param numReadersPerNode  is the number of reader threads to use per node
 * @param leftSize           is the number of left nodes
 * @param rightSize          is the number of right nodes
 * @param edgeProbability    is the probability of an edge between a left-right node pair
 * @param random             is the random number generator to use for generating a random graph
 */
public static <T extends BipartiteGraph & DynamicBipartiteGraph> void testRandomConcurrentReadWriteThreads(
        T graph, int numReadersPerNode, int leftSize, int rightSize, double edgeProbability, Random random) {
    int maxWaitingTimeForThreads = 20; // in milliseconds
    int numLeftReaders = leftSize * numReadersPerNode;
    int numRightReaders = rightSize * numReadersPerNode;
    int totalNumReaders = numLeftReaders + numRightReaders;
    CountDownLatch readersDoneLatch = new CountDownLatch(totalNumReaders);
    // First, construct a random set of edges to insert in the graph
    Set<Pair<Long, Long>> edges = Sets
            .newHashSetWithExpectedSize((int) (leftSize * rightSize * edgeProbability));
    List<BipartiteGraphReader> leftReaders = Lists.newArrayListWithCapacity(numLeftReaders);
    List<BipartiteGraphReader> rightReaders = Lists.newArrayListWithCapacity(numRightReaders);
    Long2ObjectMap<LongSet> leftSideGraph = new Long2ObjectOpenHashMap<LongSet>(leftSize);
    Long2ObjectMap<LongSet> rightSideGraph = new Long2ObjectOpenHashMap<LongSet>(leftSize);
    int averageLeftDegree = (int) (rightSize * edgeProbability);
    for (int i = 0; i < leftSize; i++) {
        LongSet nodeEdges = new LongOpenHashSet(averageLeftDegree);
        for (int j = 0; j < rightSize; j++) {
            if (random.nextDouble() < edgeProbability) {
                nodeEdges.add(j);
                if (!rightSideGraph.containsKey(j)) {
                    rightSideGraph.put(j, new LongOpenHashSet(new long[] { i }));
                } else {
                    rightSideGraph.get(j).add(i);
                }
                edges.add(Pair.of((long) i, (long) j));
            }
        }
        leftSideGraph.put(i, nodeEdges);
    }

    // Create a bunch of leftReaders per node that'll read from the graph at random
    for (int i = 0; i < leftSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            leftReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, true,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a bunch of rightReaders per node that'll read from the graph at random
    for (int i = 0; i < rightSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            rightReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, false,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a single writer that will insert these edges in random order
    List<WriterInfo> writerInfo = Lists.newArrayListWithCapacity(edges.size());
    List<Pair<Long, Long>> edgesList = Lists.newArrayList(edges);
    Collections.shuffle(edgesList);
    CountDownLatch writerDoneLatch = new CountDownLatch(edgesList.size());
    for (Pair<Long, Long> edge : edgesList) {
        writerInfo.add(new WriterInfo(edge.getLeft(), edge.getRight(), new CountDownLatch(0), writerDoneLatch));
    }

    ExecutorService executor = Executors.newFixedThreadPool(totalNumReaders + 1); // single writer
    List<Callable<Integer>> allThreads = Lists.newArrayListWithCapacity(totalNumReaders + 1);
    // First, we add the writer
    allThreads.add(Executors.callable(new BipartiteGraphWriter(graph, writerInfo), 1));
    // then the readers
    for (int i = 0; i < numLeftReaders; i++) {
        allThreads.add(Executors.callable(leftReaders.get(i), 1));
    }
    for (int i = 0; i < numRightReaders; i++) {
        allThreads.add(Executors.callable(rightReaders.get(i), 1));
    }
    // these will execute in some non-deterministic order
    Collections.shuffle(allThreads, random);

    // Wait for all the processes to finish
    try {
        List<Future<Integer>> results = executor.invokeAll(allThreads, 10, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            assertTrue(result.isDone());
            assertEquals(1, result.get().intValue());
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a thread was interrupted: ", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Execution issue in an executor thread: ", e);
    }

    // confirm that these worked as expected
    try {
        readersDoneLatch.await();
        writerDoneLatch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a latch was interrupted: ", e);
    }

    // Check that all readers' read info is consistent with the graph
    // first check the left side
    for (int i = 0; i < numLeftReaders; i++) {
        LongSet expectedLeftEdges = leftSideGraph.get(leftReaders.get(i).queryNode);
        assertTrue(leftReaders.get(i).getQueryNodeDegree() <= expectedLeftEdges.size());
        if (leftReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(leftReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : leftReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedLeftEdges.contains(edge));
            }
        }
    }

    // then the right side
    for (int i = 0; i < numRightReaders; i++) {
        LongSet expectedRightEdges = rightSideGraph.get(rightReaders.get(i).queryNode);
        assertTrue(rightReaders.get(i).getQueryNodeDegree() <= expectedRightEdges.size());
        if (rightReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(rightReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : rightReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedRightEdges.contains(edge));
            }
        }
    }
}

From source file:org.objectweb.proactive.extensions.dataspaces.vfs.VFSMountManagerHelper.java

/**
 * Tries to close the file systems represented by the given URIs.
 *
 * @param uris the URIs of the file systems to close
 */
public static void closeFileSystems(Collection<String> uris) {
    try {
        writeLock.lock();
        for (String uri : uris) {
            if (alreadyMountedSpaces.containsKey(uri)) {
                Future<FileObject> future = alreadyMountedSpaces.remove(uri);
                if (future.isDone()) {

                    try {
                        FileObject fo = future.get();
                        final FileSystem spaceFileSystem = fo.getFileSystem();

                        // we may not need to close FileObject, but with VFS you never know...
                        try {
                            fo.close();
                        } catch (org.apache.commons.vfs.FileSystemException x) {
                            logger.debug("Could not close data space root file object : " + fo, x);
                            ProActiveLogger.logEatedException(logger,
                                    String.format("Could not close data space %s root file object", fo), x);
                        }
                        vfsManager.closeFileSystem(spaceFileSystem);
                        if (logger.isDebugEnabled())
                            logger.debug("Unmounted space: " + fo);
                    } catch (InterruptedException e) {
                        // ignore
                    } catch (ExecutionException e) {
                        // ignore
                    }
                } else {
                    future.cancel(true);
                }
            }
        }
    } finally {
        writeLock.unlock();
    }
}
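
The example above illustrates a common cleanup pattern: if the future is already done, its result is retrieved and released; otherwise the pending work is cancelled rather than waited for. A minimal, self-contained sketch of the same decision, with a placeholder task standing in for the mount operation:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class DoneOrCancelExample {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(() -> {
            TimeUnit.MILLISECONDS.sleep(200);
            return "resource-handle";
        });
        if (future.isDone()) {
            try {
                String handle = future.get();   // will not block: the task already finished
                System.out.println("Cleaning up " + handle);
            } catch (InterruptedException | ExecutionException e) {
                // ignore, as in the example above
            }
        } else {
            future.cancel(true);                // not finished yet: cancel instead of waiting
            System.out.println("Cancelled pending task");
        }
        executor.shutdown();
    }
}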

From source file:com.glaf.core.resource.ResourceFactory.java

public static byte[] getData(final String region, final String key) {
    if (conf.getBoolean(DISTRIBUTED_ENABLED, false)) {
        String regionName = Environment.getCurrentSystemName() + "_res_" + region;
        String complexKey = Environment.getCurrentSystemName() + "_res_" + key;
        if (SystemProperties.getDeploymentSystemName() != null) {
            regionName = SystemProperties.getDeploymentSystemName() + "_" + Environment.getCurrentSystemName()
                    + "_res_" + region;
        }
        if (SystemProperties.getDeploymentSystemName() != null) {
            complexKey = SystemProperties.getDeploymentSystemName() + "_" + Environment.getCurrentSystemName()
                    + "_res_" + key;
        }
        final String regionName2 = regionName;
        final String complexKey2 = complexKey;

        boolean waitFor = true;
        Callable<byte[]> task = new Callable<byte[]>() {
            @Override
            public byte[] call() throws Exception {
                return channel.getData(regionName2, complexKey2);
            }
        };
        try {
            Future<byte[]> result = pool.submit(task);
            long start = System.currentTimeMillis();
            // wait up to two seconds for the remote channel call to complete
            if (waitFor) {
                while (true) {
                    if (System.currentTimeMillis() - start > 2000) {
                        break;
                    }
                    if (result.isDone()) {
                        return result.get();
                    }
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
            logger.error(ex);
        }
    }
    return null;
}
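
The busy loop above effectively gives the channel call two seconds to complete before giving up. Future.get(long, TimeUnit) expresses the same wait-with-deadline intent without spinning; below is a minimal sketch of that alternative, with a placeholder task standing in for channel.getData(...).

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedGetExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Callable<byte[]> task = () -> new byte[] { 1, 2, 3 }; // placeholder for channel.getData(...)
        Future<byte[]> result = pool.submit(task);
        byte[] data;
        try {
            // Blocks for at most two seconds instead of polling isDone() in a loop
            data = result.get(2, TimeUnit.SECONDS);
        } catch (TimeoutException ex) {
            data = null; // same outcome as falling out of the polling loop
        }
        System.out.println(data == null ? "no data" : data.length + " bytes");
        pool.shutdown();
    }
}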

From source file:com.glaf.core.config.ConfigFactory.java

public static String getString(final String region, final String key) {
    if (conf.getBoolean(DISTRIBUTED_ENABLED, false)) {
        String regionName = Environment.getCurrentSystemName() + "_" + region;
        String complexKey = Environment.getCurrentSystemName() + "_" + key;
        if (SystemProperties.getDeploymentSystemName() != null) {
            regionName = SystemProperties.getDeploymentSystemName() + "_" + Environment.getCurrentSystemName()
                    + "_" + region;
        }
        if (SystemProperties.getDeploymentSystemName() != null) {
            complexKey = SystemProperties.getDeploymentSystemName() + "_" + Environment.getCurrentSystemName()
                    + "_" + key;
        }
        final String regionName2 = regionName;
        final String complexKey2 = complexKey;

        boolean waitFor = true;
        Callable<String> task = new Callable<String>() {
            @Override
            public String call() throws Exception {
                return channel.getString(regionName2, complexKey2);
            }
        };
        try {
            Future<String> result = pool.submit(task);
            long start = System.currentTimeMillis();
            // wait up to two seconds for the remote channel call to complete
            if (waitFor) {
                while (true) {
                    if (System.currentTimeMillis() - start > 2000) {
                        break;
                    }
                    if (result.isDone()) {
                        return result.get();
                    }
                }
            }
        } catch (Exception ex) {
            ex.printStackTrace();
            logger.error(ex);
        }
    }
    return null;
}

From source file:org.objectweb.proactive.extensions.dataspaces.vfs.VFSMountManagerHelper.java

private static void handleFuture(Future<FileObject> future, List<String> uris, StringBuilder exceptionMessage,
        MutableInteger exceptionCount, String nl) throws FileSystemException {
    if (future.isDone()) {
        try {
            // get() is called only to surface any exception raised while mounting;
            // the returned FileObject itself is not used here
            FileObject answer = future.get();

        } catch (InterruptedException e) {
            throw new FileSystemException("Interruption occurred when trying to mount " + uris, e);
        } catch (ExecutionException e) {
            exceptionMessage.append(StackTraceUtil.getStackTrace(e) + nl + nl);
            exceptionCount.add(1);

        }
    }
}