List of usage examples for java.util.concurrent.Callable
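All of the snippets below follow the same basic pattern: a Callable<V> is handed to an executor (directly, via a FutureTask, or wrapped in a framework type such as WebAsyncTask), and its result is retrieved through a Future. For orientation, here is a minimal, self-contained sketch of that pattern; the class name CallableDemo is illustrative only and does not come from any of the listed projects.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class CallableDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // Submit a Callable; unlike Runnable, it returns a value and may throw a checked exception.
            Future<Integer> future = executor.submit(new Callable<Integer>() {
                @Override
                public Integer call() throws Exception {
                    return 6 * 7;
                }
            });
            // Block for at most one second waiting for the result.
            System.out.println("result = " + future.get(1, TimeUnit.SECONDS));
        } finally {
            executor.shutdown();
        }
    }
}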
From source file:apiserver.services.images.controllers.ImageController.java
/**
 * Get basic info about an image.
 *
 * @param documentId cache id
 * @return height, width, pixel size, transparency
 *         , @RequestPart("meta-data") Object metadata
 *         , @RequestParam MultipartFile file
 */
@ApiOperation(value = "Get the height and width for the image", responseClass = "java.util.Map")
@RequestMapping(value = "/info/{documentId}/size", method = { RequestMethod.GET })
public WebAsyncTask<ResponseEntity<Map>> imageInfoByImageAsync(
        @ApiParam(name = "documentId", required = true, defaultValue = "8D981024-A297-4169-8603-E503CC38EEDA")
        @PathVariable(value = "documentId") String documentId)
        throws ExecutionException, TimeoutException, InterruptedException {
    final String _documentId = documentId;

    Callable<ResponseEntity<Map>> callable = new Callable<ResponseEntity<Map>>() {
        @Override
        public ResponseEntity<Map> call() throws Exception {
            FileInfoJob args = new FileInfoJob();
            args.setDocumentId(_documentId);

            Future<Map> imageFuture = gateway.imageSize(args);
            Map payload = imageFuture.get(defaultTimeout, TimeUnit.MILLISECONDS);

            HttpHeaders headers = new HttpHeaders();
            headers.setContentType(MediaType.APPLICATION_JSON);

            ResponseEntity<Map> result = new ResponseEntity<Map>(payload, headers, HttpStatus.OK);
            return result;
        }
    };

    return new WebAsyncTask<ResponseEntity<Map>>(defaultTimeout, callable);
}
From source file:com.netflix.curator.framework.recipes.queue.TestBoundedDistributedQueue.java
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") @Test/*ww w .j a v a 2 s. c o m*/ public void testMulti() throws Exception { final String PATH = "/queue"; final int CLIENT_QTY = 4; final int MAX_ITEMS = 10; final int ADD_ITEMS = MAX_ITEMS * 100; final int SLOP_FACTOR = 2; final QueueConsumer<String> consumer = new QueueConsumer<String>() { @Override public void consumeMessage(String message) throws Exception { Thread.sleep(10); } @Override public void stateChanged(CuratorFramework client, ConnectionState newState) { } }; final Timing timing = new Timing(); final ExecutorService executor = Executors.newCachedThreadPool(); ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executor); final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1)); try { client.start(); client.create().forPath(PATH); final CountDownLatch isWaitingLatch = new CountDownLatch(1); final AtomicBoolean isDone = new AtomicBoolean(false); final List<Integer> counts = new CopyOnWriteArrayList<Integer>(); final Object lock = new Object(); executor.submit(new Callable<Void>() { @Override public Void call() throws Exception { Watcher watcher = new Watcher() { @Override public void process(WatchedEvent event) { synchronized (lock) { lock.notifyAll(); } } }; while (!Thread.currentThread().isInterrupted() && client.isStarted() && !isDone.get()) { synchronized (lock) { int size = client.getChildren().usingWatcher(watcher).forPath(PATH).size(); counts.add(size); isWaitingLatch.countDown(); lock.wait(); } } return null; } }); isWaitingLatch.await(); for (int i = 0; i < CLIENT_QTY; ++i) { final int index = i; completionService.submit(new Callable<Void>() { @Override public Void call() throws Exception { CuratorFramework client = null; DistributedQueue<String> queue = null; try { client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1)); client.start(); queue = QueueBuilder.builder(client, consumer, serializer, PATH).executor(executor) .maxItems(MAX_ITEMS).putInBackground(false).lockPath("/locks").buildQueue(); queue.start(); for (int i = 0; i < ADD_ITEMS; ++i) { queue.put("" + index + "-" + i); } } finally { IOUtils.closeQuietly(queue); IOUtils.closeQuietly(client); } return null; } }); } for (int i = 0; i < CLIENT_QTY; ++i) { completionService.take().get(); } isDone.set(true); synchronized (lock) { lock.notifyAll(); } for (int count : counts) { Assert.assertTrue(counts.toString(), count <= (MAX_ITEMS * SLOP_FACTOR)); } } finally { executor.shutdownNow(); IOUtils.closeQuietly(client); } }
From source file:com.microsoft.azure.management.sql.DatabaseBackupOperationsImpl.java
/**
 * Returns a list of Azure SQL Database restore points.
 *
 * @param resourceGroupName Required. The name of the Resource Group to
 * which the server belongs.
 * @param serverName Required. The name of the Azure SQL Database Server on
 * which the database is hosted.
 * @param databaseName Required. The name of the Azure SQL Database from
 * which to retrieve available restore points.
 * @return Represents the response to a List Azure Sql Database restore
 * points request.
 */
@Override
public Future<RestorePointListResponse> listRestorePointsAsync(final String resourceGroupName,
        final String serverName, final String databaseName) {
    return this.getClient().getExecutorService().submit(new Callable<RestorePointListResponse>() {
        @Override
        public RestorePointListResponse call() throws Exception {
            return listRestorePoints(resourceGroupName, serverName, databaseName);
        }
    });
}
From source file:com.skymobi.monitor.action.LogsAction.java
@RequestMapping(value = "/projects/{projectName}/logs/more", method = RequestMethod.GET) public void console(final HttpServletResponse response, ModelMap map, @PathVariable String projectName, LogQuery logQuery) throws IOException, ParseException { Project project = projectService.findProject(projectName); map.put("project", project); final MongoConverter converter = project.fetchMongoTemplate().getConverter(); final DBCursor cursor = logsService.findLogs(projectName, logQuery); final StringBuffer buf = new StringBuffer(); @SuppressWarnings("unchecked") FutureTask<String> task = new FutureTask(new Callable<String>() { @Override/* w w w .ja v a2 s. c o m*/ public String call() throws Exception { long startTime = System.currentTimeMillis(); //???20 logger.debug("result:"); while (cursor.hasNext()) { Log log = converter.read(Log.class, cursor.next()); buf.insert(0, log.toString() + "\n"); long current = System.currentTimeMillis(); if ((current - startTime) / 1000 >= mongWaitSeconds) break; } return buf.toString(); } }); executor.execute(task); try { task.get(mongWaitSeconds + 5, TimeUnit.SECONDS); cursor.close(); } catch (Exception e) { logger.error("time out ", e); task.cancel(true); } response.setContentType("text/html;charset=UTF-8"); response.getWriter().write(buf.toString()); response.getWriter().flush(); }
From source file:com.appleframework.monitor.action.LogsAction.java
@RequestMapping(value = "/projects/{projectName}/logs/more", method = RequestMethod.GET) public void console(final HttpServletResponse response, ModelMap map, @PathVariable String projectName, LogQuery logQuery) throws IOException, ParseException { Project project = projectService.findProject(projectName); map.put("project", project); final MongoConverter converter = project.fetchMongoTemplate().getConverter(); final DBCursor cursor = logsService.findLogs(projectName, logQuery); final StringBuffer buf = new StringBuffer(); FutureTask<String> task = new FutureTask<String>(new Callable<String>() { @Override//from w w w . j av a 2 s. co m public String call() throws Exception { long startTime = System.currentTimeMillis(); //???20 logger.debug("result:"); while (cursor.hasNext()) { Log log = converter.read(Log.class, cursor.next()); buf.insert(0, log.toString() + "\n"); long current = System.currentTimeMillis(); if ((current - startTime) / 1000 >= mongWaitSeconds) break; } return buf.toString(); } }); executor.execute(task); try { task.get(mongWaitSeconds + 5, TimeUnit.SECONDS); cursor.close(); } catch (Exception e) { logger.error("time out ", e); task.cancel(true); } response.setContentType("text/html;charset=UTF-8"); response.getWriter().write(buf.toString()); response.getWriter().flush(); }
From source file:com.github.rosjava.rosjava_extras.hokuyo.scip20.Device.java
/**
 * It is not necessary to provide buffered streams. Buffering is handled
 * internally.
 *
 * @param inputStream
 *          the {@link InputStream} for the ACM serial device
 * @param outputStream
 *          the {@link OutputStream} for the ACM serial device
 * @param epochTimeProvider
 */
public Device(InputStream inputStream, OutputStream outputStream, TimeProvider epochTimeProvider) {
    bufferedInputStream = new BufferedInputStream(inputStream, STREAM_BUFFER_SIZE);
    reader = new BufferedReader(new InputStreamReader(bufferedInputStream, Charset.forName("US-ASCII")));
    writer = new BufferedWriter(new OutputStreamWriter(
            new BufferedOutputStream(outputStream, STREAM_BUFFER_SIZE), Charset.forName("US-ASCII")));
    remoteUptimeClock = RemoteUptimeClock.newDefault(epochTimeProvider, new Callable<Double>() {
        @Override
        public Double call() throws Exception {
            return (double) queryUptime();
        }
    }, DRIFT_SENSITIVITY, ERROR_REDUCTION_COEFFICIENT_SENSITIVITY, LATENCY_FILTER_SAMPLE_SIZE,
            LATENCY_FILTER_THRESHOLD);
    init();
    configuration = queryConfiguration();
}
From source file:com.google.cloud.bigtable.hbase.TestCreateTable.java
/**
 * Requirement 1.8 - Table names must match [_a-zA-Z0-9][-_.a-zA-Z0-9]*
 */
@Test(timeout = 1000l * 60 * 4)
public void testTableNames() throws IOException {
    String shouldTest = System.getProperty("bigtable.test.create.table", "true");
    if (!"true".equals(shouldTest)) {
        return;
    }
    String[] goodNames = { "a", "1", "_", // Really? Yuck.
            "_x", "a-._5x", "_a-._5x",
            // TODO(sduskis): Join the last 2 strings once the Bigtable backend supports table names
            // longer than 50 characters.
            "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghi", "jklmnopqrstuvwxyz1234567890_-." };
    String[] badNames = { "-x", ".x", "a!", "a@", "a#", "a$", "a%", "a^", "a&", "a*", "a(", "a+", "a=", "a~",
            "a`", "a{", "a[", "a|", "a\\", "a/", "a<", "a,", "a?",
            "a" + RandomStringUtils.random(10, false, false) };

    final Admin admin = getConnection().getAdmin();

    for (String badName : badNames) {
        boolean failed = false;
        try {
            admin.createTable(new HTableDescriptor(TableName.valueOf(badName))
                    .addFamily(new HColumnDescriptor(COLUMN_FAMILY)));
        } catch (IllegalArgumentException e) {
            failed = true;
        }
        Assert.assertTrue("Should fail as table name: '" + badName + "'", failed);
    }

    final TableName[] tableNames = admin.listTableNames();
    List<ListenableFuture<Void>> futures = new ArrayList<>();
    ListeningExecutorService es = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
    for (final String goodName : goodNames) {
        futures.add(es.submit(new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                createTable(admin, goodName, tableNames);
                return null;
            }
        }));
    }
    try {
        try {
            Futures.allAsList(futures).get(3, TimeUnit.MINUTES);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } finally {
        es.shutdownNow();
    }
}
From source file:comsat.sample.actuator.SampleController.java
private Callable<Map<String, Object>> ollehCallable(final Message message,
        final DeferredResult<Map<String, Object>> optDeferred) throws SuspendExecution {
    return new Callable<Map<String, Object>>() {
        @Override
        @Suspendable
        public Map<String, Object> call() throws Exception {
            try {
                Fiber.sleep(10);
                Map<String, Object> model = new LinkedHashMap<>();
                model.put("message", message.getValue());
                model.put("title", "Hello Home");
                model.put("date", new Date());
                if (optDeferred != null)
                    optDeferred.setResult(model);
                return model;
            } catch (Throwable t) {
                if (optDeferred != null)
                    optDeferred.setErrorResult(t);
                throw t;
            }
        }
    };
}
From source file:com.netflix.curator.framework.recipes.barriers.TestDistributedBarrier.java
@Test
public void testMultiClient() throws Exception {
    CuratorFramework client1 = null;
    CuratorFramework client2 = null;
    try {
        {
            CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                    new RetryOneTime(1));
            try {
                client.start();
                DistributedBarrier barrier = new DistributedBarrier(client, "/barrier");
                barrier.setBarrier();
            } finally {
                IOUtils.closeQuietly(client);
            }
        }

        client1 = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
        client2 = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));

        List<Future<Object>> futures = Lists.newArrayList();
        ExecutorService service = Executors.newCachedThreadPool();
        for (final CuratorFramework c : new CuratorFramework[] { client1, client2 }) {
            Future<Object> future = service.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    c.start();
                    DistributedBarrier barrier = new DistributedBarrier(c, "/barrier");
                    barrier.waitOnBarrier(10, TimeUnit.MILLISECONDS);
                    return null;
                }
            });
            futures.add(future);
        }

        Thread.sleep(1000);

        {
            CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                    new RetryOneTime(1));
            try {
                client.start();
                DistributedBarrier barrier = new DistributedBarrier(client, "/barrier");
                barrier.removeBarrier();
            } finally {
                IOUtils.closeQuietly(client);
            }
        }

        for (Future<Object> f : futures) {
            f.get();
        }
    } finally {
        IOUtils.closeQuietly(client1);
        IOUtils.closeQuietly(client2);
    }
}
From source file:com.microsoft.azure.management.resources.ProviderOperationsMetadataOperationsImpl.java
/**
 * Gets provider operations metadata.
 *
 * @param resourceProviderNamespace Required. Namespace of the resource
 * provider.
 * @return Provider operations metadata
 */
@Override
public Future<ProviderOperationsMetadataGetResult> getAsync(final String resourceProviderNamespace) {
    return this.getClient().getExecutorService().submit(new Callable<ProviderOperationsMetadataGetResult>() {
        @Override
        public ProviderOperationsMetadataGetResult call() throws Exception {
            return get(resourceProviderNamespace);
        }
    });
}