List of usage examples for the java.util.concurrent.FutureTask constructor
public FutureTask(Callable<V> callable)
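Before the real-world usages below, a minimal sketch of the constructor in isolation (a generic illustration, not taken from any of the projects listed): wrap a Callable<V>, hand the task to a thread, and block on get() for the result.

import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;

public class FutureTaskHello {
    public static void main(String[] args) throws Exception {
        // wrap a Callable<V> in a FutureTask<V>
        FutureTask<String> task = new FutureTask<String>(new Callable<String>() {
            public String call() {
                return "hello from " + Thread.currentThread().getName();
            }
        });

        // FutureTask implements Runnable, so it can run on a plain Thread...
        new Thread(task, "worker").start();

        // ...while the caller blocks on the Future side for the result
        System.out.println(task.get());
    }
}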
From source file:it.geosolutions.tools.io.file.Copy.java
/**
 * @param ex
 * @param source
 * @param destination
 * @param seconds
 * @return
 * @throws RejectedExecutionException
 *             - if this task cannot be accepted for execution.
 * @throws IllegalArgumentException
 *             - if executor is null or terminated.
 */
public static FutureTask<File> asynchFileCopyToNFS(final ExecutorService ex, final File source,
        final File destination, final int seconds)
        throws RejectedExecutionException, IllegalArgumentException {
    if (ex == null || ex.isTerminated()) {
        throw new IllegalArgumentException(
                "Unable to run asynchronously using a terminated or null ThreadPoolExecutor");
    }
    final Callable<File> call = new Callable<File>() {
        public File call() throws Exception {
            return Copy.copyFileToNFS(source, destination, seconds);
        }
    };
    final FutureTask<File> futureFile = new FutureTask<File>(call);
    ex.execute(futureFile);
    return futureFile;
    // return ex.submit(call);
}
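The commented-out return ex.submit(call) points at an equivalent style: submit() wraps the Callable in a FutureTask internally and returns it as a Future. A minimal sketch of the two styles side by side (copyFile is a hypothetical stand-in for the copy callable):

import java.io.File;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;

class CopyStyles {

    // style 1: let the executor do the wrapping; the caller gets a Future
    static Future<File> viaSubmit(ExecutorService ex, Callable<File> copyFile) {
        return ex.submit(copyFile);
    }

    // style 2: construct the FutureTask explicitly; the caller keeps a concrete
    // reference that is both a Runnable and a Future, so it could also be run
    // on a plain Thread instead of an executor
    static FutureTask<File> viaExecute(ExecutorService ex, Callable<File> copyFile) {
        FutureTask<File> task = new FutureTask<File>(copyFile);
        ex.execute(task);
        return task;
    }
}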
From source file:org.wso2.carbon.caching.impl.CacheImpl.java
@Override
public Future<Map<K, ? extends V>> loadAll(final Set<? extends K> keys) {
    Util.checkAccess(ownerTenantDomain, ownerTenantId);
    checkStatusStarted();
    lastAccessed = System.currentTimeMillis();
    if (keys == null) {
        throw new NullPointerException("keys");
    }
    CacheLoader<K, ? extends V> cacheLoader = cacheConfiguration.getCacheLoader();
    if (cacheLoader == null) {
        return null;
    }
    if (keys.contains(null)) {
        throw new NullPointerException("key");
    }
    CarbonContext carbonContext = CarbonContext.getThreadLocalCarbonContext();
    Callable<Map<K, ? extends V>> callable = new CacheLoaderLoadAllCallable<K, V>(this, cacheLoader, keys,
            carbonContext.getTenantDomain(), carbonContext.getTenantId());
    FutureTask<Map<K, ? extends V>> task = new FutureTask<Map<K, ? extends V>>(callable);
    cacheLoadExecService.submit(task);
    return task;
}
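Note the return value here: the FutureTask is handed to submit() as a Runnable, so the Future that submit() returns would only ever yield null; returning the task itself is what preserves access to the loaded map.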
From source file:org.opencms.workflow.CmsDefaultWorkflowManager.java
/**
 * The implementation of the "publish" workflow action.<p>
 *
 * @param userCms the user CMS context
 * @param options the publish options
 * @param resources the resources which the action should process
 *
 * @return the workflow response
 * @throws CmsException if something goes wrong
 */
protected CmsWorkflowResponse actionPublish(CmsObject userCms, CmsPublishOptions options,
        final List<CmsResource> resources) throws CmsException {
    final CmsPublish publish = new CmsPublish(userCms, options);
    // use FutureTask to get the broken links, because we can then use a different thread if it takes too long
    final FutureTask<List<CmsPublishResource>> brokenResourcesGetter = new FutureTask<List<CmsPublishResource>>(
            new Callable<List<CmsPublishResource>>() {

                public List<CmsPublishResource> call() throws Exception {
                    return publish.getBrokenResources(resources);
                }
            });
    Thread brokenResourcesThread = new Thread(brokenResourcesGetter);
    brokenResourcesThread.start();
    try {
        List<CmsPublishResource> brokenResources = brokenResourcesGetter.get(10, TimeUnit.SECONDS);
        if (brokenResources.size() == 0) {
            publish.publishResources(resources);
            CmsWorkflowResponse response = new CmsWorkflowResponse(true, "",
                    new ArrayList<CmsPublishResource>(), new ArrayList<CmsWorkflowAction>(), null);
            return response;
        } else {
            String brokenResourcesLabel = getLabel(userCms, Messages.GUI_BROKEN_LINKS_0);
            boolean canForcePublish = OpenCms.getWorkplaceManager().getDefaultUserSettings()
                    .isAllowBrokenRelations()
                    || OpenCms.getRoleManager().hasRole(userCms, CmsRole.VFS_MANAGER);
            List<CmsWorkflowAction> actions = new ArrayList<CmsWorkflowAction>();
            if (canForcePublish) {
                String forceLabel = getLabel(userCms, Messages.GUI_WORKFLOW_ACTION_FORCE_PUBLISH_0);
                actions.add(new CmsWorkflowAction(ACTION_FORCE_PUBLISH, forceLabel, true, true));
            }
            CmsWorkflowResponse response = new CmsWorkflowResponse(false, brokenResourcesLabel,
                    brokenResources, actions, null);
            return response;
        }
    } catch (TimeoutException e) {
        // Things are taking too long, do them in a different thread and just return "OK" to the client
        Thread thread = new Thread() {

            @SuppressWarnings("synthetic-access")
            @Override
            public void run() {
                LOG.info(
                        "Checking broken relations is taking too long, using a different thread for checking and publishing now.");
                try {
                    // Make sure the computation is finished by calling get() without a timeout parameter.
                    // We don't need the actual result of the get(), though; we just get the set of
                    // resource paths from the validator object.
                    brokenResourcesGetter.get();
                    List<CmsResource> resourcesToPublish = new ArrayList<CmsResource>(resources);
                    Iterator<CmsResource> resIter = resourcesToPublish.iterator();
                    while (resIter.hasNext()) {
                        CmsResource currentRes = resIter.next();
                        if (publish.getRelationValidator().keySet().contains(currentRes.getRootPath())) {
                            resIter.remove();
                            LOG.info("Excluding resource from publish list because relations would be broken: "
                                    + currentRes.getRootPath());
                        }
                    }
                    publish.publishResources(resourcesToPublish);
                } catch (Exception ex) {
                    LOG.error(ex.getLocalizedMessage(), ex);
                }
            }
        };
        thread.start();
        CmsWorkflowResponse response = new CmsWorkflowResponse(true, "", new ArrayList<CmsPublishResource>(),
                new ArrayList<CmsWorkflowAction>(), null);
        return response;
    } catch (InterruptedException e) {
        // shouldn't happen; log exception
        LOG.error(e.getLocalizedMessage());
        return null;
    } catch (ExecutionException e) {
        // shouldn't happen; log exception
        LOG.error(e.getLocalizedMessage());
        return null;
    }
}
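Distilled, the timeout-fallback pattern above looks like this (a minimal sketch; slowCheck stands in for getBrokenResources, and the 10-second budget is taken from the example). FutureTask caches its outcome, so the background thread's unbounded get() on the same task after the foreground get() timed out is safe:

import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class TimeoutFallback {

    static void runWithFallback(Callable<String> slowCheck) throws Exception {
        final FutureTask<String> task = new FutureTask<String>(slowCheck);
        new Thread(task, "checker").start();
        try {
            // fast path: answer arrives within the interactive budget
            System.out.println("result: " + task.get(10, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            // slow path: let a background thread wait for the same task
            new Thread(new Runnable() {
                public void run() {
                    try {
                        System.out.println("late result: " + task.get());
                    } catch (Exception ex) {
                        ex.printStackTrace();
                    }
                }
            }, "checker-fallback").start();
        }
    }
}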
From source file:org.apache.hadoop.hive.ql.exec.tez.TezSessionState.java
protected void openInternal(final HiveConf conf, Collection<String> additionalFiles, boolean isAsync,
        LogHelper console, Path scratchDir)
        throws IOException, LoginException, IllegalArgumentException, URISyntaxException, TezException {
    this.conf = conf;
    // TODO Why is the queue name set again? It has already been set up via setQueueName. Do only one of the two.
    String confQueueName = conf.get(TezConfiguration.TEZ_QUEUE_NAME);
    if (queueName != null && !queueName.equals(confQueueName)) {
        LOG.warn("Resetting a queue name that was already set: was " + queueName + ", now " + confQueueName);
    }
    this.queueName = confQueueName;
    this.doAsEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS);

    final boolean llapMode = "llap"
            .equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_MODE));

    // TODO This - at least for the session pool - will always be the hive user. How does doAs above this affect things?
    UserGroupInformation ugi = Utils.getUGI();
    user = ugi.getShortUserName();
    LOG.info("User of session id " + sessionId + " is " + user);

    // create the tez tmp dir
    tezScratchDir = scratchDir == null ? createTezDir(sessionId) : scratchDir;

    additionalFilesNotFromConf.clear();
    if (additionalFiles != null) {
        additionalFilesNotFromConf.addAll(additionalFiles);
    }

    refreshLocalResourcesFromConf(conf);

    // unless already installed on all the cluster nodes, we'll have to
    // localize hive-exec.jar as well.
    appJarLr = createJarLocalResource(utils.getExecJarPathLocal());

    // configuration for the application master
    final Map<String, LocalResource> commonLocalResources = new HashMap<String, LocalResource>();
    commonLocalResources.put(utils.getBaseName(appJarLr), appJarLr);
    for (LocalResource lr : localizedResources) {
        commonLocalResources.put(utils.getBaseName(lr), lr);
    }

    if (llapMode) {
        // localize llap client jars
        addJarLRByClass(LlapTaskSchedulerService.class, commonLocalResources);
        addJarLRByClass(LlapProtocolClientImpl.class, commonLocalResources);
        addJarLRByClass(LlapProtocolClientProxy.class, commonLocalResources);
        addJarLRByClassName("org.apache.hadoop.registry.client.api.RegistryOperations", commonLocalResources);
    }

    // Create environment for AM.
    Map<String, String> amEnv = new HashMap<String, String>();
    MRHelpers.updateEnvBasedOnMRAMEnv(conf, amEnv);

    // and finally we're ready to create and start the session
    // generate basic tez config
    final TezConfiguration tezConfig = new TezConfiguration(conf);
    // set up the staging directory to use
    tezConfig.set(TezConfiguration.TEZ_AM_STAGING_DIR, tezScratchDir.toUri().toString());
    conf.stripHiddenConfigurations(tezConfig);

    ServicePluginsDescriptor servicePluginsDescriptor;

    Credentials llapCredentials = null;
    if (llapMode) {
        if (UserGroupInformation.isSecurityEnabled()) {
            llapCredentials = new Credentials();
            llapCredentials.addToken(LlapTokenIdentifier.KIND_NAME, getLlapToken(user, tezConfig));
        }
        // TODO Change this to not serialize the entire Configuration - minor.
        UserPayload servicePluginPayload = TezUtils.createUserPayloadFromConf(tezConfig);
        // we need plugins to handle llap and uber mode
        servicePluginsDescriptor = ServicePluginsDescriptor.create(true,
                new TaskSchedulerDescriptor[] { TaskSchedulerDescriptor.create(LLAP_SERVICE, LLAP_SCHEDULER)
                        .setUserPayload(servicePluginPayload) },
                new ContainerLauncherDescriptor[] {
                        ContainerLauncherDescriptor.create(LLAP_SERVICE, LLAP_LAUNCHER) },
                new TaskCommunicatorDescriptor[] { TaskCommunicatorDescriptor
                        .create(LLAP_SERVICE, LLAP_TASK_COMMUNICATOR).setUserPayload(servicePluginPayload) });
    } else {
        servicePluginsDescriptor = ServicePluginsDescriptor.create(true);
    }

    // container prewarming. tell the am how many containers we need
    if (HiveConf.getBoolVar(conf, ConfVars.HIVE_PREWARM_ENABLED)) {
        int n = HiveConf.getIntVar(conf, ConfVars.HIVE_PREWARM_NUM_CONTAINERS);
        n = Math.max(tezConfig.getInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS,
                TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS_DEFAULT), n);
        tezConfig.setInt(TezConfiguration.TEZ_AM_SESSION_MIN_HELD_CONTAINERS, n);
    }

    setupSessionAcls(tezConfig, conf);

    final TezClient session = TezClient.newBuilder("HIVE-" + sessionId, tezConfig).setIsSession(true)
            .setLocalResources(commonLocalResources).setCredentials(llapCredentials)
            .setServicePluginDescriptor(servicePluginsDescriptor).build();

    LOG.info("Opening new Tez Session (id: " + sessionId + ", scratch dir: " + tezScratchDir + ")");

    TezJobMonitor.initShutdownHook();
    if (!isAsync) {
        startSessionAndContainers(session, conf, commonLocalResources, tezConfig, false);
        this.session = session;
    } else {
        FutureTask<TezClient> sessionFuture = new FutureTask<>(new Callable<TezClient>() {
            @Override
            public TezClient call() throws Exception {
                try {
                    return startSessionAndContainers(session, conf, commonLocalResources, tezConfig, true);
                } catch (Throwable t) {
                    LOG.error("Failed to start Tez session", t);
                    throw (t instanceof Exception) ? (Exception) t : new Exception(t);
                }
            }
        });
        new Thread(sessionFuture, "Tez session start thread").start();
        // We assume here nobody will try to get session before open() returns.
        this.console = console;
        this.sessionFuture = sessionFuture;
    }
}
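The stored sessionFuture field is the other half of this pattern: any later caller that needs the TezClient blocks on sessionFuture.get(), which waits for the background start to finish and rethrows a startup failure wrapped in an ExecutionException.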
From source file:info.pancancer.arch3.test.TestWorker.java
@Test
public void testWorker_endlessFromConfig() throws Exception {
    HierarchicalINIConfiguration configObj = new HierarchicalINIConfiguration();
    configObj.addProperty("rabbit.rabbitMQQueueName", "seqware");
    configObj.addProperty("rabbit.rabbitMQHost", "localhost");
    configObj.addProperty("rabbit.rabbitMQUser", "guest");
    configObj.addProperty("rabbit.rabbitMQPass", "guest");
    configObj.addProperty("worker.heartbeatRate", "2.5");
    configObj.addProperty("worker.max-runs", "1");
    configObj.addProperty("worker.preworkerSleep", "1");
    configObj.addProperty("worker.postworkerSleep", "1");
    configObj.addProperty("worker.endless", "true");
    configObj.addProperty("worker.hostUserName", System.getProperty("user.name"));
    byte[] body = setupMessage();
    Delivery testDelivery = new Delivery(mockEnvelope, mockProperties, body);
    setupMockQueue(testDelivery);
    Mockito.when(Utilities.parseConfig(anyString())).thenReturn(configObj);

    // Because the cleanup code calls resultHandler.waitFor(), we need to actually execute
    // something, even if it does nothing.
    Mockito.doNothing().when(mockExecutor).execute(any(CommandLine.class),
            any(DefaultExecuteResultHandler.class));

    // This is to mock the cleanup command - we don't really want to execute the command for
    // deleting contents of /datastore, at least not when unit testing on a workstation!
    PowerMockito.whenNew(DefaultExecutor.class).withNoArguments().thenReturn(mockExecutor);
    Mockito.when(mockExecHandler.hasResult()).thenReturn(true);
    PowerMockito.whenNew(DefaultExecuteResultHandler.class).withNoArguments().thenReturn(mockExecHandler);

    final FutureTask<String> tester = new FutureTask<>(new Callable<String>() {
        @Override
        public String call() {
            LOG.info("tester thread started");
            try {
                Worker.main(new String[] { "--config", "src/test/resources/workerConfig.ini", "--uuid",
                        "vm123456", "--pidFile", "/var/run/arch3_worker.pid" });
            } catch (CancellationException | InterruptedException e) {
                LOG.error("Exception caught: " + e.getMessage());
                return e.getMessage();
            } catch (Exception e) {
                e.printStackTrace();
                fail("Unexpected exception");
                return null;
            } finally {
                Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
                String s = appendEventsIntoString(argCaptor.getAllValues());
                return s;
            }
        }
    });

    final Thread killer = new Thread(new Runnable() {
        @Override
        public void run() {
            LOG.info("killer thread started");
            try {
                // The endless worker will not end on its own (because it's endless) so we need to
                // wait a little bit and then kill it, as if it were killed by the command-line
                // script (kill_worker_daemon.sh).
                Thread.sleep(2500);
            } catch (InterruptedException e) {
                e.printStackTrace();
                LOG.error(e.getMessage());
            }
            tester.cancel(true);
        }
    });

    ExecutorService es = Executors.newFixedThreadPool(2);
    es.execute(tester);
    es.execute(killer);

    try {
        tester.get();
    } catch (CancellationException e) {
        Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
        List<LoggingEvent> tmpList = new LinkedList<LoggingEvent>(argCaptor.getAllValues());
        String output = this.appendEventsIntoString(tmpList);
        assertTrue("--endless flag was detected and set",
                output.contains("The \"--endless\" flag was set, this worker will run endlessly!"));
        int numJobsPulled = StringUtils.countMatches(output, " WORKER IS PREPARING TO PULL JOB FROM QUEUE ");
        LOG.info("Number of jobs attempted: " + numJobsPulled);
        assertTrue("number of jobs attempted > 1", numJobsPulled > 1);
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
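The interplay of the two tasks is worth noting: killer calls tester.cancel(true), which interrupts the worker thread, and the test then expects tester.get() to throw CancellationException - the catch branch where the log output is asserted.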
From source file:org.apache.tika.parser.ocr.TesseractOCRParser.java
/**
 * Run external tesseract-ocr process.
 *
 * @param input
 *            File to be OCRed
 * @param output
 *            File to collect OCR result
 * @param config
 *            Configuration of tesseract-ocr engine
 * @throws TikaException
 *             if the extraction timed out
 * @throws IOException
 *             if an input error occurred
 */
private void doOCR(File input, File output, TesseractOCRConfig config) throws IOException, TikaException {
    String[] cmd = { config.getTesseractPath() + getTesseractProg(), input.getPath(), output.getPath(), "-l",
            config.getLanguage(), "-psm", config.getPageSegMode() };

    ProcessBuilder pb = new ProcessBuilder(cmd);
    setEnv(config, pb);
    final Process process = pb.start();

    process.getOutputStream().close();
    InputStream out = process.getInputStream();
    InputStream err = process.getErrorStream();

    logStream("OCR MSG", out, input);
    logStream("OCR ERROR", err, input);

    FutureTask<Integer> waitTask = new FutureTask<Integer>(new Callable<Integer>() {
        public Integer call() throws Exception {
            return process.waitFor();
        }
    });

    Thread waitThread = new Thread(waitTask);
    waitThread.start();

    try {
        waitTask.get(config.getTimeout(), TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        waitThread.interrupt();
        process.destroy();
        Thread.currentThread().interrupt();
        throw new TikaException("TesseractOCRParser interrupted", e);
    } catch (ExecutionException e) {
        // should not be thrown
    } catch (TimeoutException e) {
        waitThread.interrupt();
        process.destroy();
        throw new TikaException("TesseractOCRParser timeout", e);
    }
}
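The same watchdog shape, reduced to its essentials (a minimal sketch; the method and thread names are arbitrary): wrap the blocking waitFor() in a FutureTask so the caller can bound it with get(timeout) and clean up on expiry. On Java 8 and later, Process.waitFor(long, TimeUnit) provides the same bound without the extra thread.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class BoundedWait {

    static int waitForBounded(final Process process, long seconds)
            throws InterruptedException, ExecutionException, TimeoutException {
        FutureTask<Integer> waitTask = new FutureTask<Integer>(new Callable<Integer>() {
            public Integer call() throws Exception {
                return process.waitFor(); // blocks until the process exits
            }
        });
        new Thread(waitTask, "process-wait").start();
        try {
            return waitTask.get(seconds, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            process.destroy(); // kill the stuck process before rethrowing
            throw e;
        }
    }
}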
From source file:org.dllearner.algorithms.qtl.experiments.PathDetectionTask.java
public RunnableFuture<List<Path>> newTask() {
    return new FutureTask<List<Path>>(PathDetectionTask.this) {
        @Override
        public boolean cancel(boolean mayInterruptIfRunning) {
            PathDetectionTask.this.cancelTask();
            return super.cancel(mayInterruptIfRunning);
        }
    };
}
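The same idea in a generic form: subclass FutureTask so that cancel() first notifies the underlying work. A minimal sketch (CancellableWork is a hypothetical interface, not part of the project above):

import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;

interface CancellableWork<T> extends Callable<T> {
    void cancelWork(); // hypothetical hook: stop whatever external resource call() is using
}

class HookedFutureTask<T> extends FutureTask<T> {
    private final CancellableWork<T> work;

    HookedFutureTask(CancellableWork<T> work) {
        super(work);
        this.work = work;
    }

    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        work.cancelWork(); // propagate cancellation before the interrupt is delivered
        return super.cancel(mayInterruptIfRunning);
    }
}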
From source file:gda.scan.ConcurrentScanChild.java
/**
 * Asynchronously, readout detectors using parallel threads into ScanDataPoint and add to pipeline for possible
 * completion and publishing. Call {@link ConcurrentScanChild#waitForDetectorReadoutAndPublishCompletion()} to wait
 * for this task to complete, or {@link #cancelReadoutAndPublishCompletion()} to cancel and interrupt it.
 * <p>
 * If the property {@link LocalProperties#GDA_SCAN_CONCURRENTSCAN_READOUT_CONCURRENTLY} is its default false value
 * then simply block while reading out each detector in series and then adding the ScanDataPoint to the pipeline.
 *
 * @param point
 * @throws Exception
 */
@Override
protected void readoutDetectorsAndPublish(final ScanDataPoint point) throws Exception {

    final boolean lastPointInLine = (getPointPositionInLine() == PointPositionInLine.LAST); // latch value

    if (!isReadoutConcurrent()) {
        super.readoutDetectorsAndPublish(point);
        return;
    }

    // Make sure the previous point has been read out and published
    // (If the scan contains a detector this method will already have been called)
    waitForDetectorReadoutAndPublishCompletion();

    final String threadName = "ConcurrentScanChild.readoutDetectorsAndPublish(point '" + point.toString()
            + "')";

    detectorReadoutTask = new FutureTask<Void>(new Callable<Void>() {

        List<Future<Object>> readoutTasks;

        /**
         * Readout each detector in a thread, add the resulting data to the ScanDataPoint and publish.
         */
        @Override
        public Void call() throws Exception {
            try {
                Vector<Detector> detectors = point.getDetectors();

                // if there are detectors then readout in parallel threads
                if (detectors.size() != 0) {
                    readoutTasks = new ArrayList<Future<Object>>(detectors.size());

                    // Start readout tasks
                    for (Detector detector : point.getDetectors()) {
                        FutureTask<Object> readoutTask = new FutureTask<Object>(new ReadoutDetector(detector));
                        new Thread(readoutTask, threadName + ": readout '" + detector.getName() + "'").start();
                        readoutTasks.add(readoutTask);
                    }

                    // Wait for readout results and put into point
                    for (int i = 0; i < detectors.size(); i++) {
                        checkThreadInterrupted();
                        Object data = readoutTasks.get(i).get();
                        point.addDetectorData(data, ScannableUtils.getExtraNamesFormats(detectors.get(i)));
                    }
                }

                // Put point onto pipeline
                checkThreadInterrupted(); // probably voodoo and not required here
                scanDataPointPipeline.put(point); // may block
                checkThreadInterrupted(); // probably voodoo and not required here

                // The main scan thread cannot call atPointEnd (and subsequently atPointStart) in the correct
                // order with respect to readout, so call these here instead.
                for (Detector detector : detectors) {
                    detector.atPointEnd();
                }

                // Unless this is the last point in the line, call atPointStart hooks for the next point (the
                // one that the main scan thread is now working on).
                if (!lastPointInLine) {
                    for (Detector detector : detectors) {
                        detector.atPointStart();
                    }
                }
            } catch (Exception e) {
                // could be the normal result of cancelling this task
                // (detector.readout() unfortunately doesn't distinguish InterruptedException from DeviceException)
                logger.info("'" + representThrowable(e)
                        + "' --- while reading out detectors. *Canceling any remaining readout tasks.*");
                for (Future<Object> task : readoutTasks) {
                    task.cancel(true);
                }
                throw e;
            }
            return null;
        }
    });
    new Thread(detectorReadoutTask, threadName).start();
}
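The scatter/gather core of this method is general: start one FutureTask per detector on its own thread, then call get() on each task in submission order, so the point's data arrives in a deterministic order even though the readouts ran in parallel.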
From source file:io.teak.sdk.TeakNotification.java
/**
 * Cancel a push notification that was scheduled with
 * {@link TeakNotification#scheduleNotification(String, String, long)}
 *
 * @param scheduleId
 * @return
 */
@SuppressWarnings("unused")
public static FutureTask<String> cancelNotification(final String scheduleId) {
    if (!Teak.isEnabled()) {
        Log.e(LOG_TAG, "Teak is disabled, ignoring cancelNotification().");
        return null;
    }

    if (scheduleId == null || scheduleId.isEmpty()) {
        Log.e(LOG_TAG, "scheduleId cannot be null or empty");
        return null;
    }

    final ArrayBlockingQueue<String> q = new ArrayBlockingQueue<>(1);
    final FutureTask<String> ret = new FutureTask<>(new Callable<String>() {
        public String call() {
            try {
                return q.take();
            } catch (InterruptedException e) {
                Log.e(LOG_TAG, Log.getStackTraceString(e));
            }
            return null;
        }
    });

    Session.whenUserIdIsReadyRun(new Session.SessionRunnable() {
        @Override
        public void run(Session session) {
            HashMap<String, Object> payload = new HashMap<>();
            payload.put("id", scheduleId);

            new Request("/me/cancel_local_notify.json", payload, session) {
                @Override
                protected void done(int responseCode, String responseBody) {
                    try {
                        JSONObject response = new JSONObject(responseBody);
                        if (response.getString("status").equals("ok")) {
                            q.offer(response.getJSONObject("event").getString("id"));
                        } else {
                            q.offer("");
                        }
                    } catch (Exception ignored) {
                        q.offer("");
                    }
                    ret.run();
                }
            }.run();
        }
    });

    return ret;
}
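The queue-plus-run() trick above adapts a callback API to a Future: the callable blocks on a one-slot queue, and the callback offers the result and then invokes run() so that get() callers are released. A distilled sketch (the simulated callback thread stands in for any asynchronous source); on Java 8 and later, a CompletableFuture completed directly from the callback would avoid the queue entirely:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;

class CallbackToFuture {

    static FutureTask<String> adapt(final ArrayBlockingQueue<String> q) {
        return new FutureTask<>(new Callable<String>() {
            public String call() throws Exception {
                return q.take(); // parks until the callback offers a value
            }
        });
    }

    public static void main(String[] args) throws Exception {
        final ArrayBlockingQueue<String> q = new ArrayBlockingQueue<>(1);
        final FutureTask<String> result = adapt(q);

        // simulate an asynchronous callback delivering the value
        new Thread(new Runnable() {
            public void run() {
                q.offer("event-42");
                result.run(); // executes the callable, which drains the queue immediately
            }
        }).start();

        System.out.println(result.get()); // prints "event-42"
    }
}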
From source file:org.openbase.display.DisplayView.java
private <V> Future<V> runTask(final Callable<V> callable) throws CouldNotPerformException {
    try {
        if (Platform.isFxApplicationThread()) {
            try {
                return CompletableFuture.completedFuture(callable.call());
            } catch (Exception ex) {
                ExceptionPrinter.printHistory(new CouldNotPerformException("Could not perform task!", ex),
                        logger);
            }
        }

        FutureTask<V> future = new FutureTask<>(() -> {
            try {
                return callable.call();
            } catch (Exception ex) {
                throw ExceptionPrinter.printHistoryAndReturnThrowable(ex, logger);
            }
        });
        Platform.runLater(future);
        return future;
    } catch (Exception ex) {
        throw new CouldNotPerformException("Could not perform task!", ex);
    }
}
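FutureTask suits this dispatch idiom well because the same object is the Runnable handed to Platform.runLater(...) and the Future handed back to the caller; the identical shape works for Swing with SwingUtilities.invokeLater.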