List of usage examples for java.util.concurrent ScheduledExecutorService shutdown
void shutdown();
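shutdown() initiates an orderly shutdown: previously submitted tasks are executed, no new tasks are accepted, and the call itself does not wait for termination. Before the examples, here is a minimal sketch of the common shutdown idiom the examples below rely on (class name and timeout are illustrative, not from any of the source files):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ShutdownExample {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        pool.schedule(() -> System.out.println("task ran"), 100, TimeUnit.MILLISECONDS);

        pool.shutdown(); // stop accepting new tasks; already-submitted tasks may still run
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // give up waiting and interrupt anything still running
        }
    }
}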
From source file:org.apache.nifi.controller.StandardFlowService.java
@Override
public void stop(final boolean force) {
    writeLock.lock();
    try {
        if (!isRunning()) {
            return;
        }
        running.set(false);

        if (clusterCoordinator != null) {
            final Thread shutdownClusterCoordinator = new Thread(new Runnable() {
                @Override
                public void run() {
                    clusterCoordinator.shutdown();
                }
            });
            shutdownClusterCoordinator.setDaemon(true);
            shutdownClusterCoordinator.setName("Shutdown Cluster Coordinator");
            shutdownClusterCoordinator.start();
        }

        if (!controller.isTerminated()) {
            controller.shutdown(force);
        }

        if (configuredForClustering && senderListener != null) {
            try {
                senderListener.stop();
            } catch (final IOException ioe) {
                logger.warn("Protocol sender/listener did not stop gracefully due to: " + ioe);
            }
        }

        final ScheduledExecutorService executorService = executor.get();
        if (executorService != null) {
            if (force) {
                executorService.shutdownNow();
            } else {
                executorService.shutdown();
            }

            boolean graceful;
            try {
                graceful = executorService.awaitTermination(gracefulShutdownSeconds, TimeUnit.SECONDS);
            } catch (final InterruptedException e) {
                graceful = false;
            }

            if (!graceful) {
                logger.warn("Scheduling service did not gracefully shutdown within configured "
                        + gracefulShutdownSeconds + " second window");
            }
        }
    } finally {
        writeLock.unlock();
    }
}
From source file:com.meltmedia.dropwizard.etcd.junit.EtcdJsonRule.java
@Override
public Statement apply(Statement base, Description description) {
    return new Statement() {
        @Override
        public void evaluate() throws Throwable {
            ScheduledExecutorService executor = Executors.newScheduledThreadPool(10);
            ObjectMapper mapper = new ObjectMapper().registerModule(new JodaModule());
            try {
                try {
                    clientSupplier.get().deleteDir(directory).recursive().send().get();
                } catch (Exception e) {
                    System.out.printf("could not delete %s from service rule", directory);
                    e.printStackTrace();
                }
                factory = EtcdJson.builder()
                        .withClient(clientSupplier)
                        .withBaseDirectory(directory)
                        .withExecutor(executor)
                        .withMapper(mapper)
                        .withMetricRegistry(new MetricRegistry())
                        .build();
                factory.start();
                try {
                    base.evaluate();
                } finally {
                    try {
                        factory.stop();
                    } catch (Throwable ioe) {
                        ioe.printStackTrace(System.err);
                    }
                    factory = null;
                }
            } catch (Exception e) {
                e.printStackTrace();
                throw e;
            } finally {
                executor.shutdown();
            }
        }
    };
}
From source file:com.networknt.client.Client.java
private void checkCCTokenExpired() throws ClientException, ApiException {
    long tokenRenewBeforeExpired = (Integer) oauthConfig.get(TOKEN_RENEW_BEFORE_EXPIRED);
    long expiredRefreshRetryDelay = (Integer) oauthConfig.get(EXPIRED_REFRESH_RETRY_DELAY);
    long earlyRefreshRetryDelay = (Integer) oauthConfig.get(EARLY_REFRESH_RETRY_DELAY);

    boolean isInRenewWindow = expire - System.currentTimeMillis() < tokenRenewBeforeExpired;
    logger.trace("isInRenewWindow = " + isInRenewWindow);

    if (isInRenewWindow) {
        if (expire <= System.currentTimeMillis()) {
            logger.trace("In renew window and token is expired.");
            // block other requests here to prevent using an expired token.
            synchronized (Client.class) {
                if (expire <= System.currentTimeMillis()) {
                    logger.trace("Within the synch block, check if the current request needs to renew the token");
                    if (!renewing || System.currentTimeMillis() > expiredRetryTimeout) {
                        // either no other request is renewing, or the renewing flag is set but the renew timeout has passed
                        renewing = true;
                        expiredRetryTimeout = System.currentTimeMillis() + expiredRefreshRetryDelay;
                        logger.trace("Current request is renewing token synchronously as token is expired already");
                        getCCToken();
                        renewing = false;
                    } else {
                        logger.trace("Circuit breaker is tripped and not timed out yet!");
                        // reject all waiting requests by throwing an exception.
                        throw new ApiException(new Status(STATUS_CLIENT_CREDENTIALS_TOKEN_NOT_AVAILABLE));
                    }
                }
            }
        } else {
            // Not expired yet; try to renew asynchronously but let requests use the old token.
            logger.trace("In renew window but token is not expired yet.");
            synchronized (Client.class) {
                if (expire > System.currentTimeMillis()) {
                    if (!renewing || System.currentTimeMillis() > earlyRetryTimeout) {
                        renewing = true;
                        earlyRetryTimeout = System.currentTimeMillis() + earlyRefreshRetryDelay;
                        logger.trace("Retrieve token async is called while token is not expired yet");

                        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
                        executor.schedule(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    getCCToken();
                                    renewing = false;
                                    logger.trace("Async get token is completed.");
                                } catch (Exception e) {
                                    logger.error("Async retrieve token error", e);
                                    // swallow the exception here as renewal is on a best-effort basis.
                                }
                            }
                        }, 50, TimeUnit.MILLISECONDS);
                        executor.shutdown();
                    }
                }
            }
        }
    }
    logger.trace("Check secondary token is done!");
}
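A note on the fire-and-forget idiom above: calling shutdown() immediately after schedule() is safe, because a ScheduledThreadPoolExecutor by default still executes already-queued one-shot delayed tasks after shutdown (setExecuteExistingDelayedTasksAfterShutdownPolicy defaults to true). A minimal standalone sketch illustrating just that behavior (class name is illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduleThenShutdown {
    public static void main(String[] args) {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        executor.schedule(() -> System.out.println("still runs after shutdown()"),
                50, TimeUnit.MILLISECONDS);
        // shutdown() does not cancel the delayed task above; the task runs once,
        // then the executor terminates.
        executor.shutdown();
    }
}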
From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java
@Test
public void shouldNotExhaustThreads() throws Exception {
    final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(2, testingThreadFactory);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build()
            .executorService(executorService)
            .scheduledExecutorService(executorService)
            .create();

    final AtomicInteger count = new AtomicInteger(0);
    assertTrue(IntStream.range(0, 1000).mapToObj(i -> gremlinExecutor.eval("1+1")).allMatch(f -> {
        try {
            return (Integer) f.get() == 2;
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        } finally {
            count.incrementAndGet();
        }
    }));

    assertEquals(1000, count.intValue());

    executorService.shutdown();
    executorService.awaitTermination(30000, TimeUnit.MILLISECONDS);
}
From source file:com.meltmedia.dropwizard.etcd.json.EtcdWatchServiceRule.java
@Override
public Statement apply(Statement base, Description description) {
    return new Statement() {
        @Override
        public void evaluate() throws Throwable {
            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
            ObjectMapper mapper = new ObjectMapper();
            try {
                try {
                    clientSupplier.get().deleteDir(directory).recursive().send().get();
                } catch (Exception e) {
                    System.out.printf("could not delete %s from service rule", directory);
                    e.printStackTrace();
                }
                service = WatchService.builder()
                        .withEtcdClient(clientSupplier)
                        .withDirectory(directory)
                        .withExecutor(executor)
                        .withMapper(mapper)
                        .withMetricRegistry(new MetricRegistry())
                        .withWatchTimeout(10, TimeUnit.MILLISECONDS)
                        .build();
                service.start();
                try {
                    base.evaluate();
                } finally {
                    try {
                        service.stop();
                    } catch (Throwable ioe) {
                        ioe.printStackTrace(System.err);
                    }
                    service = null;
                }
            } catch (Exception e) {
                e.printStackTrace();
                throw e;
            } finally {
                executor.shutdown();
            }
        }
    };
}
From source file:com.amazonaws.services.dynamodbv2.streamsadapter.functionals.CorrectnessTest.java
/**
 * This test spawns a thread to periodically write items to the source table. It shuts down and restarts the KCL
 * worker while writes are happening (to simulate the real-world situation of a worker dying and another taking its
 * place). There are two things being verified here:
 * 1. The new KCL worker resumes from the checkpoint
 * 2. All stream records are processed
 *
 * @throws Exception
 */
@Test
public void workerFailureTest() throws Exception {
    LOG.info("Starting single shard KCL worker failure test.");

    KinesisClientLibConfiguration workerConfig = new KinesisClientLibConfiguration(leaseTable, streamId,
            credentials, KCL_WORKER_ID).withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    startKCLWorker(workerConfig);

    // A thread that keeps writing to the table every 2 seconds
    ScheduledExecutorService loadGeneratorService = Executors.newSingleThreadScheduledExecutor();
    loadGeneratorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            insertAndUpdateItems(1);
        }
    }, 0 /* initialDelay */, 2 /* period */, TimeUnit.SECONDS);

    while (recordProcessorFactory.getNumRecordsProcessed() < 10) {
        LOG.info("Sleep till first few records are processed");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    shutDownKCLWorker();

    // Calculate the number of records processed by the first worker and also the number of
    // processed-but-not-checkpointed records, since a checkpoint happens after every batch of 10 records
    int numRecordsProcessedByFirstWorker = recordProcessorFactory.getNumRecordsProcessed();
    int numRecordsNotCheckpointed = numRecordsProcessedByFirstWorker
            % ReplicatingRecordProcessor.CHECKPOINT_BATCH_SIZE;

    // Start a new worker
    startKCLWorker(workerConfig);

    while (recordProcessorFactory.getNumRecordsProcessed() < 0) {
        LOG.info("Sleep till RecordProcessor is initialized");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    loadGeneratorService.shutdown();
    if (!loadGeneratorService.awaitTermination(THREAD_SLEEP_5S, TimeUnit.MILLISECONDS)) {
        loadGeneratorService.shutdownNow();
    }

    int numStreamRecords = 2 * this.numItemsInSrcTable;
    int remainingRecordsToBeProcessed = numStreamRecords - numRecordsProcessedByFirstWorker
            + numRecordsNotCheckpointed;

    /*
     * The second worker must process at least remainingRecordsToBeProcessed records so that we have
     * replicated everything to the destination table. Thus, this should never technically end up as an
     * infinite loop. If it does, something else has gone wrong.
     */
    while (recordProcessorFactory.getNumRecordsProcessed() < remainingRecordsToBeProcessed) {
        LOG.info("Sleep till remaining records are processed");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    shutDownKCLWorker();

    ScanResult srcTableScan = TestUtil.scanTable(dynamoDBClient, srcTable);
    ScanResult destTableScan = TestUtil.scanTable(dynamoDBClient, destTable);
    assertEquals(srcTableScan.getItems(), destTableScan.getItems());
}
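Worth noting in the example above: shutdown() alone is enough to stop the fixed-rate load generator, because a ScheduledThreadPoolExecutor cancels outstanding periodic tasks on shutdown by default (setContinueExistingPeriodicTasksAfterShutdownPolicy defaults to false). awaitTermination() then just waits for any in-flight insertAndUpdateItems() run to finish, and shutdownNow() is only the fallback if that run does not complete within the timeout.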
From source file:org.codice.ddf.commands.catalog.IngestCommand.java
@Override
protected Object executeWithSubject() throws Exception {
    final CatalogFacade catalog = getCatalog();
    final File inputFile = new File(filePath);

    if (!inputFile.exists()) {
        printErrorMessage("File or directory [" + filePath + "] must exist.");
        console.println("If the file does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (deprecatedBatchSize != DEFAULT_BATCH_SIZE) {
        // the user specified the old-style batch size, so use that
        printErrorMessage("Batch size positional argument is DEPRECATED, please use --batchsize option instead.");
        batchSize = deprecatedBatchSize;
    }

    if (batchSize <= 0) {
        printErrorMessage("A batch size of [" + batchSize + "] was supplied. Batch size must be greater than 0.");
        return null;
    }

    if (!StringUtils.isEmpty(failedDir)) {
        failedIngestDirectory = new File(failedDir);
        if (!verifyFailedIngestDirectory()) {
            return null;
        }

        /*
         * Batch size is always set to 1 when using an ingest failure directory. If a batch size is
         * specified by the user, issue a warning stating that a batch size of 1 will be used.
         */
        if (batchSize != DEFAULT_BATCH_SIZE) {
            console.println("WARNING: An ingest failure directory was supplied in addition to a batch size of "
                    + batchSize
                    + ". When using an ingest failure directory, the batch size must be 1. Setting batch size to 1.");
        }

        batchSize = 1;
    }

    BundleContext bundleContext = getBundleContext();
    if (!DEFAULT_TRANSFORMER_ID.equals(transformerId)) {
        ServiceReference[] refs = null;
        try {
            refs = bundleContext.getServiceReferences(InputTransformer.class.getName(),
                    "(|" + "(" + Constants.SERVICE_ID + "=" + transformerId + ")" + ")");
        } catch (InvalidSyntaxException e) {
            throw new IllegalArgumentException("Invalid transformer transformerId: " + transformerId, e);
        }

        if (refs == null || refs.length == 0) {
            throw new IllegalArgumentException("Transformer " + transformerId + " not found");
        } else {
            transformer = (InputTransformer) bundleContext.getService(refs[0]);
        }
    }

    Stream<Path> ingestStream = Files.walk(inputFile.toPath(), FileVisitOption.FOLLOW_LINKS);

    int totalFiles = (inputFile.isDirectory()) ? inputFile.list().length : 1;
    fileCount.getAndSet(totalFiles);

    final ArrayBlockingQueue<Metacard> metacardQueue = new ArrayBlockingQueue<>(batchSize * multithreaded);

    ExecutorService queueExecutor = Executors.newSingleThreadExecutor();

    final long start = System.currentTimeMillis();

    printProgressAndFlush(start, fileCount.get(), 0);

    queueExecutor.submit(() -> buildQueue(ingestStream, metacardQueue, start));

    final ScheduledExecutorService batchScheduler = Executors.newSingleThreadScheduledExecutor();

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    submitToCatalog(batchScheduler, executorService, metacardQueue, catalog, start);

    while (!doneBuildingQueue.get() || processingThreads.get() != 0) {
        try {
            TimeUnit.SECONDS.sleep(2);
        } catch (InterruptedException e) {
            LOGGER.error("Ingest 'Waiting for processing to finish' thread interrupted: {}", e);
        }
    }

    try {
        queueExecutor.shutdown();
        executorService.shutdown();
        batchScheduler.shutdown();
    } catch (SecurityException e) {
        LOGGER.error("Executor service shutdown was not permitted: {}", e);
    }

    printProgressAndFlush(start, fileCount.get(), ingestCount.get() + ignoreCount.get());

    long end = System.currentTimeMillis();
    console.println();

    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));

    console.println();
    console.printf(" %d file(s) ingested in %s %n", ingestCount.get(), elapsedTime);

    LOGGER.info("{} file(s) ingested in {} [{} records/sec]", ingestCount.get(), elapsedTime,
            calculateRecordsPerSecond(ingestCount.get(), start, end));
    INGEST_LOGGER.info("{} file(s) ingested in {} [{} records/sec]", ingestCount.get(), elapsedTime,
            calculateRecordsPerSecond(ingestCount.get(), start, end));

    if (fileCount.get() != ingestCount.get()) {
        console.println();
        if ((fileCount.get() - ingestCount.get() - ignoreCount.get()) >= 1) {
            String failedAmount = Integer.toString(fileCount.get() - ingestCount.get() - ignoreCount.get());
            printErrorMessage(failedAmount + " file(s) failed to be ingested. See the ingest log for more details.");
            INGEST_LOGGER.warn("{} file(s) failed to be ingested.", failedAmount);
        }
        if (ignoreList != null) {
            String ignoredAmount = Integer.toString(ignoreCount.get());
            printColor(Ansi.Color.YELLOW, ignoredAmount + " file(s) ignored. See the ingest log for more details.");
            INGEST_LOGGER.warn("{} file(s) were ignored.", ignoredAmount);
        }
    }
    console.println();

    return null;
}
From source file:com.vmware.photon.controller.model.adapters.vsphere.ovf.OvfDeployer.java
public ManagedObjectReference deployOvf(URI ovfUri, ManagedObjectReference host, ManagedObjectReference vmFolder,
        String vmName, List<OvfNetworkMapping> networks, ManagedObjectReference datastore, List<KeyValue> ovfProps,
        String deploymentConfig, ManagedObjectReference resourcePool) throws Exception {

    String ovfDescriptor = getRetriever().retrieveAsString(ovfUri);

    OvfCreateImportSpecParams params = new OvfCreateImportSpecParams();
    params.setHostSystem(host);
    params.setLocale("US");
    params.setEntityName(vmName);
    if (deploymentConfig == null) {
        deploymentConfig = "";
    }
    params.setDeploymentOption(deploymentConfig);
    params.getNetworkMapping().addAll(networks);
    params.setDiskProvisioning(OvfCreateImportSpecParamsDiskProvisioningType.THIN.name());
    if (ovfProps != null) {
        params.getPropertyMapping().addAll(ovfProps);
    }

    ManagedObjectReference ovfManager = this.connection.getServiceContent().getOvfManager();

    OvfCreateImportSpecResult importSpecResult = getVimPort().createImportSpec(ovfManager, ovfDescriptor,
            resourcePool, datastore, params);

    if (!importSpecResult.getError().isEmpty()) {
        return VimUtils.rethrow(importSpecResult.getError().get(0));
    }

    long totalBytes = getImportSizeBytes(importSpecResult);

    ManagedObjectReference lease = getVimPort().importVApp(resourcePool, importSpecResult.getImportSpec(),
            vmFolder, host);

    LeaseProgressUpdater leaseUpdater = new LeaseProgressUpdater(this.connection, lease, totalBytes);

    GetMoRef get = new GetMoRef(this.connection);
    HttpNfcLeaseInfo httpNfcLeaseInfo;

    ScheduledExecutorService executorService = Executors.newScheduledThreadPool(1);
    try {
        leaseUpdater.awaitReady();
        logger.info("Lease ready");

        // start updating the lease
        leaseUpdater.start(executorService);

        httpNfcLeaseInfo = get.entityProp(lease, PROP_INFO);
        List<HttpNfcLeaseDeviceUrl> deviceUrls = httpNfcLeaseInfo.getDeviceUrl();

        String ip = this.connection.getURI().getHost();
        String basePath = extractParentPath(ovfUri);

        for (HttpNfcLeaseDeviceUrl deviceUrl : deviceUrls) {
            String deviceKey = deviceUrl.getImportKey();

            for (OvfFileItem ovfFileItem : importSpecResult.getFileItem()) {
                if (deviceKey.equals(ovfFileItem.getDeviceId())) {
                    logger.debug("Importing device id: {}", deviceKey);
                    String sourceUri = basePath + ovfFileItem.getPath();
                    String uploadUri = makUploadUri(ip, deviceUrl);
                    uploadVmdkFile(ovfFileItem, sourceUri, uploadUri, leaseUpdater, this.ovfRetriever.getClient());
                    logger.info("Completed uploading VMDK file {}", sourceUri);
                }
            }
        }

        // complete the lease
        leaseUpdater.complete();
    } catch (Exception e) {
        leaseUpdater.abort(VimUtils.convertExceptionToFault(e));
        logger.info("Error importing ovf", e);
        throw e;
    } finally {
        executorService.shutdown();
    }

    httpNfcLeaseInfo = get.entityProp(lease, PROP_INFO);

    ManagedObjectReference entity = httpNfcLeaseInfo.getEntity();

    // as this is an OVF it makes sense to enable the OVF transport;
    // only the guestInfo is enabled by default
    VmConfigSpec spec = new VmConfigSpec();
    spec.getOvfEnvironmentTransport().add(TRANSPORT_GUESTINFO);
    VirtualMachineConfigSpec reconfig = new VirtualMachineConfigSpec();
    reconfig.setVAppConfig(spec);

    ManagedObjectReference reconfigTask = getVimPort().reconfigVMTask(entity, reconfig);
    VimUtils.waitTaskEnd(this.connection, reconfigTask);

    return entity;
}
From source file:org.j2free.config.ConfigurationListener.java
/**
 *
 * @param event
 */
public synchronized void contextInitialized(ServletContextEvent event) {
    context = event.getServletContext();

    // Get the configuration file
    String configPathTemp = (String) context.getInitParameter(INIT_PARAM_CONFIG_PATH);

    // Use the default path if it wasn't specified
    if (StringUtils.isBlank(configPathTemp))
        configPathTemp = DEFAULT_CONFIG_PATH;

    // Finalize the config path (needs to be final for inner-Runnable below)
    final String configPath = configPathTemp;
    context.setAttribute(CONTEXT_ATTR_CONFIG_PATH, configPath);

    try {
        // Load the configuration
        DefaultConfigurationBuilder configBuilder = new DefaultConfigurationBuilder();
        configBuilder.setFileName(configPath);

        final CombinedConfiguration config = configBuilder.getConfiguration(true);

        // Save the config where we can get at it later
        context.setAttribute(CONTEXT_ATTR_CONFIG, config);
        Global.put(CONTEXT_ATTR_CONFIG, config);

        // Determine the localhost
        String localhost = config.getString(PROP_LOCALHOST, "ip");
        if (localhost.equalsIgnoreCase("ip")) {
            try {
                localhost = InetAddress.getLocalHost().getHostAddress();
                log.info("Using localhost: " + localhost);
            } catch (Exception e) {
                log.warn("Error determining localhost", e);
                localhost = "localhost";
            }
        }

        context.setAttribute(CONTEXT_ATTR_LOCALHOST, localhost);
        Global.put(CONTEXT_ATTR_LOCALHOST, localhost);
        loadedConfigPropKeys.add(CONTEXT_ATTR_LOCALHOST);

        // Set application context attributes for all config properties
        String prop, value;
        Iterator itr = config.getKeys();
        while (itr.hasNext()) {
            prop = (String) itr.next();
            value = config.getString(prop);

            // Anything with the value "localhost" will be set to the IP if possible
            value = (value.equals("localhost") ? localhost : value);

            log.debug("Config: " + prop + " = " + value);
            context.setAttribute(prop, value);
            loadedConfigPropKeys.add(prop);
        }

        // Run Mode configuration
        String runMode = config.getString(PROP_RUNMODE);
        try {
            RUN_MODE = RunMode.valueOf(runMode);
        } catch (Exception e) {
            log.warn("Error setting runmode, invalid value: " + runMode);
        }

        context.setAttribute("devMode", RUN_MODE != RunMode.PRODUCTION);
        loadedConfigPropKeys.add("devMode");

        // Fragment Cache Configuration
        if (config.getBoolean(FragmentCache.Properties.ENABLED, false)) {
            log.info("Enabling fragment caching...");
            FragmentCacheTag.enable();

            // This is expected to be in seconds
            long temp = config.getLong(FragmentCache.Properties.REQUEST_TIMEOUT, -1l);
            if (temp != -1) {
                log.info("Setting FragmentCacheTag request timeout: " + temp);
                FragmentCacheTag.setRequestTimeout(temp);
            }

            // The property is in seconds, but WARNING_COMPUTE_DURATION does NOT use a TimeUnit, so it's in ms
            temp = config.getLong(FragmentCache.Properties.WARNING_DURATION, -1l);
            if (temp != -1) {
                log.info("Setting FragmentCacheTag warning duration: " + temp);
                FragmentCacheTag.setWarningComputeDuration(temp * 1000);
            }

            // Get the fragment cache names
            String[] cacheNames = config.getStringArray(FragmentCache.Properties.ENGINE_NAMES);
            for (String cacheName : cacheNames) {
                String cacheClassName = config
                        .getString(String.format(FragmentCache.Properties.ENGINE_CLASS_TEMPLATE, cacheName));
                try {
                    // Load up the class
                    Class<? extends FragmentCache> cacheClass =
                            (Class<? extends FragmentCache>) Class.forName(cacheClassName);

                    // Look for a constructor that takes a config
                    Constructor<? extends FragmentCache> constructor = null;
                    try {
                        constructor = cacheClass.getConstructor(Configuration.class);
                    } catch (Exception e) {
                    }

                    FragmentCache cache;

                    // If we found the configuration constructor, use it
                    if (constructor != null)
                        cache = constructor.newInstance(config);
                    else {
                        // otherwise use a default no-args constructor
                        log.warn("Could not find a " + cacheClass.getSimpleName()
                                + " constructor that takes a Configuration, defaulting to no-args constructor");
                        cache = cacheClass.newInstance();
                    }

                    // register the cache with the FragmentCacheTag using the config strategy-name, or the
                    // engineName if a strategy-name is not specified
                    log.info("Registering FragmentCache strategy: [name=" + cacheName + ", class="
                            + cacheClass.getName() + "]");
                    FragmentCacheTag.registerStrategy(cacheName, cache);
                } catch (Exception e) {
                    log.error("Error enabling FragmentCache engine: " + cacheName, e);
                }
            }
        } else {
            // Have to call this here, because reconfiguration could turn
            // the cache off after it was previously enabled.
            FragmentCacheTag.disable();
        }

        // For Task execution
        ScheduledExecutorService taskExecutor;
        if (config.getBoolean(PROP_TASK_EXECUTOR_ON, false)) {
            int threads = config.getInt(PROP_TASK_EXECUTOR_THREADS, DEFAULT_TASK_EXECUTOR_THREADS);

            if (threads == 1)
                taskExecutor = Executors.newSingleThreadScheduledExecutor();
            else
                taskExecutor = Executors.newScheduledThreadPool(threads);

            context.setAttribute(CONTEXT_ATTR_TASK_MANAGER, taskExecutor);
            loadedConfigPropKeys.add(CONTEXT_ATTR_TASK_MANAGER);
            Global.put(CONTEXT_ATTR_TASK_MANAGER, taskExecutor);
        } else {
            // Not allowed to shut down the taskExecutor if dynamic reconfig is enabled
            if (reconfigTask == null) {
                // Shut down and remove references to the taskManager previously created
                taskExecutor = (ScheduledExecutorService) Global.get(CONTEXT_ATTR_TASK_MANAGER);
                if (taskExecutor != null) {
                    // shutdown() lets already-submitted tasks finish but does not block;
                    // it only initiates the orderly shutdown
                    taskExecutor.shutdown();
                    taskExecutor = null;
                    Global.remove(CONTEXT_ATTR_TASK_MANAGER);
                }
            } else {
                // We could just log a warning that you can't do this, but the user
                // might not see that, so we're going to refuse to reset a configuration
                // that cannot be loaded in whole successfully.
                throw new ConfigurationException(
                        "Cannot disable task execution service, dynamic reconfiguration is enabled!");
            }
        }

        // Email Service
        if (config.getBoolean(PROP_MAIL_SERVICE_ON, false)) {
            if (!SimpleEmailService.isEnabled()) {
                // Get the SMTP properties
                Properties props = System.getProperties();
                props.put(PROP_SMTP_HOST, config.getString(PROP_SMTP_HOST));
                props.put(PROP_SMTP_PORT, config.getString(PROP_SMTP_PORT));
                props.put(PROP_SMTP_AUTH, config.getString(PROP_SMTP_AUTH));

                Session session;

                if (config.getBoolean(PROP_SMTP_AUTH)) {
                    final String user = config.getString(PROP_SMTP_USER);
                    final String pass = config.getString(PROP_SMTP_PASS);

                    Authenticator auth = new Authenticator() {
                        @Override
                        public PasswordAuthentication getPasswordAuthentication() {
                            return new PasswordAuthentication(user, pass);
                        }
                    };
                    session = Session.getInstance(props, auth);
                } else {
                    session = Session.getInstance(props);
                }

                // Get the global headers
                Iterator headerNames = config.getKeys(PROP_MAIL_HEADER_PREFIX);
                List<KeyValuePair<String, String>> headers = new LinkedList<KeyValuePair<String, String>>();

                String headerName;
                while (headerNames.hasNext()) {
                    headerName = (String) headerNames.next();
                    headers.add(new KeyValuePair<String, String>(headerName, config.getString(headerName)));
                }

                // Initialize the service
                SimpleEmailService.init(session);
                SimpleEmailService.setGlobalHeaders(headers);

                // Set whether we actually send the e-mails
                SimpleEmailService.setDummyMode(config.getBoolean(PROP_MAIL_DUMMY_MODE, false));

                // Set the failure policy
                String policy = config.getString(PROP_MAIL_ERROR_POLICY);
                if (policy != null) {
                    if (policy.equals(VALUE_MAIL_POLICY_DISCARD)) {
                        SimpleEmailService.setErrorPolicy(new SimpleEmailService.DiscardPolicy());
                    } else if (policy.equals(VALUE_MAIL_POLICY_REQUEUE)) {
                        Priority priority = null;
                        try {
                            priority = Priority.valueOf(config.getString(PROP_MAIL_REQUEUE_PRIORITY));
                        } catch (Exception e) {
                            log.warn("Error reading requeue policy priority: "
                                    + config.getString(PROP_MAIL_REQUEUE_PRIORITY, "") + ", using default");
                        }

                        if (priority == null)
                            SimpleEmailService.setErrorPolicy(new SimpleEmailService.RequeuePolicy());
                        else
                            SimpleEmailService.setErrorPolicy(new SimpleEmailService.RequeuePolicy(priority));
                    }
                }

                // Parse templates
                String emailTemplateDir = config.getString(PROP_MAIL_TEMPLATE_DIR);

                // If the template directory wasn't specified, fall back to the default
                if (StringUtils.isBlank(emailTemplateDir))
                    emailTemplateDir = DEFAULT_EMAIL_TEMPLATE_DIR;

                log.debug("Looking for e-mail templates in: " + emailTemplateDir);
                Set<String> templates = context.getResourcePaths(emailTemplateDir);

                // E-mail templates
                if (templates != null && !templates.isEmpty()) {
                    log.debug("Found " + templates.size() + " templates");

                    String key;
                    String defaultTemplate = config.getString(PROP_MAIL_DEFAULT_TEMPLATE, EMPTY);

                    InputStream in;
                    StringBuilder builder;
                    Scanner scanner;

                    try {
                        Template template;
                        String[] parts;
                        ContentType contentType;

                        for (String path : templates) {
                            path = path.trim();
                            parts = path.split("\\.");
                            contentType = ContentType.valueOfExt(parts[1]);
                            try {
                                in = context.getResourceAsStream(path.trim());
                                if (in != null && in.available() > 0) {
                                    scanner = new Scanner(in);
                                    builder = new StringBuilder();
                                    while (scanner.hasNextLine()) {
                                        builder.append(scanner.nextLine());
                                        if (contentType == ContentType.PLAIN) {
                                            builder.append("\n");
                                        }
                                    }

                                    template = new Template(builder.toString(), contentType);

                                    key = parts[0].replace(emailTemplateDir, EMPTY);
                                    SimpleEmailService.registerTemplate(key, template, key.equals(defaultTemplate));
                                }
                            } catch (IOException ioe) {
                                log.error("Error loading e-mail template: " + path, ioe);
                            }
                        }
                    } catch (Exception e) {
                        log.error("Error loading e-mail templates", e);
                    }
                } else
                    log.debug("No e-mail templates found.");
            }
        } else if (SimpleEmailService.isEnabled()) {
            boolean shutdown = false;
            try {
                shutdown = SimpleEmailService.shutdown(30, TimeUnit.SECONDS);
            } catch (InterruptedException ie) {
                log.warn("Interrupted while shutting down SimpleEmailService");
            }

            if (!shutdown)
                SimpleEmailService.shutdown();
        }

        // QueuedHttpCallService
        if (config.getBoolean(PROP_HTTP_SRVC_ON, false)) {
            if (!SimpleHttpService.isEnabled()) // Don't double init...
            {
                int defaultThreadCount = Runtime.getRuntime().availableProcessors() + 1; // threads to use if unspecified
                SimpleHttpService.init(config.getInt(PROP_HTTP_SRVC_CORE_POOL, defaultThreadCount),
                        config.getInt(PROP_HTTP_SRVC_MAX_POOL, defaultThreadCount),
                        config.getLong(PROP_HTTP_SRVC_POOL_IDLE, DEFAULT_HTTP_SRVC_THREAD_IDLE),
                        config.getInt(PROP_HTTP_SRVC_CONNECT_TOUT, DEFAULT_HTTP_SRVC_CONNECT_TOUT),
                        config.getInt(PROP_HTTP_SRVE_SOCKET_TOUT, DEFAULT_HTTP_SRVE_SOCKET_TOUT));
            }
        } else if (SimpleHttpService.isEnabled()) {
            boolean shutdown = false;
            try {
                // Try to shut down the service while letting currently waiting tasks complete
                shutdown = SimpleHttpService.shutdown(30, TimeUnit.SECONDS);
            } catch (InterruptedException ie) {
                log.warn("Interrupted while waiting for SimpleHttpService to shutdown");
            }

            if (!shutdown) {
                // But if that doesn't finish in 60 seconds, just cut it off
                int count = SimpleHttpService.shutdown().size();
                log.warn("SimpleHttpService failed to shutdown in 60 seconds, so it was terminated with "
                        + count + " tasks waiting");
            }
        }

        // Spymemcached Client
        if (config.getBoolean(PROP_SPYMEMCACHED_ON, false)) {
            String addresses = config.getString(PROP_SPYMEMCACHED_ADDRESSES);
            if (addresses == null) {
                log.error("Error configuring spymemcached; enabled but no addresses!");
            } else {
                try {
                    // Reflect our way to the constructor; this is all so that the
                    // spymemcached jar does not need to be included in a J2Free app
                    // unless it is actually to be used.
                    Class klass = Class.forName("net.spy.memcached.MemcachedClient");
                    Constructor constructor = klass.getConstructor(List.class);

                    klass = Class.forName("net.spy.memcached.AddrUtil");
                    Method method = klass.getMethod("getAddresses", String.class);

                    Object client = constructor.newInstance(method.invoke(null, addresses));

                    context.setAttribute(CONTEXT_ATTR_SPYMEMCACHED, client);
                    loadedConfigPropKeys.add(CONTEXT_ATTR_SPYMEMCACHED);
                    Global.put(CONTEXT_ATTR_SPYMEMCACHED, client);

                    log.info("Spymemcached client created, connected to " + addresses);
                } catch (Exception e) {
                    log.error("Error creating memcached client [addresses=" + addresses + "]", e);
                }
            }
        } else {
            // If a spymemcached client was previously created
            Object client = Global.get(CONTEXT_ATTR_SPYMEMCACHED);
            if (client != null) {
                try {
                    // Reflect our way to the shutdown method
                    Class klass = Class.forName("net.spy.memcached.MemcachedClient");
                    Method method = klass.getMethod("shutdown");
                    method.invoke(client); // shutdown is an instance method, so invoke it on the client
                    log.info("Spymemcached client shutdown");
                } catch (Exception e) {
                    log.error("Error shutting down spymemcached client", e);
                }

                // Then remove any references
                Global.remove(CONTEXT_ATTR_SPYMEMCACHED);
                client = null;
            }
        }
    } catch (ConfigurationException ce) {
        log.error("Error configuring app", ce);
    }
}
From source file:net.sf.mpaxs.spi.computeHost.Host.java
/**
 * Registers this host with the master server. After successful registration,
 * the master server can assign jobs to this host.
 */
private void connectToMasterServer() {
    final ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
    final long startUpAt = System.currentTimeMillis();
    final long failAfter = 5000;
    ses.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            Logger.getLogger(Host.class.getName()).log(Level.FINE,
                    "Trying to connect to MasterServer from IP " + settings.getLocalIp());
            long currentTime = System.currentTimeMillis();
            if (currentTime - startUpAt >= failAfter) {
                Logger.getLogger(Host.class.getName()).log(Level.WARNING,
                        "Waited {0} seconds for MasterServer, shutting down ComputeHost", (failAfter / 1000));
                System.exit(1);
            }
            try {
                Logger.getLogger(Host.class.getName()).log(Level.FINE,
                        "Trying to bind to MasterServer at " + settings.getMasterServerIP() + ":"
                                + settings.getMasterServerPort() + " with name: "
                                + settings.getMasterServerName());
                IRemoteServer remRef = (IRemoteServer) Naming.lookup("//" + settings.getMasterServerIP() + ":"
                        + settings.getMasterServerPort() + "/" + settings.getMasterServerName());
                settings.setRemoteReference(remRef);
                UUID hostID = remRef.addHost(authToken, settings.getName(), settings.getLocalIp(),
                        settings.getCores());
                settings.setHostID(hostID);
                Logger.getLogger(Host.class.getName()).log(Level.FINE, "Connection to server established!");
                // Stop the retry loop once registration has succeeded. Note that shutdown()
                // is called from within the scheduled task itself, so the executor cannot
                // terminate until this run() returns; the awaitTermination below will
                // therefore wait out its full timeout.
                ses.shutdown();
                try {
                    ses.awaitTermination(5, TimeUnit.SECONDS);
                } catch (InterruptedException ex) {
                    Logger.getLogger(Host.class.getName()).log(Level.SEVERE, null, ex);
                }
            } catch (NotBoundException ex) {
                Logger.getLogger(Host.class.getName()).log(Level.INFO,
                        "Master server not found, waiting for connection!");
                //Logger.getLogger(Host.class.getName()).log(Level.SEVERE, null, ex);
            } catch (MalformedURLException ex) {
                Logger.getLogger(Host.class.getName()).log(Level.SEVERE, null, ex);
                System.exit(1);
            } catch (RemoteException ex) {
                Logger.getLogger(Host.class.getName()).log(Level.SEVERE, null, ex);
                System.exit(1);
            }
        }
    }, 1, settings.getMasterServerTimeout(), TimeUnit.SECONDS);
}