List of usage examples for java.util.concurrent ExecutorService execute
void execute(Runnable command);
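execute schedules the given Runnable to run at some future time on a pool thread and returns immediately; unlike submit, it returns no Future, so failures surface only through the worker thread's uncaught-exception handler. A minimal self-contained sketch (pool size and class name are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecuteExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.execute(new Runnable() {
            @Override
            public void run() {
                // runs asynchronously on one of the pool's worker threads
                System.out.println("running on " + Thread.currentThread().getName());
            }
        });
        pool.shutdown();                              // stop accepting new tasks
        pool.awaitTermination(10, TimeUnit.SECONDS);  // wait for the task to finish
    }
}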
From source file:com.google.api.ads.adwords.awreporting.processors.onmemory.ReportProcessorOnMemory.java
/**
 * Downloads all the files from the API and processes all the rows, saving the data to the
 * configured database.
 *
 * @param mccAccountId the MCC account ID.
 * @param sessionBuilder the session builder.
 * @param reportType the report type.
 * @param dateRangeType the date range type.
 * @param dateStart the start date.
 * @param dateEnd the ending date.
 * @param acountIdList the account IDs.
 * @param reportDefinitionKey the report definition key.
 * @param properties the properties resource.
 */
private <R extends Report> void downloadAndProcess(String mccAccountId,
        AdWordsSessionBuilderSynchronizer sessionBuilder, ReportDefinitionReportType reportType,
        ReportDefinitionDateRangeType dateRangeType, String dateStart, String dateEnd,
        Set<Long> acountIdList, String reportDefinitionKey, Properties properties) {

    // Download reports to local files and generate Report objects
    LOGGER.info("\n\n ** Generating: " + reportType.name() + " **");
    LOGGER.info(" Processing reports...");

    ReportDefinition reportDefinition = getReportDefinition(reportType, dateRangeType, dateStart, dateEnd,
            reportDefinitionKey, properties);

    @SuppressWarnings("unchecked")
    Class<R> reportBeanClass = (Class<R>) this.csvReportEntitiesMapping.getReportBeanClass(reportType);

    final CountDownLatch latch = new CountDownLatch(acountIdList.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfReportProcessors);

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (Long accountId : acountIdList) {
        LOGGER.trace(".");
        try {
            LOGGER.debug("Parsing account: " + accountId);

            // Create a copy of the AdWordsSession specific to this account
            AdWordsSession adWordsSession = sessionBuilder.getAdWordsSessionCopy(accountId);

            // A separate csvToBean and mappingStrategy are needed for each thread
            ModifiedCsvToBean<R> csvToBean = new ModifiedCsvToBean<R>();
            MappingStrategy<R> mappingStrategy = new AnnotationBasedMappingStrategy<R>(reportBeanClass);

            RunnableProcessorOnMemory<R> runnableProcesor = getRunnableProcessorOnMemory(
                    new RunnableProcessorOnMemory<R>(accountId, adWordsSession, reportDefinition, csvToBean,
                            mappingStrategy, dateRangeType, dateStart, dateEnd, mccAccountId, persister,
                            reportRowsSetSize));
            runnableProcesor.setLatch(latch);
            executorService.execute(runnableProcesor);

        } catch (Exception e) {
            System.err.println("Ignoring account (Error when processing): " + accountId + " " + e.getMessage());
            e.printStackTrace();
        }
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
        e.printStackTrace();
    }

    executorService.shutdown();
    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in "
            + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000) + " seconds ***\n");
}
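The shape of this example is worth distilling: a fixed pool, one task handed to execute per account, and a CountDownLatch sized to the task count so the caller can block until every task reports in. A minimal self-contained sketch of that coordination pattern (task count, pool size, and class name are illustrative, not taken from the source):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class LatchExample {
    public static void main(String[] args) throws InterruptedException {
        int taskCount = 8;
        final CountDownLatch latch = new CountDownLatch(taskCount);
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < taskCount; i++) {
            final int id = i;
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        System.out.println("processing task " + id);
                    } finally {
                        latch.countDown(); // always count down, even on failure
                    }
                }
            });
        }
        latch.await();   // block until every task has counted down
        pool.shutdown(); // then release the pool's threads
    }
}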
From source file:com.xn.interfacetest.service.impl.TestCaseServiceImpl.java
@Transactional(propagation = Propagation.NOT_SUPPORTED)
private void excute(final List<TestCaseDto> testCaseDtoList, final TestEnvironmentDto testEnvironmentDto,
        final Long planId, final TestReportDto testReportDto, final TestSuitDto suitDto) {
    logger.info("==================");
    ExecutorService threadPool = Executors.newFixedThreadPool(10);

    // execute each test case as its own task
    for (int i = 0; i < testCaseDtoList.size(); i++) {
        final int finalI = i;
        threadPool.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    logger.info("========" + finalI);
                    excuteCase(testCaseDtoList.get(finalI), testEnvironmentDto, planId, testReportDto, suitDto);
                } catch (Exception e) {
                    logger.error("", e);
                }
            }
        });
    }

    try {
        logger.info("sleep-----" + 1000);
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        logger.info("InterruptedException-----" + e.getMessage());
    }

    threadPool.shutdown();
    while (true) {
        if (threadPool.isTerminated()) {
            break;
        }
    }
}
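The closing loop above spins until isTerminated() returns true, burning a core while the pool drains. awaitTermination blocks for the same condition instead. A sketch of an equivalent shutdown sequence, assuming the threadPool variable from this example and an import of java.util.concurrent.TimeUnit (the one-hour cap is illustrative):

threadPool.shutdown(); // stop accepting new tasks; queued tasks still run
try {
    // block until every task finishes, up to the illustrative cap
    if (!threadPool.awaitTermination(1, TimeUnit.HOURS)) {
        threadPool.shutdownNow(); // time is up: interrupt the stragglers
    }
} catch (InterruptedException e) {
    threadPool.shutdownNow();
    Thread.currentThread().interrupt(); // preserve the interrupt status
}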
From source file:com.linkedin.pinot.integration.tests.BaseClusterIntegrationTest.java
public void setupH2AndInsertAvro(final List<File> avroFiles, ExecutorService executor)
        throws ClassNotFoundException, SQLException {
    Class.forName("org.h2.Driver");
    _connection = DriverManager.getConnection("jdbc:h2:mem:");
    executor.execute(new Runnable() {
        @Override
        public void run() {
            createH2SchemaAndInsertAvroFiles(avroFiles, _connection);
        }
    });
}
From source file:cp.server.app.ClientMultiThreadedExecution.java
public static void fetch() throws Exception {
    // Create an HttpClient with the ThreadSafeClientConnManager.
    // This connection manager must be used if more than one thread will
    // be using the HttpClient.
    // PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    // cm.setMaxTotal(100);
    // CloseableHttpClient httpclient = HttpClients.custom().setConnectionManager(cm).build();

    ExecutorService pool = Executors.newFixedThreadPool(10);
    ServerDAO dao = new ServerDAO();
    List<Page> pages = null;
    Time ts = new Time(System.currentTimeMillis());
    int interval;

    try {
        // // before 10am, query with the comment yesterday
        // if (Integer.valueOf(ts.toString().substring(0, 2)) > 10) {
        //     interval = 1;
        // } else {
        //     interval = 2;
        // }
        //
        // pages = dao.queryPagesByDayInterval(ConnectionFactory.getConnection(), interval);
        //
        // System.out.println("load comments from " + pages.size() + " pages.");
        // for (Page page : pages) {
        //     PAGESTACK.push(page.getUrl());
        // }
    } catch (Exception ex) {
        ex.printStackTrace();
    }

    try {
        // create an array of URIs to perform GETs on
        String[] urisToGet = { "http://sports.sina.com.cn", "http://news.sina.com.cn", "http://ent.sina.com.cn",
                "http://tech.sina.com.cn", "http://sports.sina.com.cn/o/2013-10-27/04016852444.shtml",
                "http://finance.sina.com.cn/china/20131027/043917125695.shtml",
                "http://sports.sina.com.cn/j/2013-10-27/06336852561.shtml",
                "http://sports.sina.com.cn/j/2013-10-26/21006851844.shtml" };

        for (int i = 0; i < 10000; i++) {
            for (int j = 0; j < urisToGet.length; j++) {
                PAGESTACK.push(urisToGet[j]);
            }
        }

        CountDownLatch cdl = new CountDownLatch(6);

        // create a thread for each URI
        GetThread[] threads = new GetThread[urisToGet.length];
        for (int i = 0; i < 4; i++) {
            // HttpGet httpget = new HttpGet(urisToGet[i]);
            threads[i] = new GetThread(urisToGet[i], i + 1, cdl);
        }

        // start the threads
        for (int j = 0; j < 4; j++) {
            pool.execute(threads[j]);
            // threads[j].start();
        }

        cdl.await();
    } finally {
        // httpclient.close();
        pool.shutdown();
    }
}
From source file:org.apache.geode.distributed.internal.InternalLocator.java
private void startSharedConfigurationService(InternalCache internalCache) {
    installSharedConfigHandler();

    if (this.config.getEnableClusterConfiguration() && !this.isSharedConfigurationStarted) {
        if (!isDedicatedLocator()) {
            logger.info("Cluster configuration service not enabled as it is only supported "
                    + "in dedicated locators");
            return;
        }
        ExecutorService es = internalCache.getDistributionManager().getThreadPool();
        es.execute(new SharedConfigurationRunnable());
    } else {
        logger.info("Cluster configuration service is disabled");
    }
}
From source file:org.apache.solr.update.processor.DistributedUpdateProcessor.java
private void doFinish() {
    // TODO: if not a forward and replication req is not specified, we could
    // send in a background thread

    cmdDistrib.finish();
    List<Error> errors = cmdDistrib.getErrors();
    // TODO - we may need to tell about more than one error...

    // if it's a forward, any failure is a problem -
    // otherwise we assume things are fine if we got it locally
    // until we start allowing min replication param
    if (errors.size() > 0) {
        // if one node is a RetryNode, this was a forward request
        if (errors.get(0).req.node instanceof RetryNode) {
            rsp.setException(errors.get(0).e);
        }
        // else
        // for now we don't error - we assume if it was added locally, we
        // succeeded
    }

    // if it is not a forward request, for each failure, try to tell the node to
    // recover - the doc was already added locally, so it should have been legit

    // TODO: we should do this in the background it would seem
    for (final SolrCmdDistributor.Error error : errors) {
        if (error.req.node instanceof RetryNode) {
            // we don't try to force a leader to recover
            // when we cannot forward to it
            continue;
        }

        // TODO: we should force their state to recovering ??
        // TODO: do retries??
        // TODO: what if it is already recovering? Right now recoveries queue up -
        // should they?
        final String recoveryUrl = error.req.node.getBaseUrl();
        Thread thread = new Thread() {
            {
                setDaemon(true);
            }

            @Override
            public void run() {
                log.info("try and ask " + recoveryUrl + " to recover");
                HttpSolrServer server = new HttpSolrServer(recoveryUrl);
                try {
                    server.setSoTimeout(60000);
                    server.setConnectionTimeout(15000);

                    RequestRecovery recoverRequestCmd = new RequestRecovery();
                    recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
                    recoverRequestCmd.setCoreName(error.req.node.getCoreName());

                    try {
                        server.request(recoverRequestCmd);
                    } catch (Throwable t) {
                        SolrException.log(log, recoveryUrl + ": Could not tell a replica to recover", t);
                    }
                } finally {
                    server.shutdown();
                }
            }
        };
        ExecutorService executor = req.getCore().getCoreDescriptor().getCoreContainer().getUpdateExecutor();
        executor.execute(thread);
    }
}
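Note that this example hands a Thread object to execute. Because Thread implements Runnable, its run method executes on one of the executor's pool threads; the Thread is never started as its own thread, so properties set on it (such as the daemon flag in the instance initializer) do not affect the thread the work actually runs on. A minimal sketch of that behavior:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ThreadAsRunnable {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Thread t = new Thread() {
            @Override
            public void run() {
                // prints the pool thread's name, not "my-thread"
                System.out.println("ran on " + Thread.currentThread().getName());
            }
        };
        t.setName("my-thread");
        executor.execute(t); // treated as a plain Runnable; t.start() is never called
        executor.shutdown();
    }
}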
From source file:org.apache.geode.distributed.internal.InternalLocator.java
private void restartWithDS(InternalDistributedSystem newSystem, InternalCache newCache) throws IOException {
    synchronized (locatorLock) {
        if (locator != this && hasLocator()) {
            throw new IllegalStateException("A locator can not be created because one already exists in this JVM.");
        }
        this.myDs = newSystem;
        this.myCache = newCache;
        this.myDs.setDependentLocator(this);
        logger.info("Locator restart: initializing TcpServer");

        if (isSharedConfigurationEnabled()) {
            this.sharedConfig = new ClusterConfigurationService(newCache);
        }

        this.server.restarting(newSystem, newCache, this.sharedConfig);

        if (this.productUseLog.isClosed()) {
            this.productUseLog.reopen();
        }
        this.productUseLog.monitorUse(newSystem);

        this.isSharedConfigurationStarted = true;
        if (isSharedConfigurationEnabled()) {
            ExecutorService es = newCache.getDistributionManager().getThreadPool();
            es.execute(new SharedConfigurationRunnable());
        }

        if (!this.server.isAlive()) {
            logger.info("Locator restart: starting TcpServer");
            startTcpServer();
        }
        logger.info("Locator restart: initializing JMX manager");
        startJmxManagerLocationService(newCache);
        endStartLocator(this.myDs);
        logger.info("Locator restart completed");
    }
}
From source file:org.wso2.carbon.event.input.adapter.file.FileEventAdapter.java
public void processFiles() {
    try {
        // collect files in the source directory
        File folder = new File(this.sourcePath);
        File[] listOfFiles = folder.listFiles();
        //String patternString = ".*\\.csv$";

        for (int i = 0; i < listOfFiles.length; i++) {
            boolean isMatch = Pattern.matches(filenameRegex, listOfFiles[i].getName());
            if (isMatch) {
                BufferedReader in = null;
                ExecutorService executor = null;
                try {
                    // initialize thread pool
                    executor = Executors.newFixedThreadPool(this.threads);

                    // load the file
                    in = new BufferedReader(new FileReader(listOfFiles[i].toPath().toString()));
                    String line = null;

                    // skip header lines
                    int lineSkipped = 0;
                    while (lineSkipped < this.skipLine && (line = in.readLine()) != null) {
                        lineSkipped = lineSkipped + 1;
                    }

                    // process line by line, dispatching a batch every batchSize lines
                    int lineCount = 0;
                    String jsonArray = "";
                    line = null;
                    while ((line = in.readLine()) != null) {
                        lineCount = lineCount + 1;
                        jsonArray = jsonArray + formatLineToWSO2JSONEvent(line) + ",";
                        if (lineCount % this.batchSize == 0) {
                            executor.execute(new eventProcessorThread(this.eventAdapterListener, this.tenantId,
                                    "[" + jsonArray + "]"));
                            jsonArray = "";
                        }
                    }
                    // flush any remaining events (avoids submitting an empty batch)
                    if (!jsonArray.isEmpty()) {
                        executor.execute(new eventProcessorThread(this.eventAdapterListener, this.tenantId,
                                "[" + jsonArray + "]"));
                    }
                    executor.shutdown();

                    // wait until all tasks complete
                    while (!executor.isTerminated()) {
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    // release resources
                    executor = null;
                    in.close();
                    in = null;
                    //System.gc();

                    // move the current file to the archive location
                    Files.move(listOfFiles[i].toPath(),
                            new File(this.arcPath + "/" + listOfFiles[i].getName()).toPath(), REPLACE_EXISTING);
                }
            }
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
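The pattern here, accumulate items, dispatch a task every batchSize items, then flush the remainder, generalizes to any stream. A minimal self-contained sketch with a List-based batch (batch size, pool size, and class names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class BatchDispatch {
    static final int BATCH_SIZE = 100;

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        List<String> batch = new ArrayList<String>();
        for (int i = 0; i < 1005; i++) {
            batch.add("line-" + i);
            if (batch.size() == BATCH_SIZE) {
                dispatch(executor, batch);
                batch = new ArrayList<String>(); // hand off the filled batch, start a new one
            }
        }
        if (!batch.isEmpty()) {
            dispatch(executor, batch); // flush the final partial batch
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }

    private static void dispatch(ExecutorService executor, final List<String> batch) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                System.out.println("processing " + batch.size() + " lines");
            }
        });
    }
}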
From source file:org.xdi.oxd.server.SocketService.java
public void listenSocket() {
    final Injector injector = Guice.createInjector(new GuiceModule());

    final Configuration c = Configuration.getInstance();
    final int port = c.getPort();
    final LicenseService licenseService = new LicenseService(c);

    final ExecutorService executorService = Executors.newFixedThreadPool(licenseService.getThreadsCount(),
            CoreUtils.daemonThreadFactory());

    try {
        final Boolean localhostOnly = c.getLocalhostOnly();
        if (localhostOnly == null || localhostOnly) {
            final InetAddress address = InetAddress.getByName("127.0.0.1");
            m_serverSocket = new ServerSocket(port, 50, address);
        } else {
            m_serverSocket = new ServerSocket(port, 50);
        }

        m_serverSocket.setSoTimeout(c.getTimeOutInSeconds() * 1000);
        LOG.info("Server socket is bound to port: {}, with timeout: {} seconds. Start listening for notifications.",
                port, c.getTimeOutInSeconds());

        while (!m_shutdown) {
            try {
                if (licenseService.isLicenseChanged()) {
                    licenseService.reset();
                    LOG.info("License was changed. Restart oxd server to enforce new license!");
                    shutdownNow();
                    m_shutdown = false;
                    LOG.info("Starting...");
                    listenSocket();
                }

                final Socket clientSocket = m_serverSocket.accept();
                LOG.debug("Start new SocketProcessor...");
                executorService.execute(new SocketProcessor(clientSocket, injector));
            } catch (IOException e) {
                LOG.error("Accept failed, port: {}", port);
                throw e;
                //System.exit(-1);
            }
        }
    } catch (IOException e) {
        LOG.error("Could not listen on port: {}.", port);
    } finally {
        IOUtils.closeQuietly(m_serverSocket);
    }
}
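The core of this example is the classic accept loop: one thread blocks on accept and hands each connection to a pool via execute, so a slow client never stalls the listener. A stripped-down, self-contained sketch of that pattern (port, pool size, and handler body are illustrative):

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class AcceptLoop {
    public static void main(String[] args) throws IOException {
        ExecutorService pool = Executors.newFixedThreadPool(8);
        try (ServerSocket server = new ServerSocket(8080)) {
            while (true) {
                final Socket client = server.accept(); // blocks until a client connects
                pool.execute(new Runnable() {          // hand the connection to a worker
                    @Override
                    public void run() {
                        try {
                            client.getOutputStream().write("hello\n".getBytes());
                        } catch (IOException ignored) {
                        } finally {
                            try { client.close(); } catch (IOException ignored) { }
                        }
                    }
                });
            }
        }
    }
}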
From source file:com.aerospike.benchmarks.Main.java
private void doRWTest(AerospikeClient client) throws Exception {
    ExecutorService es = Executors.newFixedThreadPool(this.nThreads);
    RWTask[] tasks = new RWTask[this.nThreads];

    for (int i = 0; i < this.nThreads; i++) {
        RWTask rt;
        if (args.validate) {
            int tstart = this.startKey + ((int) (this.nKeys * (((float) i) / this.nThreads)));
            int tkeys = (int) (this.nKeys * (((float) (i + 1)) / this.nThreads))
                    - (int) (this.nKeys * (((float) i) / this.nThreads));
            rt = new RWTaskSync(client, args, counters, tstart, tkeys);
        } else {
            rt = new RWTaskSync(client, args, counters, this.startKey, this.nKeys);
        }
        tasks[i] = rt;
        es.execute(rt);
    }
    collectRWStats(tasks, null);
    es.shutdown();
}
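In the validate branch, the key range is split across workers with float arithmetic; the casts keep the shares contiguous but the rounding is easy to get wrong. For comparison, a sketch of an integer-only split (names mirror the example, but this is not the benchmark's actual logic):

// split nKeys contiguous keys starting at startKey across nThreads workers;
// the first (nKeys % nThreads) workers each take one extra key
int base = nKeys / nThreads;
int remainder = nKeys % nThreads;
int offset = startKey;
for (int i = 0; i < nThreads; i++) {
    int tkeys = base + (i < remainder ? 1 : 0);
    int tstart = offset;
    offset += tkeys;
    // a worker would then cover keys [tstart, tstart + tkeys)
}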