List of usage examples for the java.util.concurrent.ThreadPoolExecutor constructor
public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler)
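For orientation, here is a minimal sketch of what each constructor argument controls. The pool sizes, queue capacity, and handler are illustrative values, not taken from any of the examples below:

import java.util.concurrent.*;

public class ThreadPoolExecutorDemo {
    public static void main(String[] args) throws InterruptedException {
        // 2 core threads, growing to 4 only once the bounded queue is full;
        // threads above the core size are reclaimed after 30 seconds idle.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2,                                   // corePoolSize
                4,                                   // maximumPoolSize
                30L, TimeUnit.SECONDS,               // keepAliveTime + unit for non-core threads
                new ArrayBlockingQueue<>(100),       // workQueue
                new ThreadPoolExecutor.AbortPolicy() // handler: throw RejectedExecutionException
        );
        pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}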
From source file:org.codice.ddf.catalog.migratable.impl.MigrationTaskManager.java
private ExecutorService createExecutorService() {
    return new ThreadPoolExecutor(0, catalogConfig.getExportThreadCount(), 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(catalogConfig.getExportThreadCount()),
            new ThreadPoolExecutor.CallerRunsPolicy());
}
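This configuration — a bounded queue sized to the thread count plus CallerRunsPolicy — gives natural backpressure: once the queue fills, the submitting thread runs the overflow task itself. A self-contained sketch of that effect, with deliberately tiny illustrative sizes rather than the catalogConfig values:

import java.util.concurrent.*;

public class CallerRunsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Tiny pool and queue so saturation is easy to trigger.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(0, 1, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(1), new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 4; i++) {
            pool.execute(() -> {
                try {
                    Thread.sleep(100); // keep the single worker busy so the queue fills
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                System.out.println("ran on " + Thread.currentThread().getName());
            });
        }
        // Overflow tasks print "main": CallerRunsPolicy throttles the submitter
        // instead of dropping work or throwing RejectedExecutionException.
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}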
From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java
/**
 * Perform a bulk load of the given directory into the given pre-existing table. This method is
 * not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getTableName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + " is not currently available.");
    }

    // initialize thread pools
    int nrThreads = cfg.getInt("hbase.loadincremental.threads.max",
            Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ". Does it contain files in " + "subdirectories that correspond to column family names?");
            return;
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occurred while grouping HFiles, retry attempt " + count + " with "
                        + queue.size() + " files remaining to group or split");
            }
            int maxRetries = cfg.getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }
    } finally {
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append(" ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }
}
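One detail worth calling out in the pool setup above: core and maximum sizes are equal, and allowCoreThreadTimeOut(true) is set. That yields a fixed-size pool that nevertheless shrinks to zero threads when idle. The pattern in isolation (a sketch; the size is illustrative):

import java.util.concurrent.*;

// Fixed at nrThreads while busy, but every thread is reclaimed after 60s idle,
// so the pool costs nothing between bulk loads.
int nrThreads = Runtime.getRuntime().availableProcessors();
ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>());
pool.allowCoreThreadTimeOut(true);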
From source file:com.cloud.agent.Agent.java
public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource)
        throws ConfigurationException {
    _shell = shell;
    _resource = resource;
    _link = null;

    resource.setAgentControl(this);

    final String value = _shell.getPersistentProperty(getResourceName(), "id");
    _id = value != null ? Long.parseLong(value) : null;
    s_logger.info("id is " + (_id != null ? _id : ""));

    final Map<String, Object> params = PropertiesUtil.toMap(_shell.getProperties());

    // merge with properties from command line to let resource access command line parameters
    for (final Map.Entry<String, Object> cmdLineProp : _shell.getCmdLineProperties().entrySet()) {
        params.put(cmdLineProp.getKey(), cmdLineProp.getValue());
    }

    if (!_resource.configure(getResourceName(), params)) {
        throw new ConfigurationException("Unable to configure " + _resource.getName());
    }

    final String host = _shell.getHost();
    _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this);
    // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp());

    s_logger.debug("Adding shutdown hook");
    Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));

    _ugentTaskPool = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10,
            TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), new NamedThreadFactory("UgentTask"));

    _executor = new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("agentRequest-Handler"));

    s_logger.info("Agent [id = " + (_id != null ? _id : "new") + " : type = " + getResourceName()
            + " : zone = " + _shell.getZone() + " : pod = " + _shell.getPod() + " : workers = "
            + _shell.getWorkers() + " : host = " + host + " : port = " + _shell.getPort() + "]");
}
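The two pools above make opposite queue choices, which is what distinguishes the urgent-task pool from the request-handler pool. A compact way to see the difference (illustrative sizes, not the shell-derived ones):

import java.util.concurrent.*;

// Hand-off queue: every submission either gets a thread (up to max) or is rejected,
// so bursts grow the pool immediately -- suited to urgent control tasks.
ThreadPoolExecutor urgent = new ThreadPoolExecutor(2, 4, 10, TimeUnit.MINUTES,
        new SynchronousQueue<>());

// Unbounded queue: offer() never fails, so the pool never grows past corePoolSize;
// the 5x maximum below is effectively unreachable and this pool stays at 2 threads.
ThreadPoolExecutor workers = new ThreadPoolExecutor(2, 10, 1, TimeUnit.DAYS,
        new LinkedBlockingQueue<>());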
From source file:dk.dma.ais.lib.FileConvert.java
/** {@inheritDoc} */
@Override
protected void run(Injector injector) throws Exception {
    configureFileEnding();

    final EConsumer<String> consumer = new EConsumer<String>() {
        @Override
        public void accept(String s) throws IllegalArgumentException, IllegalAccessException,
                NoSuchFieldException, SecurityException, IOException, InterruptedException {
            Path path = Paths.get(s);
            LOG.debug("Started processing file " + path);

            Path endPath;
            if (keepFileStructure) {
                Path relative;
                relative = path;
                endPath = Paths.get(Paths.get(convertTo).toString(), relative.toString());
                new File(endPath.toString()).mkdirs();
            } else {
                endPath = Paths.get("");
            }

            String filename = path.getFileName().toString();
            if (!filename.endsWith(fileEnding))
                filename = FilenameUtils.removeExtension(filename) + fileEnding;
            Path filePath = Paths.get(endPath.toString(), filename);
            LOG.debug("Output File: " + filePath.toString());

            final OutputStream fos = new FileOutputStream(filePath.toString());

            final boolean createSituationFolder = !StringUtils.isBlank(kmzSnapshotAt);
            final long snapshotAtEpochMillis = createSituationFolder
                    ? LocalDateTime.parse(kmzSnapshotAt, formatter).toInstant(ZoneOffset.UTC).toEpochMilli()
                    : -1;

            OutputStreamSink<AisPacket> sink;
            if ("kmz".equals(outputSinkFormat)) {
                // AisPacketKMZOutputSink(filter, createSituationFolder, createMovementsFolder,
                // createTracksFolder, isPrimaryTarget, isSecondaryTarget, triggerSnapshot,
                // snapshotDescriptionSupplier, movementInterpolationStep, supplyTitle,
                // supplyDescription, iconHrefSupplier);
                sink = AisPacketOutputSinks.newKmzSink(e -> true, // this.filter = e -> true;
                        createSituationFolder, // this.createSituationFolder = true;
                        true, // createMovementsFolder = true;
                        true, // this.createTracksFolder = true;
                        e -> kmzPrimaryMmsi <= 0 ? false
                                : e.tryGetAisMessage().getUserId() == kmzPrimaryMmsi, // this.isPrimaryTarget = e -> false;
                        e -> kmzSecondaryMmsi <= 0 ? false
                                : e.tryGetAisMessage().getUserId() == kmzSecondaryMmsi, // this.isSecondaryTarget = e -> false;
                        e -> e.getBestTimestamp() >= snapshotAtEpochMillis, // this.triggerSnapshot = e -> false;
                        () -> "Situation at " + kmzSnapshotAt, // this.snapshotDescriptionSupplier = null;
                        () -> 10, // this.title = defaultTitleSupplier;
                        () -> "description", // this.description = defaultDescriptionSupplier;
                        () -> "10", // this.movementInterpolationStep = defaultMovementInterpolationStepSupplier;
                        (shipTypeCargo, navigationalStatus) -> "" // this.iconHrefSupplier = defaultIconHrefSupplier;
                );
            } else
                sink = AisPacketOutputSinks.getOutputSink(outputSinkFormat, columns);

            sink.closeWhenFooterWritten();

            AisPacketReader apis = AisPacketReader.createFromFile(path, false);
            apis.writeTo(fos, sink);
            apis.close();
            fos.close();
        }
    };

    /*
     * Creates a pool of executors, 4 threads. Each thread will open a file using an AisPacketReader.
     * 10000 files can be submitted to the queue; afterwards the calling thread will execute the job
     * instead.
     */
    ThreadPoolExecutor threadpoolexecutor = new ThreadPoolExecutor(4, 4, 1, TimeUnit.SECONDS,
            new ArrayBlockingQueue<>(10000), new ThreadPoolExecutor.CallerRunsPolicy());
    for (final String s : sources) {
        threadpoolexecutor.execute(() -> {
            try {
                consumer.accept(s);
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
    }
    threadpoolexecutor.shutdown();
    threadpoolexecutor.awaitTermination(999, TimeUnit.DAYS);
}
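The shutdown() followed by awaitTermination(999, TimeUnit.DAYS) at the end is an "effectively wait forever" drain. If waiting indefinitely is not acceptable, a bounded variant of the same idiom looks like this (a sketch, not from the source; the one-hour limit is illustrative):

pool.shutdown();                        // stop accepting new tasks, let queued ones finish
if (!pool.awaitTermination(1, TimeUnit.HOURS)) {
    pool.shutdownNow();                 // give up and interrupt in-flight tasks
}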
From source file:com.uroad.net.AsyncHttpClient.java
public AsyncHttpClient() {
    BasicHttpParams httpParams = new BasicHttpParams();

    ConnManagerParams.setTimeout(httpParams, socketTimeout);
    ConnManagerParams.setMaxConnectionsPerRoute(httpParams, new ConnPerRouteBean(maxConnections));
    ConnManagerParams.setMaxTotalConnections(httpParams, DEFAULT_MAX_CONNECTIONS);

    HttpConnectionParams.setSoTimeout(httpParams, socketTimeout);
    HttpConnectionParams.setConnectionTimeout(httpParams, socketTimeout);
    HttpConnectionParams.setTcpNoDelay(httpParams, true);
    HttpConnectionParams.setSocketBufferSize(httpParams, DEFAULT_SOCKET_BUFFER_SIZE);

    HttpProtocolParams.setVersion(httpParams, HttpVersion.HTTP_1_1);
    HttpProtocolParams.setUserAgent(httpParams,
            String.format("uroad-android-httpclient/%s (http://www.u-road.com/)", VERSION));

    // Register the "http" and "https" schemes and their socket factories in the SchemeRegistry.
    SchemeRegistry schemeRegistry = new SchemeRegistry();
    schemeRegistry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));
    schemeRegistry.register(new Scheme("https", SSLSocketFactory.getSocketFactory(), 443));
    ThreadSafeClientConnManager cm = new ThreadSafeClientConnManager(httpParams, schemeRegistry);

    httpContext = new SyncBasicHttpContext(new BasicHttpContext());
    httpClient = new DefaultHttpClient(cm, httpParams);

    // Request interceptor: ask for gzip responses and attach any client-wide headers.
    httpClient.addRequestInterceptor(new HttpRequestInterceptor() {
        @Override
        public void process(HttpRequest request, HttpContext context) {
            if (!request.containsHeader(HEADER_ACCEPT_ENCODING)) {
                request.addHeader(HEADER_ACCEPT_ENCODING, ENCODING_GZIP);
            }
            for (String header : clientHeaderMap.keySet()) {
                request.addHeader(header, clientHeaderMap.get(header));
            }
        }
    });

    // Response interceptor: transparently inflate gzip-encoded entities.
    httpClient.addResponseInterceptor(new HttpResponseInterceptor() {
        @Override
        public void process(HttpResponse response, HttpContext context) {
            final HttpEntity entity = response.getEntity();
            if (entity == null) {
                return;
            }
            final Header encoding = entity.getContentEncoding();
            if (encoding != null) {
                for (HeaderElement element : encoding.getElements()) {
                    if (element.getName().equalsIgnoreCase(ENCODING_GZIP)) {
                        response.setEntity(new InflatingEntity(response.getEntity()));
                        break;
                    }
                }
            }
        }
    });

    httpClient.setHttpRequestRetryHandler(new RetryHandler(DEFAULT_MAX_RETRIES));

    threadPool = new ThreadPoolExecutor(DEFAULT_CORE_POOL_SIZE, DEFAULT_MAXIMUM_POOL_SIZE,
            DEFAULT_KEEP_ALIVETIME, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(3),
            new ThreadPoolExecutor.CallerRunsPolicy());

    requestMap = new WeakHashMap<Context, List<WeakReference<Future<?>>>>();
    clientHeaderMap = new HashMap<String, String>();
}
From source file:org.apache.hadoop.hive.llap.daemon.impl.TaskExecutorService.java
public TaskExecutorService(int numExecutors, int waitQueueSize, String waitQueueComparatorClassName,
        boolean enablePreemption, ClassLoader classLoader, final LlapDaemonExecutorMetrics metrics,
        Clock clock) {
    super(TaskExecutorService.class.getSimpleName());
    LOG.info("TaskExecutorService is being setup with parameters: " + "numExecutors=" + numExecutors
            + ", waitQueueSize=" + waitQueueSize + ", waitQueueComparatorClassName="
            + waitQueueComparatorClassName + ", enablePreemption=" + enablePreemption);

    final Comparator<TaskWrapper> waitQueueComparator = createComparator(waitQueueComparatorClassName);
    this.maxParallelExecutors = numExecutors;
    this.waitQueue = new EvictingPriorityBlockingQueue<>(waitQueueComparator, waitQueueSize);
    this.clock = clock == null ? new MonotonicClock() : clock;
    this.threadPoolExecutor = new ThreadPoolExecutor(numExecutors, // core pool size
            numExecutors, // max pool size
            1, TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), // direct hand-off
            new ExecutorThreadFactory(classLoader));
    this.executorService = MoreExecutors.listeningDecorator(threadPoolExecutor);
    this.preemptionQueue = new PriorityBlockingQueue<>(numExecutors, new PreemptionQueueComparator());
    this.enablePreemption = enablePreemption;
    this.numSlotsAvailable = new AtomicInteger(numExecutors);
    this.metrics = metrics;
    if (metrics != null) {
        metrics.setNumExecutorsAvailable(numSlotsAvailable.get());
    }

    // single threaded scheduler for tasks from wait queue to executor threads
    ExecutorService wes = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat(WAIT_QUEUE_SCHEDULER_THREAD_NAME_FORMAT).build());
    this.waitQueueExecutorService = MoreExecutors.listeningDecorator(wes);

    ExecutorService executionCompletionExecutorServiceRaw = Executors.newFixedThreadPool(1,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ExecutionCompletionThread #%d").build());
    executionCompletionExecutorService = MoreExecutors
            .listeningDecorator(executionCompletionExecutorServiceRaw);

    ListenableFuture<?> future = waitQueueExecutorService.submit(new WaitQueueWorker());
    Futures.addCallback(future, new WaitQueueWorkerCallback());
}
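The SynchronousQueue here means tasks are never buffered in the pool itself: each submission is handed directly to an idle worker, and with core == max == numExecutors anything beyond that is rejected immediately (the wait queue above exists to absorb that). A sketch of the raw behavior, with illustrative sizes:

import java.util.concurrent.*;

public class DirectHandoffDemo {
    public static void main(String[] args) {
        // No buffering: execute() must hand the task to an idle worker,
        // otherwise the default AbortPolicy rejects it.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 1, TimeUnit.MINUTES,
                new SynchronousQueue<>());
        Runnable sleeper = () -> {
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        pool.execute(sleeper);
        pool.execute(sleeper);
        try {
            pool.execute(sleeper); // both workers busy -> RejectedExecutionException
        } catch (RejectedExecutionException e) {
            System.out.println("rejected: pool is saturated");
        }
        pool.shutdown();
    }
}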
From source file:org.apache.kylin.dict.lookup.cache.RocksDBLookupTableCache.java
private void initExecutors() {
    this.cacheBuildExecutor = new ThreadPoolExecutor(0, 50, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("lookup-cache-build-thread"));

    this.cacheStateCheckExecutor = Executors
            .newSingleThreadScheduledExecutor(new NamedThreadFactory("lookup-cache-state-checker"));
    cacheStateCheckExecutor.scheduleAtFixedRate(cacheStateChecker, 10, 10 * 60, TimeUnit.SECONDS); // check every 10 minutes
}
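A caveat about this configuration, which follows from ThreadPoolExecutor's documented growth rule rather than anything Kylin-specific: new threads beyond the core size are only created when the queue rejects an offer, so with corePoolSize = 0 and an unbounded LinkedBlockingQueue the pool never grows past the single thread it starts when work arrives; the maximum of 50 is unreachable. A sketch of a variant that can actually reach 50 threads (the queue bound and thread name are illustrative):

import java.util.concurrent.*;

// Bounded queue: once 1000 tasks are waiting, further submissions grow the pool
// toward 50 threads instead of queueing forever behind a single worker.
ThreadPoolExecutor pool = new ThreadPoolExecutor(0, 50, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(1000), r -> new Thread(r, "lookup-cache-build"));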
From source file:com.facebook.Settings.java
/**
 * Returns the Executor used by the SDK for non-AsyncTask background work.
 *
 * By default this uses AsyncTask Executor via reflection if the API level is high enough.
 * Otherwise this creates a new Executor with defaults similar to those used in AsyncTask.
 *
 * @return an Executor used by the SDK. This will never be null.
 */
public static Executor getExecutor() {
    synchronized (LOCK) {
        if (Settings.executor == null) {
            Executor executor = getAsyncTaskExecutor();
            if (executor == null) {
                executor = new ThreadPoolExecutor(DEFAULT_CORE_POOL_SIZE, DEFAULT_MAXIMUM_POOL_SIZE,
                        DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, DEFAULT_WORK_QUEUE, DEFAULT_THREAD_FACTORY);
            }
            Settings.executor = executor;
        }
    }
    return Settings.executor;
}
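A typical call site for this accessor (hypothetical usage; only getExecutor() comes from the snippet):

// Never null per the contract above; safe for fire-and-forget background work.
Settings.getExecutor().execute(() -> {
    // ... non-AsyncTask background work ...
});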
From source file:com.nextgis.maplibui.service.TileDownloadService.java
private void download(DownloadTask task) {
    MapBase map = MapBase.getInstance();
    if (null == map)
        return;

    ILayer layer = map.getLayerByPathName(task.getLayerPathName());
    if (null != layer && layer instanceof RemoteTMSLayer) { // process only tms layers
        final RemoteTMSLayer tmsLayer = (RemoteTMSLayer) layer;
        String notifyTitle = getString(R.string.download_tiles);

        mBuilder.setWhen(System.currentTimeMillis()).setContentTitle(notifyTitle);
        mNotifyManager.notify(TILE_DOWNLOAD_NOTIFICATION_ID, mBuilder.build());

        final List<TileItem> tiles = new LinkedList<>();
        int zoomCount = task.getZoomTo() + 1 - task.getZoomFrom();
        for (int zoom = task.getZoomFrom(); zoom < task.getZoomTo() + 1; zoom++) {
            tiles.addAll(MapUtil.getTileItems(task.getEnvelope(), zoom, tmsLayer.getTMSType()));
            if (mCanceled)
                break;

            mBuilder.setProgress(zoomCount, zoom, false).setContentText(getString(R.string.form_tiles_list));
            mNotifyManager.notify(TILE_DOWNLOAD_NOTIFICATION_ID, mBuilder.build());

            if (tiles.size() > Constants.MAX_TILES_COUNT)
                break;
        }

        int threadCount = DRAWING_SEPARATE_THREADS;
        int coreCount = Runtime.getRuntime().availableProcessors();

        // FIXME more than 1 pool size causing strange behaviour on 6.0
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M)
            coreCount = 1;

        mThreadPool = new ThreadPoolExecutor(coreCount, threadCount, KEEP_ALIVE_TIME, KEEP_ALIVE_TIME_UNIT,
                new LinkedBlockingQueue<Runnable>(), new RejectedExecutionHandler() {
                    @Override
                    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                        try {
                            executor.getQueue().put(r);
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                            //throw new RuntimeException("Interrupted while submitting task", e);
                        }
                    }
                });

        int tilesSize = tiles.size();
        List<Future> futures = new ArrayList<>(tilesSize);

        for (int i = 0; i < tilesSize; ++i) {
            if (mCanceled)
                break;

            final TileItem tile = tiles.get(i);
            futures.add(mThreadPool.submit(new Runnable() {
                @Override
                public void run() {
                    android.os.Process.setThreadPriority(Constants.DEFAULT_DRAW_THREAD_PRIORITY);
                    tmsLayer.downloadTile(tile);
                }
            }));
        }

        // wait for the downloads to finish
        int nStep = futures.size() / Constants.DRAW_NOTIFY_STEP_PERCENT;
        if (nStep == 0)
            nStep = 1;

        for (int i = 0, futuresSize = futures.size(); i < futuresSize; i++) {
            if (mCanceled)
                break;

            try {
                Future future = futures.get(i);
                future.get(); // wait for task ending

                if (i % nStep == 0) {
                    mBuilder.setProgress(futuresSize, i, false)
                            .setContentText(getString(R.string.processing) + " " + tmsLayer.getName());
                    // Displays the progress bar for the first time.
                    mNotifyManager.notify(TILE_DOWNLOAD_NOTIFICATION_ID, mBuilder.build());
                }
            } catch (CancellationException | InterruptedException e) {
                //e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }
    }
}
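The inline RejectedExecutionHandler above implements a "block the submitter" policy: instead of discarding or caller-running a rejected task, it puts it back into the queue, blocking until space frees up. Extracted as a reusable class it looks like this (a sketch; note that with the unbounded LinkedBlockingQueue used above, rejection can only fire after shutdown, in which case a task re-queued this way is never executed):

import java.util.concurrent.*;

// Blocks the submitting thread until queue space frees up, turning execute()
// into a bounded, back-pressured operation when paired with a bounded queue.
class BlockWhenFullPolicy implements RejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        try {
            executor.getQueue().put(r); // blocks instead of throwing
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RejectedExecutionException("Interrupted while waiting for queue space", e);
        }
    }
}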
From source file:ca.sqlpower.architect.enterprise.ArchitectClientSideSession.java
/**
 * This constructor is only used for testing. This constructor allows users
 * to specify an executor to use as the foreground thread instead of using
 * the normal EDT. This is handy for ensuring all of the events occur on the
 * correct thread and updates do not conflict with persists. If the executor
 * is null then the foreground thread will just execute the runnables on the
 * current thread.
 */
public ArchitectClientSideSession(ArchitectSessionContext context, String name,
        ProjectLocation projectLocation, boolean useThreadPool) throws SQLObjectException {
    super(context, name, new ArchitectSwingProject());

    this.projectLocation = projectLocation;
    this.useThreadPool = useThreadPool;
    this.foregroundThreadExecutor = new ThreadPoolExecutor(1, 1, 5, TimeUnit.MINUTES,
            new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread newThread = new Thread(r);
                    foregroundExecutorThread.add(newThread);
                    return newThread;
                }
            });
    foregroundThreadExecutor.allowCoreThreadTimeOut(false);

    dataSourceCollectionUpdater = new ArchitectDataSourceCollectionUpdater(projectLocation);
    this.isEnterpriseSession = true;

    setupSnapshots();

    String ddlgClass = prefs.get(this.projectLocation.getUUID() + ".ddlg", null);
    if (ddlgClass != null) {
        try {
            DDLGenerator ddlg = (DDLGenerator) Class
                    .forName(ddlgClass, true, ArchitectClientSideSession.class.getClassLoader())
                    .newInstance();
            setDDLGenerator(ddlg);
            ddlg.setTargetCatalog(prefs.get(this.projectLocation.getUUID() + ".targetCatalog", null));
            ddlg.setTargetSchema(prefs.get(this.projectLocation.getUUID() + ".targetSchema", null));
        } catch (Exception e) {
            createUserPrompter("Cannot load DDL settings due to missing class " + ddlgClass,
                    UserPromptType.MESSAGE, UserPromptOptions.OK, UserPromptResponse.OK, null, "OK");
            logger.error("Cannot find DDL Generator for class " + ddlgClass
                    + ", ddl generator properties are not loaded.");
        }
    }

    outboundHttpClient = ClientSideSessionUtils.createHttpClient(projectLocation.getServiceInfo(),
            cookieStore);
    dataSourceCollection = getDataSources();

    sessionPersister = new ArchitectSessionPersister("inbound-" + projectLocation.getUUID(), getWorkspace(),
            new ArchitectPersisterSuperConverter(dataSourceCollection, getWorkspace()));
    sessionPersister.setWorkspaceContainer(this);
    jsonMessageDecoder = new SPJSONMessageDecoder(sessionPersister);

    updater = new ArchitectNetworkConflictResolver(projectLocation, jsonMessageDecoder,
            ClientSideSessionUtils.createHttpClient(projectLocation.getServiceInfo(), cookieStore),
            outboundHttpClient, this);

    jsonPersister = new SPJSONPersister(updater);
    verifyServerLicense(projectLocation);
}
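The foreground executor above amounts to one permanently-alive serial thread, a stand-in for the EDT in tests. The same shape in miniature (a sketch; the thread name is hypothetical):

import java.util.concurrent.*;

// core == max == 1 with core timeout disabled: every runnable executes in
// submission order on the same long-lived thread, like an event dispatch thread.
ThreadPoolExecutor foreground = new ThreadPoolExecutor(1, 1, 5, TimeUnit.MINUTES,
        new LinkedBlockingQueue<>(), r -> new Thread(r, "test-foreground"));
foreground.allowCoreThreadTimeOut(false);
foreground.execute(() -> System.out.println(Thread.currentThread().getName()));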