Example usage for java.util.concurrent Executors newFixedThreadPool

Introduction

This page shows example usage of the java.util.concurrent Executors newFixedThreadPool method.

Prototype

public static ExecutorService newFixedThreadPool(int nThreads) 

Document

Creates a thread pool that reuses a fixed number of threads operating off a shared unbounded queue.
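
A minimal, self-contained sketch of the typical pattern: submit tasks to a fixed pool, collect the Futures, then shut the pool down (the pool size of 4 and the squaring task here are illustrative only):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FixedPoolDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Integer>> results = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            final int n = i;
            results.add(pool.submit(() -> n * n)); // submit a Callable<Integer>
        }
        for (Future<Integer> result : results) {
            System.out.println(result.get()); // blocks until the task completes
        }
        pool.shutdown();
    }
}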

Usage

From source file:com.hdfstoftp.main.HdfsToFtp.java

/**
 * Copies files from HDFS to an FTP server according to the given configuration.
 * 
 * @param config
 *            transfer configuration (source and destination directories,
 *            filters, rename and overwrite behaviour, retry count)
 * @return true when the transfer completes
 * @throws Exception
 *             if reading from HDFS or writing to FTP fails
 */
private static boolean copyFromHDFSToFTP(Config config) throws Exception {
    // Get a handle on the HDFS file system
    Configuration conf = new Configuration();
    FileSystem srcFS = FileSystem.get(conf);
    long start = System.currentTimeMillis();
    boolean isRename = config.isRenameUploaded();
    int retryTimes = config.getRetryTimes();
    // Resolve the destination and source paths
    String dstPath = config.getDestDir();
    Path src = new Path(config.getSouceDir());
    FileStatus fileStatus = srcFS.getFileStatus(src);
    String subDir = null;
    if (fileStatus.isDirectory()) { // the source is a directory
        if (isRename) { // uploaded files are moved into a rename subdirectory
            subDir = Config.RENAME_DIR;
            srcFS.mkdirs(new Path(fileStatus.getPath(), subDir));
        }
        int threadNum = config.getThreadNum();
        // Create a fixed-size thread pool for uploads
        ExecutorService threadPool = Executors.newFixedThreadPool(threadNum);
        // Create a pool of FTP clients, one per worker thread
        FTPClientPool ftpPool = new FTPClientPool(threadNum, new FtpClientFactory(config.getFTPClientConfig()));
        FTPClient ftpClient = ftpPool.borrowObject();
        // Make sure the destination directory exists on the FTP server
        ftpClient.makeDirectory(dstPath);
        ftpPool.returnObject(ftpClient);
        // List the contents of the source directory
        FileStatus contents[] = srcFS.listStatus(src);
        long beginFilter = 0;
        long endFilter = 0;

        if (config.getCommandLine().hasOption("d") || config.getCommandLine().hasOption("h")
                || config.getCommandLine().hasOption("t")) {// ?"["
            beginFilter = System.currentTimeMillis();
            Long[] timeRange = parseTimeRange(config.getCommandLine());
            contents = getNewContents(timeRange, contents);
            endFilter = System.currentTimeMillis();
        }
        // Filter by regular expression
        if (config.getCommandLine().hasOption("r")) { // the -r option supplies the pattern
            beginFilter = System.currentTimeMillis();
            contents = getFilterContents(config.getCommandLine().getOptionValue("r").trim(), contents);
            endFilter = System.currentTimeMillis();
        }
        logger.info("total file count:" + contents.length);
        Map<String, String> fileNameMap = null;
        long beginSkip = 0;
        long endSkip = 0;
        boolean overwrite = true;
        if (config.getCommandLine().hasOption("o")) {
            overwrite = "true".equals(config.getCommandLine().getOptionValue("o").trim());
        }
        if (!overwrite) { // collect the names of files already on the server so they can be skipped
            beginSkip = System.currentTimeMillis();
            fileNameMap = getFileNameMap(dstPath, ftpPool);
            endSkip = System.currentTimeMillis();
        }
        int skipped = 0;

        List<Future<?>> futureList = new ArrayList<Future<?>>();
        for (int i = 0; i < contents.length; i++) {
            if (!overwrite && fileNameMap.containsKey(contents[i].getPath().getName())) {
                // already on the server, skip it
                skipped++;
                logger.info("skipped file: " + contents[i].getPath().getName());
                continue;
            }
            if (contents[i].isDirectory()) {
                continue;
            }
            // Submit an upload task for each remaining file
            Future<?> future = threadPool.submit(new UploadFileTask(srcFS, contents[i].getPath(),
                    new Path(dstPath, contents[i].getPath().getName()), ftpPool, false, isRename, subDir,
                    retryTimes));
            futureList.add(future);
        }
        int transferred = 0;
        int failed = 0;
        for (Future<?> future : futureList) {
            Boolean computeResult = (Boolean) future.get();
            if (computeResult) {
                transferred++;
                if (transferred % 50 == 0 || transferred == contents.length) {
                    logger.info("have transferred: " + transferred + " files");
                }
            } else {
                failed++;
                logger.error("failed transfers: " + failed + " files");
            }
        }
        // Shut down the thread pool
        threadPool.shutdown();
        // Close the FTP client pool
        ftpPool.close();
        // Report statistics
        logger.info("filter time: " + (endFilter - beginFilter) + " ms");
        if (!overwrite) {
            logger.info("skip time: " + (endSkip - beginSkip) + " ms");
        }
        logger.info("total file count: " + contents.length);
        logger.info("total transferred: " + transferred + ", total failed: " + failed + ", total skipped: " + skipped);

    } else { // the source is a single file listing the paths to upload

        BufferedReader reader = null;
        FtpClientFactory factory = new FtpClientFactory(config.getFTPClientConfig());
        FTPClient ftpClient = null;
        InputStream in = null;
        try {
            Path path = fileStatus.getPath();
            reader = new BufferedReader(new FileReader(new File(path.toUri().getPath())));
            String str = null;

            ftpClient = factory.makeObject();

            while ((str = reader.readLine()) != null) {
                String[] fields = str.split("&");
                if (fields.length == 2 && !fields[1].isEmpty()) {
                    Path filePath = new Path(fields[1]);
                    in = srcFS.open(filePath);
                    boolean result = ftpClient.storeFile(dstPath, in);
                    logger.info("FTP reply code: " + ftpClient.getReplyCode());
                    if (result) {
                        logger.info(filePath.toString());
                    } else {
                        logger_failed.info(filePath.toString());
                    }
                    in.close();
                }
            }
        } catch (Exception e) {
            logger.error("transfer failed", e);
        } finally {
            if (in != null) {
                in.close();
            }
            if (reader != null) {
                reader.close();
            }
            if (ftpClient != null) {
                factory.destroyObject(ftpClient);
            }
        }

    }
    long end = System.currentTimeMillis();
    logger.info("finished transfer,total time:" + (end - start) / 1000 + "s");
    return true;
}
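
The example above calls threadPool.shutdown() without awaiting termination, which is safe here because every task has already been joined through Future.get(). When that is not the case, a common companion pattern bounds the wait explicitly; a minimal sketch (the 60-second grace period is illustrative):

threadPool.shutdown();
if (!threadPool.awaitTermination(60, TimeUnit.SECONDS)) {
    threadPool.shutdownNow(); // cancel tasks still running after the grace period
}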

From source file:fr.irit.smac.amasfactory.service.execution.impl.TwoStepAgExecutionService.java

@Override
public void start() {
    // First create the scheduler instance before registering as an agent
    // listener, to ensure that agentAdded and agentRemoved are not called
    // while this.systemStrategy is still null.

    systemStrategy = new TwoStepsSystemStrategy(Collections.emptyList(),
            Executors.newFixedThreadPool(nbThreads));

    agentHandlerService.addAgentEventListener(this);
}

From source file:org.atennert.com.interpretation.InterpreterManager.java

public void init() {
    interpreterPool = Executors.newFixedThreadPool(THREAD_COUNT);
}

From source file:ezid.EZIDClient.java

private void startExecutorLoop() {
    // Query the runtime to see how many CPUs are available, and configure that many threads
    Runtime runtime = Runtime.getRuntime();
    int numCores = runtime.availableProcessors();
    log.info("Number of cores available: " + numCores);
    executor = Executors.newFixedThreadPool(numCores);
}

From source file:com.netflix.spinnaker.front50.config.AzureStorageConfig.java

@Bean
public ApplicationDAO applicationDAO(AzureStorageService storageService, Registry registry) {
    return new DefaultApplicationDAO(storageService, Schedulers.from(Executors.newFixedThreadPool(20)), 15000,
            registry);
}
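
Here the fixed pool is handed to RxJava via Schedulers.from, so Rx work inside the DAO runs on those 20 threads. A minimal sketch of that wiring in isolation, assuming RxJava 1.x on the classpath (the "loaded" payload stands in for real storage calls):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import rx.Observable;
import rx.Scheduler;
import rx.schedulers.Schedulers;

public class RxPoolDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(20);
        // Run subscription work on the fixed pool instead of Rx's default schedulers
        Scheduler scheduler = Schedulers.from(pool);
        Observable.fromCallable(() -> "loaded") // stand-in for a storage fetch
                .subscribeOn(scheduler)
                .subscribe(System.out::println);
        pool.shutdown(); // let the demo's non-daemon pool threads exit
    }
}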

From source file:joachimeichborn.geotag.handlers.OpenTracksHandler.java

public static void openTracks(final String aPath, final String[] aFiles, final TracksRepo aTracksRepo) {
    final Job job = new Job("Reading tracks") {
        @Override
        protected IStatus run(final IProgressMonitor aMonitor) {
            aMonitor.beginTask("Reading " + aFiles.length + " tracks", aFiles.length);

            int threads = 2 * Runtime.getRuntime().availableProcessors();
            logger.fine("Using " + threads + " cores for loading tracks");

            final ExecutorService threadPool = Executors.newFixedThreadPool(threads);

            final List<Future<?>> futures = new LinkedList<>();

            for (final String file : aFiles) {
                final Path trackFile = Paths.get(aPath, file);
                futures.add(threadPool.submit(new TrackReader(aMonitor, trackFile, aTracksRepo)));
            }

            final IStatus status = waitForAllTracksToBeRead(futures);

            aMonitor.done();
            return status;
        }

        private IStatus waitForAllTracksToBeRead(final List<Future<?>> futures) {
            for (final Future<?> future : futures) {
                try {
                    future.get();
                } catch (InterruptedException e) {
                    logger.log(Level.FINE, "Waiting for track to be loaded was interrupted", e);
                    Thread.currentThread().interrupt();
                    return Status.CANCEL_STATUS;
                } catch (ExecutionException e) {
                    logger.log(Level.FINE, "Reading track failed", e);
                    Thread.currentThread().interrupt();
                    return Status.CANCEL_STATUS;
                }
            }

            logger.info("Reading " + futures.size() + " tracks completed");
            return Status.OK_STATUS;
        }
    };
    job.setUser(true);
    job.schedule();
}

From source file:org.kaaproject.kaa.server.appenders.rest.appender.RestLogAppender.java

@Override
protected void initFromConfiguration(LogAppenderDto appender, RestConfig configuration) {
    this.configuration = configuration;
    this.executor = Executors.newFixedThreadPool(configuration.getConnectionPoolSize());
    target = new HttpHost(configuration.getHost(), configuration.getPort(),
            configuration.getSsl() ? "https" : "http");
    HttpClientBuilder builder = HttpClients.custom();
    if (configuration.getUsername() != null && configuration.getPassword() != null) {
        LOG.info("Adding basic auth credentials provider");
        CredentialsProvider credsProvider = new BasicCredentialsProvider();
        credsProvider.setCredentials(new AuthScope(target.getHostName(), target.getPort()),
                new UsernamePasswordCredentials(configuration.getUsername(), configuration.getPassword()));
        builder.setDefaultCredentialsProvider(credsProvider);
    }
    if (!configuration.getVerifySslCert()) {
        LOG.info("Adding trustful ssl context");
        SSLContextBuilder sslBuilder = new SSLContextBuilder();
        try {
            sslBuilder.loadTrustMaterial(null, new TrustSelfSignedStrategy());
            SSLConnectionSocketFactory sslsf = new SSLConnectionSocketFactory(sslBuilder.build());
            builder.setSSLSocketFactory(sslsf);
        } catch (NoSuchAlgorithmException | KeyStoreException | KeyManagementException ex) {
            LOG.error("Failed to init socket factory {}", ex.getMessage(), ex);
        }
    }
    PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    cm.setDefaultMaxPerRoute(configuration.getConnectionPoolSize());
    cm.setMaxTotal(configuration.getConnectionPoolSize());
    builder.setConnectionManager(cm);
    this.client = builder.build();
}

From source file:com.ning.arecibo.collector.TestEventCollectorServer.java

@BeforeMethod(alwaysRun = true)
public void setUp() throws Exception {
    final String ddl = IOUtils.toString(TestEventCollectorServer.class.getResourceAsStream("/collector.sql"));

    helper.startMysql();
    helper.initDb(ddl);

    Executors.newFixedThreadPool(1).submit(new Runnable() {
        @Override
        public void run() {
            try {
                server.start();
            } catch (Exception e) {
                Assert.fail();
            }
        }
    });

    while (!server.isRunning()) {
        Thread.sleep(1000);
    }

    Assert.assertEquals(eventHandlers.size(), 1);
    timelineEventHandler = (TimelineEventHandler) eventHandlers.get(0);
}
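
The Future returned by submit is discarded above, so a failing server.start() surfaces only through Assert.fail() on the pool thread. A sketch of an alternative, assuming the same server field, that keeps the Future so the test thread can rethrow startup failures:

    ExecutorService pool = Executors.newFixedThreadPool(1);
    Future<?> startup = pool.submit(() -> {
        server.start(); // a Callable, so checked exceptions propagate into the Future
        return null;
    });

    while (!server.isRunning()) {
        if (startup.isDone()) {
            startup.get(); // rethrows a startup failure as ExecutionException
        }
        Thread.sleep(1000);
    }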

From source file:com.turn.griffin.utils.GriffinConsumer.java

private void run(int threadCount) {

    /* Create set of streams */
    List<KafkaStream<byte[], byte[]>> streams = this.consumer
            .createMessageStreamsByFilter(new Whitelist(this.topicRegEx), threadCount);

    /* Create a thread pool */
    this.kafkaStreamsExecutor = Executors.newFixedThreadPool(streams.size());

    logger.debug(String.format("Consuming topic:%s with %s streams", this.topicRegEx, streams.size()));
    for (KafkaStream<byte[], byte[]> stream : streams) {
        kafkaStreamsExecutor.submit(new KafkaConsumer<>(stream, this.msgQueue));
    }
}

From source file:com.gs.collections.impl.jmh.AggregateByTest.java

@Before
@Setup(Level.Iteration)
public void setUp() {
    this.executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    Collections.shuffle(this.gscPositions);
    Collections.shuffle(this.jdkPositions);
}
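
Because setUp builds a fresh pool for every iteration, a matching teardown is needed so threads do not leak across iterations; a minimal sketch, assuming the same executorService field:

@TearDown(Level.Iteration)
public void tearDown() throws InterruptedException {
    this.executorService.shutdownNow();
    this.executorService.awaitTermination(1L, TimeUnit.SECONDS);
}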