Example usage for java.util.concurrent Executors newFixedThreadPool

Introduction

On this page you can find example usage for java.util.concurrent Executors.newFixedThreadPool.

Prototype

public static ExecutorService newFixedThreadPool(int nThreads) 

Document

Creates a thread pool that reuses a fixed number of threads operating off a shared unbounded queue.
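
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the source files below; the class name and task bodies are invented for illustration). It shows the typical lifecycle of a fixed pool: create it, submit more tasks than threads so the extras queue up, then shut it down and wait for the queue to drain.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FixedPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        // At most 4 tasks run concurrently; the rest wait in the shared unbounded queue
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId + " ran on " + Thread.currentThread().getName()));
        }
        pool.shutdown();                             // stop accepting new tasks
        pool.awaitTermination(10, TimeUnit.SECONDS); // wait for queued tasks to finish
    }
}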

Usage

From source file:com.yattatech.dbtc.facade.SystemFacade.java

private SystemFacade() {
    mExecutor = Executors.newFixedThreadPool(Constants.DEFAULT_THREADS_NUM);
    // To guarantee that the executor will be shut down as soon as
    // the VM halts
    Runtime.getRuntime().addShutdownHook(new Thread() {

        /*
         * (non-Javadoc)
         * @see java.lang.Thread#run()
         */
        @Override
        public void run() {
            mExecutor.shutdown();
        }
    });
}

From source file:com.liubida.sohu.android.wuhan.HttpRead.java

public boolean init(String accessToken) {
    // Already initialized; nothing more to do
    if (bInit) {
        return true;
    }
    // Reject a blank access token
    if (StringUtils.isBlank(accessToken)) {
        return false;
    }
    // Create the worker thread pool
    exec = Executors.newFixedThreadPool(THREAD_POOL_SIZE);
    try {
        this.accessToken = accessToken;
        this.durl = new DirectUrl2(accessToken);

        /*
         * Optionally start THREAD_POOL_SIZE AsyncTask workers that take
         * tasks from taskPool and open connections through durl:
         */
        //            for (int i = 0; i < THREAD_POOL_SIZE; i++) {
        //                exec.execute(new AsyncTask(durl, taskPool));
        //            }
        return (bInit = true);
    } catch (Exception e) {
        bInit = false;
        return false;
    }
}

From source file:io.druid.server.initialization.JettyTest.java

@Test
@Ignore // this test will deadlock if it hits an issue, so ignored by default
public void testTimeouts() throws Exception {
    // test for request timeouts properly not locking up all threads
    final Executor executor = Executors.newFixedThreadPool(100);
    final AtomicLong count = new AtomicLong(0);
    final CountDownLatch latch = new CountDownLatch(1000);
    for (int i = 0; i < 10000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        long startTime = System.currentTimeMillis();
                        long startTime2 = 0;
                        try {
                            ListenableFuture<StatusResponseHolder> go = client.go(
                                    new Request(HttpMethod.GET,
                                            new URL("http://localhost:" + port + "/slow/hello")),
                                    new StatusResponseHandler(Charset.defaultCharset()));
                            startTime2 = System.currentTimeMillis();
                            go.get();
                        } catch (Exception e) {
                            e.printStackTrace();
                        } finally {
                            System.out.println("Response time client" + (System.currentTimeMillis() - startTime)
                                    + "time taken for getting future"
                                    + (System.currentTimeMillis() - startTime2) + "Counter "
                                    + count.incrementAndGet());
                            latch.countDown();

                        }
                    }
                });
            }
        });
    }

    latch.await();
}

From source file:com.comcast.cats.vision.concurrent.RemoteWorkerTest.java

public void example() throws InterruptedException {
    CatsEventDispatcher dispatcher = new CatsEventDispatcherImpl();

    Settop settop = new MockSettop() {
        private static final long serialVersionUID = 1L;

        public boolean pressKey(RemoteCommand command) {
            LOGGER.info("pressKey");
            return true;
        }
    };

    dispatcher.addListener(this, CatsEventType.REMOTE_RESPONSE);

    RemoteEvent evt = new RemoteEvent(ActionType.PRESS, RemoteCommand.GUIDE, "10", null);
    ExecutorService executor = Executors.newFixedThreadPool(5);

    executor.execute(new PressKeyWorker(settop, evt, dispatcher));
    executor.shutdown(); // let awaitTermination return as soon as the worker completes
    executor.awaitTermination(5, TimeUnit.SECONDS);
}

From source file:edu.iu.daal_pca.PCAUtil.java

/**
 * Generate data and upload to the data dir.
 *
 * @param numOfDataPoints
 * @param vectorSize
 * @param numPointFiles
 * @param localInputDir
 * @param fs
 * @param dataDir
 * @throws IOException
 * @throws InterruptedException
 * @throws ExecutionException
 */
static void generatePoints(int numOfDataPoints, int vectorSize, int numPointFiles, String localInputDir,
        FileSystem fs, Path dataDir) throws IOException, InterruptedException, ExecutionException {
    int pointsPerFile = numOfDataPoints / numPointFiles;
    System.out.println("Writing " + pointsPerFile + " vectors to a file");
    // Check data directory
    if (fs.exists(dataDir)) {
        fs.delete(dataDir, true);
    }
    // Check local directory
    File localDir = new File(localInputDir);
    // If existed, regenerate data
    if (localDir.exists() && localDir.isDirectory()) {
        for (File file : localDir.listFiles()) {
            file.delete();
        }
        localDir.delete();
    }
    boolean success = localDir.mkdir();
    if (success) {
        System.out.println("Directory: " + localInputDir + " created");
    }
    if (pointsPerFile == 0) {
        throw new IOException("No point to write.");
    }
    // Create random data points
    int poolSize = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(poolSize);
    List<Future<?>> futures = new LinkedList<Future<?>>();
    for (int k = 0; k < numPointFiles; k++) {
        // Future<?> f = service.submit(new DataGenRunnable(pointsPerFile, localInputDir, Integer.toString(k), vectorSize));
        Future<?> f = service
                .submit(new DataGenMMDense(pointsPerFile, localInputDir, Integer.toString(k), vectorSize));
        futures.add(f); // keep the Future so we can wait for the file to be written
    }
    for (Future<?> f : futures) {
        f.get();
    }
    // Shut down the executor service so that this
    // thread can exit
    service.shutdownNow();
    // Wrap to path object
    Path localInput = new Path(localInputDir);
    fs.copyFromLocalFile(localInput, dataDir);
    DeleteFileFolder(localInputDir);
}

From source file:com.redhat.red.build.koji.ExternalHttpClientTest.java

@Before
public void setUp() throws KojiClientException {
    String hubUrl = System.getProperty("koji.hubUrl");

    assumeTrue(hubUrl != null);

    KojiConfig config = new SimpleKojiConfigBuilder().withKojiURL(hubUrl).build();
    client = new KojiClient(config, null, Executors.newFixedThreadPool(5));
}

From source file:eu.serco.dhus.plugin.slstr.SlstrPlugin.java

public SlstrPlugin() {
    executor = Executors.newFixedThreadPool(MAX_THREADS_NUMBER);
    map = new HashMap<String, String>();
    map.put("SL_1_RBT___", "SL_1");
    map.put("SL_2_WCT___", "SL_2");
    map.put("SL_2_WST___", "SL_2");
    map.put("SL_2_LST___", "SL_2");

    try {

        loadTaskTables();
        loadSelectionRulesProperties();
        externalDHuSUrl = ConfigurationManager.getExternalDHuSHost();
        hashedString = ConfigurationManager.getHashedConnectionString();
    } catch (ParserConfigurationException pce) {
        pce.printStackTrace();
    } catch (SAXException saxe) {
        saxe.printStackTrace();
    } catch (IOException ioe) {
        ioe.printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:com.iveely.computing.node.Communicator.java

private Communicator() {
    this.slots = new ArrayList<>();
    int slotCount = ConfigWrapper.get().getSlave().getSlotCount();
    int port = ConfigWrapper.get().getSlave().getPort();
    String slaveRoot = ConfigWrapper.get().getSlave().getRoot();
    this.threadPool = Executors.newFixedThreadPool(slotCount);
    for (int i = 0; i < slotCount; i++) {
        Slot slot = new Slot(i);
        this.slots.add(slot);
        Coordinator.getInstance().setNodeValue(
                slaveRoot + "/" + Internet.getLocalIpAddress() + "," + port + "/slots/" + slot.getName(),
                slot.getConnectString());
        this.threadPool.execute(slot);
    }
}

From source file:edu.iu.daal_svd.SVDUtil.java

/**
 * Generate data and upload to the data dir.
 *
 * @param numOfDataPoints
 * @param vectorSize
 * @param numPointFiles
 * @param localInputDir
 * @param fs
 * @param dataDir
 * @throws IOException
 * @throws InterruptedException
 * @throws ExecutionException
 */
static void generatePoints(int numOfDataPoints, int vectorSize, int numPointFiles, String localInputDir,
        FileSystem fs, Path dataDir) throws IOException, InterruptedException, ExecutionException {
    int pointsPerFile = numOfDataPoints / numPointFiles;
    System.out.println("Writing " + pointsPerFile + " vectors to a file");
    // Check data directory
    if (fs.exists(dataDir)) {
        fs.delete(dataDir, true);
    }
    // Check local directory
    File localDir = new File(localInputDir);
    // If existed, regenerate data
    if (localDir.exists() && localDir.isDirectory()) {
        for (File file : localDir.listFiles()) {
            file.delete();
        }
        localDir.delete();
    }
    boolean success = localDir.mkdir();
    if (success) {
        System.out.println("Directory: " + localInputDir + " created");
    }
    if (pointsPerFile == 0) {
        throw new IOException("No point to write.");
    }
    // Create random data points
    int poolSize = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(poolSize);
    List<Future<?>> futures = new LinkedList<Future<?>>();
    for (int k = 0; k < numPointFiles; k++) {
        // Future<?> f = service.submit(new DataGenRunnable(pointsPerFile, localInputDir, Integer.toString(k), vectorSize));
        Future<?> f = service
                .submit(new DataGenMMDense(pointsPerFile, localInputDir, Integer.toString(k), vectorSize));

        futures.add(f); // keep the Future so we can wait for the file to be written
    }
    for (Future<?> f : futures) {
        f.get();
    }
    // Shut down the executor service so that this
    // thread can exit
    service.shutdownNow();
    // Wrap to path object
    Path localInput = new Path(localInputDir);
    fs.copyFromLocalFile(localInput, dataDir);
    DeleteFileFolder(localInputDir);
}

From source file:eu.freme.bpt.service.EPublishing.java

@Override
public void run(FailurePolicy failurePolicy, int nrThreads, Callback callback) {
    logger.info("Running service EPublishing");
    ExecutorService executorService = Executors.newFixedThreadPool(nrThreads);
    Unirest.setTimeouts(30000, 300000); // TODO: configurable?

    // iterate over zip files
    File[] zipFiles = inputDirectory.listFiles((dir, name) -> {
        return name.endsWith(".zip");
    });

    for (final File zipFile : zipFiles) {
        executorService.submit(() -> {
            File jsonFile = new File(zipFile.getParentFile(), zipFile.getName().replace(".zip", ".json"));
            if (jsonFile.exists()) {
                File outputFile = new File(outputDirectory, zipFile.getName().replace(".zip", ".epub"));
                try {
                    String json = new String(Files.readAllBytes(jsonFile.toPath()), StandardCharsets.UTF_8);
                    HttpResponse<InputStream> response = Unirest.post(endpoint).field("htmlZip", zipFile)
                            .field("metadata", json).asBinary();
                    if (response.getStatus() == 200) {
                        logger.debug("Request alright.");
                        try (InputStream responseInput = response.getBody();
                                OutputStream out = new FileOutputStream(outputFile)) {
                            IOUtils.copy(responseInput, out);
                            callback.onTaskComplete(zipFile, outputFile);
                        }
                        //Files.write(outputFile.toPath(), IOUtils.toByteArray())
                    } else {
                        String body = IOUtils.toString(response.getBody());
                        String msg = "Error response from service " + endpoint + ": Status "
                                + response.getStatus() + ": " + response.getStatusText() + " - " + body;
                        logger.error(msg);
                        callback.onTaskFails(zipFile, outputFile, msg);
                        if (!failurePolicy.check()) {
                            System.exit(3);
                        }
                    }
                } catch (IOException e) {
                    logger.error("Error while reading json file: {}", jsonFile, e);
                    callback.onTaskFails(zipFile, outputFile,
                            "Error while reading json file: " + jsonFile + " " + e.getMessage());
                    if (!failurePolicy.check()) {
                        System.exit(3);
                    }
                } catch (UnirestException e) {
                    logger.error("Request to {} failed." + endpoint, e);
                    callback.onTaskFails(zipFile, outputFile,
                            "Request to " + endpoint + " failed. " + e.getMessage());
                    if (!failurePolicy.check()) {
                        System.exit(3);
                    }
                }

            } else {
                String msg = "Missing metatada file " + jsonFile + " for input file " + zipFile;
                logger.error(msg);
                callback.onTaskFails(zipFile, null, msg);
                if (!failurePolicy.check()) {
                    System.exit(3);
                }
            }
        });
    }
}