Example usage for the java.util.concurrent ExecutorCompletionService(Executor) constructor

Introduction

This page shows example usage of the java.util.concurrent ExecutorCompletionService(Executor) constructor.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.
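
All of the examples below follow the same basic pattern: wrap an existing Executor, submit a batch of tasks, then drain the results with take().get() in completion order rather than submission order. The following minimal, self-contained sketch shows that pattern in isolation; the class name, pool size, and task bodies are illustrative assumptions, not code from any of the examples.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4); // pool size is an arbitrary choice
        CompletionService<Integer> completion = new ExecutorCompletionService<>(pool);
        try {
            // Submit independent tasks; each result is queued as soon as its task finishes.
            for (int i = 0; i < 10; i++) {
                final int n = i;
                completion.submit(() -> n * n); // placeholder task: squares its input
            }
            // Drain results in completion order, not submission order.
            for (int i = 0; i < 10; i++) {
                System.out.println(completion.take().get());
            }
        } finally {
            pool.shutdown();
        }
    }
}

Note that an ExecutorCompletionService never owns its executor: as in the examples below, the caller remains responsible for shutting the pool down.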

Usage

From source file:uniol.apt.analysis.synthesize.FindWords.java

static private void generateList(final PNProperties properties, SortedSet<Character> alphabet,
        final boolean quickFail, WordCallback wordCallback, LengthDoneCallback lengthDoneCallback,
        ForkJoinPool executor) throws PreconditionFailedException {
    if (properties.isPlain() && properties.isKMarking())
        throw new PreconditionFailedException("The combination of plain and k-marking is not supported"
                + ", because 'minimal unsolvable' cannot be defined");

    CompletionService<Pair<String, SynthesizePN>> completion = new ExecutorCompletionService<>(executor);
    List<String> currentLevel = Collections.singletonList("");
    while (!currentLevel.isEmpty()) {

        // Lazily create new Callables to avoid OOM errors
        Iterator<Callable<Pair<String, SynthesizePN>>> jobGenerator = IteratorUtils.transformedIterator(
                new NextWordsIterator(properties, alphabet, currentLevel),
                new Transformer<String, Callable<Pair<String, SynthesizePN>>>() {
                    @Override
                    public Callable<Pair<String, SynthesizePN>> transform(final String word) {
                        return new Callable<Pair<String, SynthesizePN>>() {
                            @Override
                            public Pair<String, SynthesizePN> call() {
                                List<Character> wordList = toList(word);
                                SynthesizePN synthesize = solveWord(wordList, properties, quickFail);
                                return new Pair<>(word, synthesize);
                            }
                        };
                    }
                });

        // Wait for and handle results
        List<String> nextLevel = new ArrayList<>();
        int tasksSubmitted = submitTasks(executor, completion, jobGenerator);
        int tasksFinished = 0;
        while (tasksSubmitted != tasksFinished) {
            String word;
            SynthesizePN synthesize;
            try {
                Pair<String, SynthesizePN> pair = completion.take().get();
                word = pair.getFirst();
                synthesize = pair.getSecond();
            } catch (ExecutionException e) {
                throw new RuntimeException(e);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }

            List<Character> wordList = toList(word);
            wordCallback.call(wordList, word, synthesize);
            if (synthesize.wasSuccessfullySeparated()) {
                nextLevel.add(word);
            }
            tasksFinished++;

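            // Top up the pipeline: pull and submit more jobs from the lazy generator as results are consumed.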
            tasksSubmitted += submitTasks(executor, completion, jobGenerator);
        }

        int currentLength = currentLevel.iterator().next().length() + 1;
        lengthDoneCallback.call(currentLength);
        currentLevel = nextLevel;
        Collections.sort(currentLevel);
    }
}

From source file:com.palantir.atlasdb.transaction.impl.SnapshotTransactionTest.java

@Test
public void testConcurrentWriteWriteConflicts() throws InterruptedException, ExecutionException {
    CompletionService<Void> executor = new ExecutorCompletionService<Void>(PTExecutors.newFixedThreadPool(8));
    final Cell cell = Cell.create("row1".getBytes(), "column1".getBytes());
    Transaction t1 = txManager.createNewTransaction();
    t1.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(0L)));
    t1.commit();
    for (int i = 0; i < 1000; i++) {
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                txManager.runTaskWithRetry(new TxTask() {
                    @Override
                    public Void execute(Transaction t) throws RuntimeException {
                        long prev = EncodingUtils
                                .decodeVarLong(t.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
                        t.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(prev + 1)));
                        return null;
                    }
                });
                return null;
            }
        });
    }
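    // Drain all 1000 futures; get() rethrows any task failure as an ExecutionException.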
    for (int i = 0; i < 1000; i++) {
        Future<Void> future = executor.take();
        future.get();
    }
    t1 = txManager.createNewTransaction();
    long val = EncodingUtils.decodeVarLong(t1.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
    assertEquals(1000, val);
}

From source file:com.laudandjolynn.mytv.crawler.tvmao.TvMaoCrawler.java

/**
 * Crawls all TV stations from the given cached page files.
 *
 * @param files the cached HTML files to parse for TV stations
 * @return the list of TV stations found in the files
 */
private List<TvStation> crawlAllTvStationFromFile(File[] files) {
    logger.info("crawl all tv station from files.");
    List<TvStation> resultList = new ArrayList<TvStation>();
    ThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("Mytv_Crawl_All_TV_Station_Of_TvMao_%d").build();
    ExecutorService executorService = Executors.newFixedThreadPool(2, threadFactory);
    CompletionService<List<TvStation>> completionService = new ExecutorCompletionService<List<TvStation>>(
            executorService);
    int size = files == null ? 0 : files.length;
    for (int i = 0; i < size; i++) {
        final File file = files[i];
        Callable<List<TvStation>> task = new Callable<List<TvStation>>() {
            @Override
            public List<TvStation> call() throws Exception {
                String filePath = file.getPath();
                String classifyEnds = filePath.substring(0, filePath.lastIndexOf(Constant.UNDERLINE));
                String city = classifyEnds.substring(classifyEnds.lastIndexOf(Constant.UNDERLINE) + 1);
                String html = null;
                try {
                    logger.debug("parse tv station file: " + filePath);
                    html = MyTvUtils.readAsHtml(filePath);
                } catch (IOException e) {
                    logger.error("read as xml error: " + filePath, e);
                    return null;
                }
                return parseTvStation(city, html);
            }
        };
        completionService.submit(task);
    }
    executorService.shutdown();
    int count = 0;
    while (count < size) {
        try {
            List<TvStation> stationList = completionService.take().get();
            if (stationList != null) {
                resultList.addAll(stationList);
            }
        } catch (InterruptedException e) {
            logger.error("crawl all tv station task interrupted.", e);
        } catch (ExecutionException e) {
            logger.error("crawl all tv station task executed fail.", e);
        }
        count++;
    }
    return resultList;
}

From source file:org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil.java

public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs,
        final SnapshotManifest manifest, final StoreFileVisitor visitor) throws IOException {
    final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
    final Path snapshotDir = manifest.getSnapshotDir();

    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
    if (regionManifests == null || regionManifests.size() == 0) {
        LOG.debug("No manifest files present: " + snapshotDir);
        return;
    }

    ExecutorService exec = SnapshotManifest.createExecutor(conf, "VerifySnapshot");
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(exec);
    try {
        for (final SnapshotRegionManifest regionManifest : regionManifests) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws IOException {
                    visitRegionStoreFiles(regionManifest, visitor);
                    return null;
                }
            });
        }
        try {
            for (int i = 0; i < regionManifests.size(); ++i) {
                completionService.take().get();
            }
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.getMessage());
        } catch (ExecutionException e) {
            if (e.getCause() instanceof CorruptedSnapshotException) {
                throw new CorruptedSnapshotException(e.getCause().getMessage(), snapshotDesc);
            } else {
                IOException ex = new IOException();
                ex.initCause(e.getCause());
                throw ex;
            }
        }
    } finally {
        exec.shutdown();
    }
}

From source file:org.apache.hadoop.hive.llap.cli.LlapServiceDriver.java

private int run(String[] args) throws Exception {
    LlapOptionsProcessor optionsProcessor = new LlapOptionsProcessor();
    final LlapOptions options = optionsProcessor.processOptions(args);

    final Properties propsDirectOptions = new Properties();

    if (options == null) {
        // help
        return 1;
    }

    // Working directory.
    Path tmpDir = new Path(options.getDirectory());

    if (conf == null) {
        throw new Exception("Cannot load any configuration to run command");
    }

    final long t0 = System.nanoTime();

    final FileSystem fs = FileSystem.get(conf);
    final FileSystem lfs = FileSystem.getLocal(conf).getRawFileSystem();

    int threadCount = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
    final ExecutorService executor = Executors.newFixedThreadPool(threadCount,
            new ThreadFactoryBuilder().setNameFormat("llap-pkg-%d").build());
    final CompletionService<Void> asyncRunner = new ExecutorCompletionService<Void>(executor);

    int rc = 0;
    try {

        // needed so that the file is actually loaded into configuration.
        for (String f : NEEDED_CONFIGS) {
            conf.addResource(f);
            if (conf.getResource(f) == null) {
                throw new Exception("Unable to find required config file: " + f);
            }
        }
        for (String f : OPTIONAL_CONFIGS) {
            conf.addResource(f);
        }

        conf.reloadConfiguration();

        populateConfWithLlapProperties(conf, options.getConfig());

        if (options.getName() != null) {
            // update service registry configs - caveat: this has nothing to do with the actual settings
            // as read by the AM
            // if needed, use --hiveconf llap.daemon.service.hosts=@llap0 to dynamically switch between
            // instances
            conf.set(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
        }

        if (options.getLogger() != null) {
            HiveConf.setVar(conf, ConfVars.LLAP_DAEMON_LOGGER, options.getLogger());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_LOGGER.varname, options.getLogger());
        }
        boolean isDirect = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT);

        if (options.getSize() != -1) {
            if (options.getCache() != -1) {
                if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED) == false) {
                    // direct heap allocations need to be safer
                    Preconditions.checkArgument(options.getCache() < options.getSize(),
                            "Cache size (" + LlapUtil.humanReadableByteCount(options.getCache())
                                    + ") has to be smaller" + " than the container sizing ("
                                    + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
                } else if (options.getCache() < options.getSize()) {
                    LOG.warn("Note that this might need YARN physical memory monitoring to be turned off "
                            + "(yarn.nodemanager.pmem-check-enabled=false)");
                }
            }
            if (options.getXmx() != -1) {
                Preconditions.checkArgument(options.getXmx() < options.getSize(),
                        "Working memory (Xmx=" + LlapUtil.humanReadableByteCount(options.getXmx())
                                + ") has to be" + " smaller than the container sizing ("
                                + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
            }
            if (isDirect && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED)) {
                // direct and not memory mapped
                Preconditions.checkArgument(options.getXmx() + options.getCache() <= options.getSize(),
                        "Working memory (Xmx=" + LlapUtil.humanReadableByteCount(options.getXmx())
                                + ") + cache size (" + LlapUtil.humanReadableByteCount(options.getCache())
                                + ") has to be smaller than the container sizing ("
                                + LlapUtil.humanReadableByteCount(options.getSize()) + ")");
            }
        }

        if (options.getExecutors() != -1) {
            conf.setLong(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname, options.getExecutors());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname,
                    String.valueOf(options.getExecutors()));
            // TODO: vcpu settings - possibly when DRFA works right
        }

        if (options.getIoThreads() != -1) {
            conf.setLong(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname, options.getIoThreads());
            propsDirectOptions.setProperty(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname,
                    String.valueOf(options.getIoThreads()));
        }

        long cache = -1, xmx = -1;
        if (options.getCache() != -1) {
            cache = options.getCache();
            conf.set(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname, Long.toString(cache));
            propsDirectOptions.setProperty(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname,
                    Long.toString(cache));
        }

        if (options.getXmx() != -1) {
            // Needs more explanation here
            // Xmx is not the max heap value in JDK8. You need to subtract 50% of the survivor fraction
            // from this, to get actual usable memory before it goes into GC
            xmx = options.getXmx();
            long xmxMb = (xmx / (1024L * 1024L));
            conf.setLong(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname, xmxMb);
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname,
                    String.valueOf(xmxMb));
        }

        long size = options.getSize();
        if (size == -1) {
            long heapSize = xmx;
            if (!isDirect) {
                heapSize += cache;
            }
            size = Math.min((long) (heapSize * 1.2), heapSize + 1024L * 1024 * 1024);
            if (isDirect) {
                size += cache;
            }
        }
        long containerSize = size / (1024 * 1024);
        final long minAlloc = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1);
        Preconditions.checkArgument(containerSize >= minAlloc,
                "Container size (" + LlapUtil.humanReadableByteCount(options.getSize()) + ") should be greater"
                        + " than minimum allocation("
                        + LlapUtil.humanReadableByteCount(minAlloc * 1024L * 1024L) + ")");
        conf.setLong(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname, containerSize);
        propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname,
                String.valueOf(containerSize));

        LOG.info("Memory settings: container memory: {} executor memory: {} cache memory: {}",
                LlapUtil.humanReadableByteCount(options.getSize()),
                LlapUtil.humanReadableByteCount(options.getXmx()),
                LlapUtil.humanReadableByteCount(options.getCache()));

        if (options.getLlapQueueName() != null && !options.getLlapQueueName().isEmpty()) {
            conf.set(ConfVars.LLAP_DAEMON_QUEUE_NAME.varname, options.getLlapQueueName());
            propsDirectOptions.setProperty(ConfVars.LLAP_DAEMON_QUEUE_NAME.varname, options.getLlapQueueName());
        }

        final URL logger = conf.getResource(LlapConstants.LOG4j2_PROPERTIES_FILE);

        if (null == logger) {
            throw new Exception("Unable to find required config file: llap-daemon-log4j2.properties");
        }

        Path home = new Path(System.getenv("HIVE_HOME"));
        Path scriptParent = new Path(new Path(home, "scripts"), "llap");
        Path scripts = new Path(scriptParent, "bin");

        if (!lfs.exists(home)) {
            throw new Exception("Unable to find HIVE_HOME:" + home);
        } else if (!lfs.exists(scripts)) {
            LOG.warn("Unable to find llap scripts:" + scripts);
        }

        final Path libDir = new Path(tmpDir, "lib");
        final Path tezDir = new Path(libDir, "tez");
        final Path udfDir = new Path(libDir, "udfs");
        final Path confPath = new Path(tmpDir, "conf");
        if (!lfs.mkdirs(confPath)) {
            LOG.warn("mkdirs for " + confPath + " returned false");
        }
        if (!lfs.mkdirs(tezDir)) {
            LOG.warn("mkdirs for " + tezDir + " returned false");
        }
        if (!lfs.mkdirs(udfDir)) {
            LOG.warn("mkdirs for " + udfDir + " returned false");
        }

        NamedCallable<Void> downloadTez = new NamedCallable<Void>("downloadTez") {
            @Override
            public Void call() throws Exception {
                synchronized (fs) {
                    String tezLibs = conf.get(TezConfiguration.TEZ_LIB_URIS);
                    if (tezLibs == null) {
                        LOG.warn("Missing tez.lib.uris in tez-site.xml");
                    }
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Copying tez libs from " + tezLibs);
                    }
                    lfs.mkdirs(tezDir);
                    fs.copyToLocalFile(new Path(tezLibs), new Path(libDir, "tez.tar.gz"));
                    CompressionUtils.unTar(new Path(libDir, "tez.tar.gz").toString(), tezDir.toString(), true);
                    lfs.delete(new Path(libDir, "tez.tar.gz"), false);
                }
                return null;
            }
        };

        NamedCallable<Void> copyLocalJars = new NamedCallable<Void>("copyLocalJars") {
            @Override
            public Void call() throws Exception {
                Class<?>[] dependencies = new Class<?>[] { LlapDaemonProtocolProtos.class, // llap-common
                        LlapTezUtils.class, // llap-tez
                        LlapInputFormat.class, // llap-server
                        HiveInputFormat.class, // hive-exec
                        SslContextFactory.class, // hive-common (https deps)
                        Rule.class, // Jetty rewrite class
                        RegistryUtils.ServiceRecordMarshal.class, // ZK registry
                        // log4j2
                        com.lmax.disruptor.RingBuffer.class, // disruptor
                        org.apache.logging.log4j.Logger.class, // log4j-api
                        org.apache.logging.log4j.core.Appender.class, // log4j-core
                        org.apache.logging.slf4j.Log4jLogger.class, // log4j-slf4j
                        // log4j-1.2-API needed for NDC
                        org.apache.log4j.NDC.class, };

                for (Class<?> c : dependencies) {
                    Path jarPath = new Path(Utilities.jarFinderGetJar(c));
                    lfs.copyFromLocalFile(jarPath, libDir);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Copying " + jarPath + " to " + libDir);
                    }
                }
                return null;
            }
        };

        // copy default aux classes (json/hbase)

        NamedCallable<Void> copyAuxJars = new NamedCallable<Void>("copyAuxJars") {
            @Override
            public Void call() throws Exception {
                for (String className : DEFAULT_AUX_CLASSES) {
                    localizeJarForClass(lfs, libDir, className, false);
                }
                Collection<String> codecs = conf.getStringCollection("io.compression.codecs");
                if (codecs != null) {
                    for (String codecClassName : codecs) {
                        localizeJarForClass(lfs, libDir, codecClassName, false);
                    }
                }

                if (options.getIsHBase()) {
                    try {
                        localizeJarForClass(lfs, libDir, HBASE_SERDE_CLASS, true);
                        Job fakeJob = new Job(new JobConf()); // HBase API is convoluted.
                        TableMapReduceUtil.addDependencyJars(fakeJob);
                        Collection<String> hbaseJars = fakeJob.getConfiguration()
                                .getStringCollection("tmpjars");
                        for (String jarPath : hbaseJars) {
                            if (!jarPath.isEmpty()) {
                                lfs.copyFromLocalFile(new Path(jarPath), libDir);
                            }
                        }
                    } catch (Throwable t) {
                        String err = "Failed to add HBase jars. Use --auxhbase=false to avoid localizing them";
                        LOG.error(err);
                        System.err.println(err);
                        throw new RuntimeException(t);
                    }
                }

                HashSet<String> auxJars = new HashSet<>();
                // There are many ways to have AUX jars in Hive... sigh
                if (options.getIsHiveAux()) {
                    // Note: we don't add ADDED jars, RELOADABLE jars, etc. That is by design; there are too many ways
                    // to add jars in Hive, some of which are session/etc. specific. Env + conf + arg should be enough.
                    addAuxJarsToSet(auxJars, conf.getAuxJars());
                    addAuxJarsToSet(auxJars, System.getenv("HIVE_AUX_JARS_PATH"));
                    LOG.info("Adding the following aux jars from the environment and configs: " + auxJars);
                }

                addAuxJarsToSet(auxJars, options.getAuxJars());
                for (String jarPath : auxJars) {
                    lfs.copyFromLocalFile(new Path(jarPath), libDir);
                }
                return null;
            }

            private void addAuxJarsToSet(HashSet<String> auxJarSet, String auxJars) {
                if (auxJars != null && !auxJars.isEmpty()) {
                    // TODO: transitive dependencies warning?
                    String[] jarPaths = auxJars.split(",");
                    for (String jarPath : jarPaths) {
                        if (!jarPath.isEmpty()) {
                            auxJarSet.add(jarPath);
                        }
                    }
                }
            }
        };

        NamedCallable<Void> copyUdfJars = new NamedCallable<Void>("copyUdfJars") {
            @Override
            public Void call() throws Exception {
                // UDFs
                final Set<String> allowedUdfs;

                if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOW_PERMANENT_FNS)) {
                    synchronized (fs) {
                        allowedUdfs = downloadPermanentFunctions(conf, udfDir);
                    }
                } else {
                    allowedUdfs = Collections.emptySet();
                }

                PrintWriter udfStream = new PrintWriter(lfs
                        .create(new Path(confPath, StaticPermanentFunctionChecker.PERMANENT_FUNCTIONS_LIST)));
                for (String udfClass : allowedUdfs) {
                    udfStream.println(udfClass);
                }

                udfStream.close();
                return null;
            }
        };

        String java_home;
        if (options.getJavaPath() == null || options.getJavaPath().isEmpty()) {
            java_home = System.getenv("JAVA_HOME");
            String jre_home = System.getProperty("java.home");
            if (java_home == null) {
                java_home = jre_home;
            } else if (!java_home.equals(jre_home)) {
                LOG.warn("Java versions might not match : JAVA_HOME=[{}],process jre=[{}]", java_home,
                        jre_home);
            }
        } else {
            java_home = options.getJavaPath();
        }
        if (java_home == null || java_home.isEmpty()) {
            throw new RuntimeException(
                    "Could not determine JAVA_HOME from command line parameters, environment or system properties");
        }
        LOG.info("Using [{}] for JAVA_HOME", java_home);

        NamedCallable<Void> copyConfigs = new NamedCallable<Void>("copyConfigs") {
            @Override
            public Void call() throws Exception {
                // Copy over the mandatory configs for the package.
                for (String f : NEEDED_CONFIGS) {
                    copyConfig(lfs, confPath, f);
                }
                for (String f : OPTIONAL_CONFIGS) {
                    try {
                        copyConfig(lfs, confPath, f);
                    } catch (Throwable t) {
                        LOG.info("Error getting an optional config " + f + "; ignoring: " + t.getMessage());
                    }
                }
                createLlapDaemonConfig(lfs, confPath, conf, propsDirectOptions, options.getConfig());
                setUpLogAndMetricConfigs(lfs, logger, confPath);
                return null;
            }
        };

        @SuppressWarnings("unchecked")
        final NamedCallable<Void>[] asyncWork = new NamedCallable[] { downloadTez, copyUdfJars, copyLocalJars,
                copyAuxJars, copyConfigs };
        @SuppressWarnings("unchecked")
        final Future<Void>[] asyncResults = new Future[asyncWork.length];
        for (int i = 0; i < asyncWork.length; i++) {
            asyncResults[i] = asyncRunner.submit(asyncWork[i]);
        }

        // TODO: need to move from Python to Java for the rest of the script.
        JSONObject configs = createConfigJson(containerSize, cache, xmx, java_home);
        writeConfigJson(tmpDir, lfs, configs);

        if (LOG.isDebugEnabled()) {
            LOG.debug("Config generation took " + (System.nanoTime() - t0) + " ns");
        }
        for (int i = 0; i < asyncWork.length; i++) {
            final long t1 = System.nanoTime();
            asyncResults[i].get();
            final long t2 = System.nanoTime();
            if (LOG.isDebugEnabled()) {
                LOG.debug(asyncWork[i].getName() + " waited for " + (t2 - t1) + " ns");
            }
        }
        if (options.isStarting()) {
            String version = System.getenv("HIVE_VERSION");
            if (version == null || version.isEmpty()) {
                version = DateTime.now().toString("ddMMMyyyy");
            }

            String outputDir = options.getOutput();
            Path packageDir = null;
            if (outputDir == null) {
                outputDir = OUTPUT_DIR_PREFIX + version;
                packageDir = new Path(Paths.get(".").toAbsolutePath().toString(), OUTPUT_DIR_PREFIX + version);
            } else {
                packageDir = new Path(outputDir);
            }
            rc = runPackagePy(args, tmpDir, scriptParent, version, outputDir);
            if (rc == 0) {
                LlapSliderUtils.startCluster(conf, options.getName(), "llap-" + version + ".zip", packageDir,
                        HiveConf.getVar(conf, ConfVars.LLAP_DAEMON_QUEUE_NAME));
            }
        } else {
            rc = 0;
        }
    } finally {
        executor.shutdown();
        lfs.close();
        fs.close();
    }

    if (rc == 0) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Exiting successfully");
        }
    } else {
        LOG.info("Exiting with rc = " + rc);
    }
    return rc;
}

From source file:com.appdynamics.monitors.hadoop.communicator.AmbariCommunicator.java

/**
 * Parses a JSON Reader object as service metrics and collects the service state plus service
 * component metrics. Prefixes each metric name with <code>hierarchy</code>.
 * @see #getComponentMetrics(java.io.Reader, String)
 *
 * @param response
 * @param hierarchy
 */
private void getServiceMetrics(Reader response, String hierarchy) {
    try {
        Map<String, Object> json = (Map<String, Object>) parser.parse(response, simpleContainer);
        try {
            Map serviceInfo = (Map) json.get("ServiceInfo");
            String serviceName = (String) serviceInfo.get("service_name");
            String serviceState = (String) serviceInfo.get("state");

            List<String> states = new ArrayList<String>();
            states.add("INIT");
            states.add("INSTALLING");
            states.add("INSTALL_FAILED");
            states.add("INSTALLED");
            states.add("STARTING");
            states.add("STARTED");
            states.add("STOPPING");
            states.add("UNINSTALLING");
            states.add("UNINSTALLED");
            states.add("WIPING_OUT");
            states.add("UPGRADING");
            states.add("MAINTENANCE");
            states.add("UNKNOWN");
            metrics.put(hierarchy + "|" + serviceName + "|state", states.indexOf(serviceState));

            List<Map> components = (ArrayList<Map>) json.get("components");

            CompletionService<Reader> threadPool = new ExecutorCompletionService<Reader>(executor);
            int count = 0;
            for (Map component : components) {
                if (xmlParser.isIncludeServiceComponent(serviceName,
                        (String) ((Map) component.get("ServiceComponentInfo")).get("component_name"))) {
                    threadPool.submit(new Response(component.get("href") + COMPONENT_FIELDS));
                    count++;
                }
            }
            for (; count > 0; count--) {
                getComponentMetrics(threadPool.take().get(), hierarchy + "|" + serviceName);
            }
        } catch (Exception e) {
            logger.error("Failed to parse service metrics: " + stackTraceToString(e));
        }
    } catch (Exception e) {
        logger.error("Failed to get response for service metrics: " + stackTraceToString(e));
    }
}

From source file:org.zenoss.zep.dao.impl.EventSummaryDaoImplIT.java

@Test
public void testSummaryMultiThreadDedup() throws ZepException, InterruptedException, ExecutionException {
    // Attempts to create the same event from multiple threads - verifies we get the appropriate de-duping behavior
    // for the count and that we are holding the lock on the database appropriately.
    int poolSize = 10;
    final CyclicBarrier barrier = new CyclicBarrier(poolSize);
    ExecutorService executorService = Executors.newFixedThreadPool(poolSize);
    ExecutorCompletionService<String> ecs = new ExecutorCompletionService<String>(executorService);
    final Event event = EventTestUtils.createSampleEvent();
    final EventPreCreateContext context = new EventPreCreateContextImpl();
    for (int i = 0; i < poolSize; i++) {
        ecs.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                barrier.await();
                return eventSummaryDao.create(event, context);
            }
        });
    }
    String uuid = null;
    for (int i = 0; i < poolSize; i++) {
        String thisUuid = ecs.take().get();
        if (uuid == null) {
            assertNotNull(thisUuid);
            uuid = thisUuid;
        } else {
            assertEquals(uuid, thisUuid);
        }
    }
    // Now look up the event and make sure the count is equal to the number of submitted workers
    assertEquals(poolSize, this.eventSummaryDao.findByUuid(uuid).getCount());
}

From source file:org.apache.solr.handler.component.AlfrescoHttpShardHandlerFactory.java

/**
 * Creates a new completion service for use by a single set of distributed requests.
 */
public CompletionService newCompletionService() {
    return new ExecutorCompletionService<ShardResponse>(commExecutor);
}

From source file:ddf.catalog.cache.solr.impl.CachingFederationStrategy.java

private QueryResponse sourceFederate(List<Source> sources, final QueryRequest queryRequest) {
    if (LOGGER.isDebugEnabled()) {
        for (Source source : sources) {
            if (source != null) {
                LOGGER.debug("source to query: {}", source.getId());
            }
        }
    }

    Query originalQuery = queryRequest.getQuery();

    int offset = originalQuery.getStartIndex();
    final int pageSize = originalQuery.getPageSize();

    // limit offset to max value
    if (offset > this.maxStartIndex) {
        offset = this.maxStartIndex;
    }

    final QueryResponseImpl queryResponseQueue = new QueryResponseImpl(queryRequest, null);

    Map<Future<SourceResponse>, QueryRequest> futures = new HashMap<>();

    Query modifiedQuery = getModifiedQuery(originalQuery, sources.size(), offset, pageSize);
    QueryRequest modifiedQueryRequest = new QueryRequestImpl(modifiedQuery, queryRequest.isEnterprise(),
            queryRequest.getSourceIds(), queryRequest.getProperties());

    CompletionService<SourceResponse> queryCompletion = new ExecutorCompletionService<>(queryExecutorService);

    // Do NOT call source.isAvailable() when checking sources
    for (final Source source : sources) {
        if (source != null) {
            LOGGER.debug("running query on source: {}", source.getId());

            QueryRequest sourceQueryRequest = new QueryRequestImpl(modifiedQuery, queryRequest.isEnterprise(),
                    Collections.singleton(source.getId()), new HashMap<>(queryRequest.getProperties()));
            try {
                for (PreFederatedQueryPlugin service : preQuery) {
                    try {
                        sourceQueryRequest = service.process(source, sourceQueryRequest);
                    } catch (PluginExecutionException e) {
                        LOGGER.info("Error executing PreFederatedQueryPlugin", e);
                    }
                }
            } catch (StopProcessingException e) {
                LOGGER.info("Plugin stopped processing", e);
            }

            futures.put(queryCompletion.submit(new CallableSourceResponse(source, sourceQueryRequest)),
                    sourceQueryRequest);
        }
    }

    QueryResponseImpl offsetResults = null;
    // If there are offsets and more than one source, we have to get all the
    // results back and then
    // transfer them into a different Queue. That is what the
    // OffsetResultHandler does.
    if (offset > 1 && sources.size() > 1) {
        offsetResults = new QueryResponseImpl(queryRequest, null);
        queryExecutorService
                .submit(new OffsetResultHandler(queryResponseQueue, offsetResults, pageSize, offset));
    }

    queryExecutorService.submit(sortedQueryMonitorFactory.createMonitor(queryCompletion, futures,
            queryResponseQueue, modifiedQueryRequest, postQuery));

    QueryResponse queryResponse;
    if (offset > 1 && sources.size() > 1) {
        queryResponse = offsetResults;
        LOGGER.debug("returning offsetResults");
    } else {
        queryResponse = queryResponseQueue;
        LOGGER.debug("returning returnResults: {}", queryResponse);
    }

    LOGGER.debug("returning Query Results: {}", queryResponse);
    return queryResponse;
}

From source file:com.redhat.red.build.koji.KojiClient.java

public void setup() {
    uploadService = new ExecutorCompletionService<KojiUploaderResult>(executorService);
    objectMapper = new KojiObjectMapper();

    Logger logger = LoggerFactory.getLogger(getClass());
    logger.debug("SETUP: Starting KojiClient for: " + config.getKojiURL());
    try {
        xmlrpcClient = new HC4SyncObjectClient(httpFactory, bindery, config.getKojiSiteConfig());
    } catch (IOException e) {
        logger.error("Cannot construct koji HTTP site-config: " + e.getMessage(), e);
        xmlrpcClient.close();
        xmlrpcClient = null;
    }

    if (xmlrpcClient != null) {
        try {
            ApiVersionResponse response = xmlrpcClient.call(new ApiVersionRequest(), ApiVersionResponse.class,
                    NO_OP_URL_BUILDER, STANDARD_REQUEST_MODIFIER);

            if (1 != response.getApiVersion()) {
                logger.error("Cannot connect to koji at: " + config.getKojiURL() + ". API Version reported is '"
                        + response.getApiVersion() + "' but this client only supports version 1.");
                xmlrpcClient.close();
                xmlrpcClient = null;
            }
        } catch (XmlRpcException e) {
            logger.error("Cannot retrieve koji API version from: " + config.getKojiURL() + ". (Reason: "
                    + e.getMessage() + ")", e);
            xmlrpcClient.close();
            xmlrpcClient = null;
        }
    }
}