Example usage for java.util.concurrent ExecutorService shutdownNow

List of usage examples for java.util.concurrent ExecutorService shutdownNow

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService shutdownNow.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
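
For orientation, the snippet below is a minimal, self-contained sketch (not taken from any of the source files listed under Usage) of the common shutdown sequence: call shutdown() to stop accepting new work, wait with awaitTermination(), and fall back to shutdownNow() only if tasks are still running; the returned List<Runnable> holds the tasks that never started.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        // submit more tasks than threads so some are still queued at shutdown time
        for (int i = 0; i < 4; i++) {
            executor.submit(() -> {
                try {
                    Thread.sleep(60_000); // simulate long-running work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // honor the interrupt sent by shutdownNow()
                }
            });
        }
        executor.shutdown(); // stop accepting new tasks; queued tasks keep running
        if (!executor.awaitTermination(1, TimeUnit.SECONDS)) {
            // interrupt the running tasks; the tasks that never started are returned
            List<Runnable> neverStarted = executor.shutdownNow();
            System.out.println(neverStarted.size() + " task(s) never started");
        }
    }
}

Note that shutdownNow() only attempts to stop running tasks (typically via Thread.interrupt()), so tasks that never check for interruption may keep running.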

Usage

From source file:org.apache.hadoop.hbase.master.TestDistributedLogSplitting.java

@Test(timeout = 30000)
public void testDelayedDeleteOnFailure() throws Exception {
    LOG.info("testDelayedDeleteOnFailure");
    startCluster(1);
    final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;
    final FileSystem fs = master.getMasterFileSystem().getFileSystem();
    final Path logDir = new Path(FSUtils.getRootDir(conf), "x");
    fs.mkdirs(logDir);
    ExecutorService executor = null;
    try {
        final Path corruptedLogFile = new Path(logDir, "x");
        FSDataOutputStream out;
        out = fs.create(corruptedLogFile);
        out.write(0);
        out.write(Bytes.toBytes("corrupted bytes"));
        out.close();
        slm.ignoreZKDeleteForTesting = true;
        executor = Executors.newSingleThreadExecutor();
        Runnable runnable = new Runnable() {
            @Override
            public void run() {
                try {
                    // since the logDir is a fake, corrupted one, the split log worker
                    // will finish it quickly with an error, and this call will fail and
                    // throw an IOException.
                    slm.splitLogDistributed(logDir);
                } catch (IOException ioe) {
                    try {
                        assertTrue(fs.exists(corruptedLogFile));
                        // this call will block, waiting for the task to be removed from the
                        // tasks map, which is not going to happen while ignoreZKDeleteForTesting
                        // is set to true, until the thread is interrupted.
                        slm.splitLogDistributed(logDir);
                    } catch (IOException e) {
                        assertTrue(Thread.currentThread().isInterrupted());
                        return;
                    }
                    fail("did not get the expected IOException from the 2nd call");
                }
                fail("did not get the expected IOException from the 1st call");
            }
        };
        Future<?> result = executor.submit(runnable);
        try {
            result.get(2000, TimeUnit.MILLISECONDS);
        } catch (TimeoutException te) {
            // it is ok, expected.
        }
        waitForCounter(tot_mgr_wait_for_zk_delete, 0, 1, 10000);
        executor.shutdownNow();
        executor = null;

        // make sure the runnable is finished with no exception thrown.
        result.get();
    } finally {
        if (executor != null) {
            // interrupt the thread in case the test fails in the middle.
            // it has no effect if the thread is already terminated.
            executor.shutdownNow();
        }
        fs.delete(logDir, true);
    }
}

From source file:com.netflix.curator.framework.recipes.queue.TestBoundedDistributedQueue.java

@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Test
public void testMulti() throws Exception {
    final String PATH = "/queue";
    final int CLIENT_QTY = 4;
    final int MAX_ITEMS = 10;
    final int ADD_ITEMS = MAX_ITEMS * 100;
    final int SLOP_FACTOR = 2;

    final QueueConsumer<String> consumer = new QueueConsumer<String>() {
        @Override
        public void consumeMessage(String message) throws Exception {
            Thread.sleep(10);
        }

        @Override
        public void stateChanged(CuratorFramework client, ConnectionState newState) {
        }
    };

    final Timing timing = new Timing();
    final ExecutorService executor = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executor);

    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        client.start();
        client.create().forPath(PATH);

        final CountDownLatch isWaitingLatch = new CountDownLatch(1);
        final AtomicBoolean isDone = new AtomicBoolean(false);
        final List<Integer> counts = new CopyOnWriteArrayList<Integer>();
        final Object lock = new Object();
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Watcher watcher = new Watcher() {
                    @Override
                    public void process(WatchedEvent event) {
                        synchronized (lock) {
                            lock.notifyAll();
                        }
                    }
                };

                while (!Thread.currentThread().isInterrupted() && client.isStarted() && !isDone.get()) {
                    synchronized (lock) {
                        int size = client.getChildren().usingWatcher(watcher).forPath(PATH).size();
                        counts.add(size);
                        isWaitingLatch.countDown();
                        lock.wait();
                    }
                }
                return null;
            }
        });
        isWaitingLatch.await();

        for (int i = 0; i < CLIENT_QTY; ++i) {
            final int index = i;
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = null;
                    DistributedQueue<String> queue = null;

                    try {
                        client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
                                timing.connection(), new RetryOneTime(1));
                        client.start();
                        queue = QueueBuilder.builder(client, consumer, serializer, PATH).executor(executor)
                                .maxItems(MAX_ITEMS).putInBackground(false).lockPath("/locks").buildQueue();
                        queue.start();

                        for (int i = 0; i < ADD_ITEMS; ++i) {
                            queue.put("" + index + "-" + i);
                        }
                    } finally {
                        IOUtils.closeQuietly(queue);
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.take().get();
        }

        isDone.set(true);
        synchronized (lock) {
            lock.notifyAll();
        }

        for (int count : counts) {
            Assert.assertTrue(counts.toString(), count <= (MAX_ITEMS * SLOP_FACTOR));
        }
    } finally {
        executor.shutdownNow();
        IOUtils.closeQuietly(client);
    }
}

From source file:org.apache.accumulo.test.merkle.cli.GenerateHashes.java

public void run(final Connector conn, final String inputTableName, final String outputTableName,
        final String digestName, int numThreads, final boolean iteratorPushdown, final Collection<Range> ranges)
        throws TableNotFoundException, AccumuloSecurityException, AccumuloException, NoSuchAlgorithmException {
    if (!conn.tableOperations().exists(outputTableName)) {
        throw new IllegalArgumentException(outputTableName + " does not exist, please create it");
    }

    ExecutorService svc = Executors.newFixedThreadPool(numThreads);
    final BatchWriter bw = conn.createBatchWriter(outputTableName, new BatchWriterConfig());

    try {
        for (final Range range : ranges) {
            final MessageDigest digest = getDigestAlgorithm(digestName);

            svc.execute(new Runnable() {

                @Override
                public void run() {
                    Scanner s;
                    try {
                        s = conn.createScanner(inputTableName, Authorizations.EMPTY);
                    } catch (Exception e) {
                        log.error("Could not get scanner for " + inputTableName, e);
                        throw new RuntimeException(e);
                    }

                    s.setRange(range);

                    Value v = null;
                    Mutation m = null;
                    if (iteratorPushdown) {
                        IteratorSetting cfg = new IteratorSetting(50, DigestIterator.class);
                        cfg.addOption(DigestIterator.HASH_NAME_KEY, digestName);
                        s.addScanIterator(cfg);

                        // The scanner should only ever return us one Key-Value, otherwise this approach won't work
                        Entry<Key, Value> entry = Iterables.getOnlyElement(s);

                        v = entry.getValue();
                        m = RangeSerialization.toMutation(range, v);
                    } else {
                        ByteArrayOutputStream baos = new ByteArrayOutputStream();
                        for (Entry<Key, Value> entry : s) {
                            DataOutputStream out = new DataOutputStream(baos);
                            try {
                                entry.getKey().write(out);
                                entry.getValue().write(out);
                            } catch (Exception e) {
                                log.error("Error writing {}", entry, e);
                                throw new RuntimeException(e);
                            }

                            digest.update(baos.toByteArray());
                            baos.reset();
                        }

                        v = new Value(digest.digest());
                        m = RangeSerialization.toMutation(range, v);
                    }

                    // Log some progress
                    log.info("{} computed digest for {} of {}", Thread.currentThread().getName(), range,
                            Hex.encodeHexString(v.get()));

                    try {
                        bw.addMutation(m);
                    } catch (MutationsRejectedException e) {
                        log.error("Could not write mutation", e);
                        throw new RuntimeException(e);
                    }
                }
            });
        }

        svc.shutdown();

        // Wait indefinitely for the scans to complete
        while (!svc.isTerminated()) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                log.error("Interrupted while waiting for executor service to gracefully complete. Exiting now");
                svc.shutdownNow();
                return;
            }
        }
    } finally {
        // We can only safely close this when we're exiting or we've completed all tasks
        bw.close();
    }
}

From source file:org.apache.accumulo.test.replication.merkle.cli.GenerateHashes.java

public void run(final Connector conn, final String inputTableName, final String outputTableName,
        final String digestName, int numThreads, final boolean iteratorPushdown, final Collection<Range> ranges)
        throws TableNotFoundException, AccumuloSecurityException, AccumuloException, NoSuchAlgorithmException {
    if (!conn.tableOperations().exists(outputTableName)) {
        throw new IllegalArgumentException(outputTableName + " does not exist, please create it");
    }

    // Get some parallelism
    ExecutorService svc = Executors.newFixedThreadPool(numThreads);
    final BatchWriter bw = conn.createBatchWriter(outputTableName, new BatchWriterConfig());

    try {
        for (final Range range : ranges) {
            final MessageDigest digest = getDigestAlgorithm(digestName);

            svc.execute(new Runnable() {

                @Override
                public void run() {
                    Scanner s;
                    try {
                        s = conn.createScanner(inputTableName, Authorizations.EMPTY);
                    } catch (Exception e) {
                        log.error("Could not get scanner for " + inputTableName, e);
                        throw new RuntimeException(e);
                    }

                    s.setRange(range);

                    Value v = null;
                    Mutation m = null;
                    if (iteratorPushdown) {
                        IteratorSetting cfg = new IteratorSetting(50, DigestIterator.class);
                        cfg.addOption(DigestIterator.HASH_NAME_KEY, digestName);
                        s.addScanIterator(cfg);

                        // The scanner should only ever return us one Key-Value, otherwise this approach won't work
                        Entry<Key, Value> entry = Iterables.getOnlyElement(s);

                        v = entry.getValue();
                        m = RangeSerialization.toMutation(range, v);
                    } else {
                        ByteArrayOutputStream baos = new ByteArrayOutputStream();
                        for (Entry<Key, Value> entry : s) {
                            DataOutputStream out = new DataOutputStream(baos);
                            try {
                                entry.getKey().write(out);
                                entry.getValue().write(out);
                            } catch (Exception e) {
                                log.error("Error writing {}", entry, e);
                                throw new RuntimeException(e);
                            }

                            digest.update(baos.toByteArray());
                            baos.reset();
                        }

                        v = new Value(digest.digest());
                        m = RangeSerialization.toMutation(range, v);
                    }

                    // Log some progress
                    log.info("{} computed digest for {} of {}", Thread.currentThread().getName(), range,
                            Hex.encodeHexString(v.get()));

                    try {
                        bw.addMutation(m);
                    } catch (MutationsRejectedException e) {
                        log.error("Could not write mutation", e);
                        throw new RuntimeException(e);
                    }
                }
            });
        }

        svc.shutdown();

        // Wait indefinitely for the scans to complete
        while (!svc.isTerminated()) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                log.error("Interrupted while waiting for executor service to gracefully complete. Exiting now");
                svc.shutdownNow();
                return;
            }
        }
    } finally {
        // We can only safely close this when we're exiting or we've completed all tasks
        bw.close();
    }
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testCluster() throws Exception {
    final int QTY = 20;
    final int OPERATION_TIME_MS = 1000;
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    final Timing timing = new Timing();
    TestingCluster cluster = new TestingCluster(3);
    List<SemaphoreClient> semaphoreClients = Lists.newArrayList();
    try {
        cluster.start();

        final AtomicInteger opCount = new AtomicInteger(0);
        for (int i = 0; i < QTY; ++i) {
            SemaphoreClient semaphoreClient = new SemaphoreClient(cluster.getConnectString(), PATH,
                    new Callable<Void>() {
                        @Override
                        public Void call() throws Exception {
                            opCount.incrementAndGet();
                            Thread.sleep(OPERATION_TIME_MS);
                            return null;
                        }
                    });
            completionService.submit(semaphoreClient);
            semaphoreClients.add(semaphoreClient);
        }

        timing.forWaiting().sleepABit();

        Assert.assertNotNull(SemaphoreClient.getActiveClient());

        final CountDownLatch latch = new CountDownLatch(1);
        CuratorFramework client = CuratorFrameworkFactory.newClient(cluster.getConnectString(),
                timing.session(), timing.connection(), new ExponentialBackoffRetry(100, 3));
        ConnectionStateListener listener = new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if (newState == ConnectionState.LOST) {
                    latch.countDown();
                }
            }
        };
        client.getConnectionStateListenable().addListener(listener);
        client.start();
        try {
            client.getZookeeperClient().blockUntilConnectedOrTimedOut();

            cluster.stop();

            latch.await();
        } finally {
            IOUtils.closeQuietly(client);
        }

        long startTicks = System.currentTimeMillis();
        for (;;) {
            int thisOpCount = opCount.get();
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (thisOpCount == opCount.get()) {
                break; // checking that the op count isn't increasing
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }

        int thisOpCount = opCount.get();

        Iterator<InstanceSpec> iterator = cluster.getInstances().iterator();
        cluster = new TestingCluster(iterator.next(), iterator.next());
        cluster.start();
        timing.forWaiting().sleepABit();

        startTicks = System.currentTimeMillis();
        for (;;) {
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (opCount.get() > thisOpCount) {
                break; // checking that semaphore has started working again
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }
    } finally {
        for (SemaphoreClient semaphoreClient : semaphoreClients) {
            IOUtils.closeQuietly(semaphoreClient);
        }
        IOUtils.closeQuietly(cluster);
        executorService.shutdownNow();
    }
}

From source file:org.apache.hadoop.hive.ql.metadata.Hive.java

private static void copyFiles(final HiveConf conf, final FileSystem destFs, FileStatus[] srcs,
        final FileSystem srcFs, final Path destf, final boolean isSrcLocal, final List<Path> newFiles)
        throws HiveException {

    final HdfsUtils.HadoopFileStatus fullDestStatus;
    try {
        fullDestStatus = new HdfsUtils.HadoopFileStatus(conf, destFs, destf);
    } catch (IOException e1) {
        throw new HiveException(e1);
    }

    if (!fullDestStatus.getFileStatus().isDirectory()) {
        throw new HiveException(destf + " is not a directory.");
    }
    final List<Future<ObjectPair<Path, Path>>> futures = new LinkedList<>();
    final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0
            ? Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25),
                    new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build())
            : null;
    for (FileStatus src : srcs) {
        FileStatus[] files;
        if (src.isDirectory()) {
            try {
                files = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
            } catch (IOException e) {
                pool.shutdownNow();
                throw new HiveException(e);
            }
        } else {
            files = new FileStatus[] { src };
        }

        final SessionState parentSession = SessionState.get();
        for (final FileStatus srcFile : files) {
            final Path srcP = srcFile.getPath();
            final boolean needToCopy = needToCopy(srcP, destf, srcFs, destFs);

            final boolean isRenameAllowed = !needToCopy && !isSrcLocal;

            final String msg = "Unable to move source " + srcP + " to destination " + destf;

            // If we do a rename for a non-local file, we will be transferring the original
            // file permissions from source to the destination. Otherwise, in the mvFile() case where
            // we copy from source to destination, we will inherit the destination's parent group ownership.
            if (null == pool) {
                try {
                    Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);

                    if (null != newFiles) {
                        newFiles.add(destPath);
                    }
                } catch (Exception e) {
                    throw getHiveException(e, msg, "Failed to move: {}");
                }
            } else {
                futures.add(pool.submit(new Callable<ObjectPair<Path, Path>>() {
                    @Override
                    public ObjectPair<Path, Path> call() throws HiveException {
                        SessionState.setCurrentSessionState(parentSession);

                        try {
                            Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal,
                                    isRenameAllowed);

                            if (null != newFiles) {
                                newFiles.add(destPath);
                            }
                            return ObjectPair.create(srcP, destPath);
                        } catch (Exception e) {
                            throw getHiveException(e, msg);
                        }
                    }
                }));
            }
        }
    }
    if (null != pool) {
        pool.shutdown();
        for (Future<ObjectPair<Path, Path>> future : futures) {
            try {
                ObjectPair<Path, Path> pair = future.get();
                LOG.debug("Moved src: {}", pair.getFirst().toString(), ", to dest: {}",
                        pair.getSecond().toString());
            } catch (Exception e) {
                throw handlePoolException(pool, e);
            }
        }
    }
}

From source file:org.apache.hadoop.hive.ql.metadata.Hive.java

static private HiveException handlePoolException(ExecutorService pool, Exception e) {
    HiveException he = null;

    if (e instanceof HiveException) {
        he = (HiveException) e;
        if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
            if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
                LOG.error(String.format("Failed to move: %s", he.getMessage()));
            } else {
                LOG.info(String.format("Failed to move: %s", he.getRemoteErrorMsg()));
            }
        }
    } else {
        LOG.error(String.format("Failed to move: %s", e.getMessage()));
        he = new HiveException(e.getCause());
    }
    pool.shutdownNow();
    return he;
}

From source file:com.blackducksoftware.integration.hub.detect.tool.signaturescanner.BlackDuckSignatureScannerTool.java

public SignatureScannerToolResult runScanTool(NameVersion projectNameVersion, Optional<File> dockerTar)
        throws DetectUserFriendlyException {
    DetectConfiguration detectConfiguration = detectContext.getBean(DetectConfiguration.class);
    DetectConfigurationFactory detectConfigurationFactory = detectContext
            .getBean(DetectConfigurationFactory.class);
    ConnectionManager connectionManager = detectContext.getBean(ConnectionManager.class);
    ConnectivityManager connectivityManager = detectContext.getBean(ConnectivityManager.class);
    DirectoryManager directoryManager = detectContext.getBean(DirectoryManager.class);

    Optional<BlackDuckServerConfig> hubServerConfig = Optional.empty();
    if (connectivityManager.isDetectOnline() && connectivityManager.getBlackDuckServerConfig().isPresent()) {
        hubServerConfig = connectivityManager.getBlackDuckServerConfig();
    }

    logger.info("Will run the signature scanner tool.");
    final String offlineLocalScannerInstallPath = detectConfiguration.getProperty(
            DetectProperty.DETECT_BLACKDUCK_SIGNATURE_SCANNER_OFFLINE_LOCAL_PATH, PropertyAuthority.None);
    final String onlineLocalScannerInstallPath = detectConfiguration
            .getProperty(DetectProperty.DETECT_BLACKDUCK_SIGNATURE_SCANNER_LOCAL_PATH, PropertyAuthority.None);

    String localScannerInstallPath = "";
    if (StringUtils.isNotBlank(offlineLocalScannerInstallPath)) {
        localScannerInstallPath = offlineLocalScannerInstallPath;
        logger.debug("Determined offline local scanner path: " + localScannerInstallPath);
    } else if (StringUtils.isNotBlank(onlineLocalScannerInstallPath)) {
        localScannerInstallPath = onlineLocalScannerInstallPath;
        logger.debug("Determined online local scanner path: " + localScannerInstallPath);
    }

    final String userProvidedScannerInstallUrl = detectConfiguration
            .getProperty(DetectProperty.DETECT_BLACKDUCK_SIGNATURE_SCANNER_HOST_URL, PropertyAuthority.None);

    BlackDuckSignatureScannerOptions blackDuckSignatureScannerOptions = detectConfigurationFactory
            .createBlackDuckSignatureScannerOptions();
    final ExecutorService executorService = Executors
            .newFixedThreadPool(blackDuckSignatureScannerOptions.getParrallelProcessors());
    IntEnvironmentVariables intEnvironmentVariables = new IntEnvironmentVariables();

    ScanBatchRunnerFactory scanBatchRunnerFactory = new ScanBatchRunnerFactory(intEnvironmentVariables,
            executorService);
    ScanBatchRunner scanBatchRunner;
    File installDirectory = directoryManager.getPermanentDirectory();
    if (hubServerConfig.isPresent() && StringUtils.isBlank(userProvidedScannerInstallUrl)
            && StringUtils.isBlank(localScannerInstallPath)) {
        logger.debug(
                "Signature scanner will use the hub server to download/update the scanner - this is the most likely situation.");
        scanBatchRunner = scanBatchRunnerFactory.withHubInstall(hubServerConfig.get());
    } else {
        if (StringUtils.isNotBlank(userProvidedScannerInstallUrl)) {
            logger.debug("Signature scanner will use the provided url to download/update the scanner.");
            scanBatchRunner = scanBatchRunnerFactory.withUserProvidedUrl(userProvidedScannerInstallUrl,
                    connectionManager);
        } else {
            logger.debug(
                    "Signature scanner either given an existing path for the scanner or is offline - either way, we won't attempt to manage the install.");
            if (StringUtils.isNotBlank(localScannerInstallPath)) {
                logger.debug("Using provided path: " + localScannerInstallPath);
                installDirectory = new File(localScannerInstallPath);
            } else {
                logger.debug("Using default scanner path.");
            }
            scanBatchRunner = scanBatchRunnerFactory.withoutInstall(installDirectory);
        }
    }
    logger.debug("Determined install directory: " + installDirectory.getAbsolutePath());

    try {
        if (hubServerConfig.isPresent()) {
            logger.debug("Signature scan is online.");
            CodeLocationCreationService codeLocationCreationService = connectivityManager
                    .getBlackDuckServicesFactory().get().createCodeLocationCreationService();
            OnlineBlackDuckSignatureScanner blackDuckSignatureScanner = detectContext.getBean(
                    OnlineBlackDuckSignatureScanner.class, signatureScannerOptions, scanBatchRunner,
                    codeLocationCreationService, hubServerConfig.get());
            CodeLocationCreationData<ScanBatchOutput> codeLocationCreationData = blackDuckSignatureScanner
                    .performOnlineScan(projectNameVersion, installDirectory, dockerTar.orElse(null));
            return SignatureScannerToolResult.createOnlineResult(codeLocationCreationData);
        } else {
            logger.debug("Signature scan is offline.");
            OfflineBlackDuckSignatureScanner blackDuckSignatureScanner = detectContext
                    .getBean(OfflineBlackDuckSignatureScanner.class, signatureScannerOptions, scanBatchRunner);
            ScanBatchOutput scanBatchOutput = blackDuckSignatureScanner.performScanActions(projectNameVersion,
                    installDirectory, dockerTar.orElse(null));
            return SignatureScannerToolResult.createOfflineResult(scanBatchOutput);
        }
    } catch (IOException | InterruptedException | IntegrationException e) {
        logger.info("Signature scan failed!");
        logger.debug("Signature scan error!", e);
        return SignatureScannerToolResult.createFailureResult();
    } finally {
        executorService.shutdownNow();
    }
}

From source file:org.apache.hadoop.hive.ql.exec.Utilities.java

/**
 * Calculate the total size of input files.
 *
 * @param ctx
 *          the hadoop job context
 * @param work
 *          map reduce job plan
 * @param filter
 *          filter to apply to the input paths before calculating size
 * @return the summary of all the input paths.
 * @throws IOException
 */
public static ContentSummary getInputSummary(final Context ctx, MapWork work, PathFilter filter)
        throws IOException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.INPUT_SUMMARY);

    long[] summary = { 0, 0, 0 };

    final Set<Path> pathNeedProcess = new HashSet<>();

    // Since multiple threads could call this method concurrently, locking
    // this method keeps the number of threads from growing out of control.
    synchronized (INPUT_SUMMARY_LOCK) {
        // For each input path, calculate the total size.
        for (Path path : work.getPathToAliases().keySet()) {
            Path p = path;

            if (filter != null && !filter.accept(p)) {
                continue;
            }

            ContentSummary cs = ctx.getCS(path);
            if (cs == null) {
                if (path == null) {
                    continue;
                }
                pathNeedProcess.add(path);
            } else {
                summary[0] += cs.getLength();
                summary[1] += cs.getFileCount();
                summary[2] += cs.getDirectoryCount();
            }
        }

        // Process the case when name node call is needed
        final Map<String, ContentSummary> resultMap = new ConcurrentHashMap<String, ContentSummary>();
        ArrayList<Future<?>> results = new ArrayList<Future<?>>();
        final ExecutorService executor;

        int numExecutors = getMaxExecutorsForInputListing(ctx.getConf(), pathNeedProcess.size());
        if (numExecutors > 1) {
            LOG.info("Using " + numExecutors + " threads for getContentSummary");
            executor = Executors.newFixedThreadPool(numExecutors,
                    new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Get-Input-Summary-%d").build());
        } else {
            executor = null;
        }

        HiveInterruptCallback interrup = HiveInterruptUtils.add(new HiveInterruptCallback() {
            @Override
            public void interrupt() {
                for (Path path : pathNeedProcess) {
                    try {
                        path.getFileSystem(ctx.getConf()).close();
                    } catch (IOException ignore) {
                        LOG.debug("Failed to close filesystem", ignore);
                    }
                }
                if (executor != null) {
                    executor.shutdownNow();
                }
            }
        });
        try {
            Configuration conf = ctx.getConf();
            JobConf jobConf = new JobConf(conf);
            for (Path path : pathNeedProcess) {
                final Path p = path;
                final String pathStr = path.toString();
                // All threads share the same Configuration and JobConf based on the
                // assumption that they are thread safe if only read operations are
                // executed. This is not stated in Hadoop's javadoc, but the source code
                // clearly shows that efforts were made to achieve it, so we believe they
                // are thread safe. Will revisit this piece of code if we find the
                // assumption is not correct.
                final Configuration myConf = conf;
                final JobConf myJobConf = jobConf;
                final Map<String, Operator<?>> aliasToWork = work.getAliasToWork();
                final Map<Path, ArrayList<String>> pathToAlias = work.getPathToAliases();
                final PartitionDesc partDesc = work.getPathToPartitionInfo().get(p);
                Runnable r = new Runnable() {
                    @Override
                    public void run() {
                        try {
                            Class<? extends InputFormat> inputFormatCls = partDesc.getInputFileFormatClass();
                            InputFormat inputFormatObj = HiveInputFormat.getInputFormatFromCache(inputFormatCls,
                                    myJobConf);
                            if (inputFormatObj instanceof ContentSummaryInputFormat) {
                                ContentSummaryInputFormat cs = (ContentSummaryInputFormat) inputFormatObj;
                                resultMap.put(pathStr, cs.getContentSummary(p, myJobConf));
                                return;
                            }

                            String metaTableStorage = null;
                            if (partDesc.getTableDesc() != null
                                    && partDesc.getTableDesc().getProperties() != null) {
                                metaTableStorage = partDesc.getTableDesc().getProperties()
                                        .getProperty(hive_metastoreConstants.META_TABLE_STORAGE, null);
                            }
                            if (partDesc.getProperties() != null) {
                                metaTableStorage = partDesc.getProperties().getProperty(
                                        hive_metastoreConstants.META_TABLE_STORAGE, metaTableStorage);
                            }

                            HiveStorageHandler handler = HiveUtils.getStorageHandler(myConf, metaTableStorage);
                            if (handler instanceof InputEstimator) {
                                long total = 0;
                                TableDesc tableDesc = partDesc.getTableDesc();
                                InputEstimator estimator = (InputEstimator) handler;
                                for (String alias : HiveFileFormatUtils.doGetAliasesFromPath(pathToAlias, p)) {
                                    JobConf jobConf = new JobConf(myJobConf);
                                    TableScanOperator scanOp = (TableScanOperator) aliasToWork.get(alias);
                                    Utilities.setColumnNameList(jobConf, scanOp, true);
                                    Utilities.setColumnTypeList(jobConf, scanOp, true);
                                    PlanUtils.configureInputJobPropertiesForStorageHandler(tableDesc);
                                    Utilities.copyTableJobPropertiesToConf(tableDesc, jobConf);
                                    total += estimator.estimate(jobConf, scanOp, -1).getTotalLength();
                                }
                                resultMap.put(pathStr, new ContentSummary(total, -1, -1));
                            } else {
                                // todo: should nullify summary for non-native tables,
                                // not to be selected as a mapjoin target
                                FileSystem fs = p.getFileSystem(myConf);
                                resultMap.put(pathStr, fs.getContentSummary(p));
                            }
                        } catch (Exception e) {
                            // We safely ignore this exception for summary data.
                            // We don't update the cache to protect it from polluting other
                            // usages. The worst case is that IOException will always be
                            // retried for another getInputSummary(), which is fine as
                            // IOException is not considered as a common case.
                            LOG.info("Cannot get size of " + pathStr + ". Safely ignored.");
                        }
                    }
                };

                if (executor == null) {
                    r.run();
                } else {
                    Future<?> result = executor.submit(r);
                    results.add(result);
                }
            }

            if (executor != null) {
                for (Future<?> result : results) {
                    boolean executorDone = false;
                    do {
                        try {
                            result.get();
                            executorDone = true;
                        } catch (InterruptedException e) {
                            LOG.info("Interrupted when waiting threads: ", e);
                            Thread.currentThread().interrupt();
                            break;
                        } catch (ExecutionException e) {
                            throw new IOException(e);
                        }
                    } while (!executorDone);
                }
                executor.shutdown();
            }
            HiveInterruptUtils.checkInterrupted();
            for (Map.Entry<String, ContentSummary> entry : resultMap.entrySet()) {
                ContentSummary cs = entry.getValue();

                summary[0] += cs.getLength();
                summary[1] += cs.getFileCount();
                summary[2] += cs.getDirectoryCount();

                ctx.addCS(entry.getKey(), cs);
                LOG.info("Cache Content Summary for " + entry.getKey() + " length: " + cs.getLength()
                        + " file count: " + cs.getFileCount() + " directory count: " + cs.getDirectoryCount());
            }

            perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.INPUT_SUMMARY);
            return new ContentSummary(summary[0], summary[1], summary[2]);
        } finally {
            HiveInterruptUtils.remove(interrup);
        }
    }
}

From source file:org.dllearner.algorithms.qtl.experiments.SPARQLLearningProblemsGenerator.java

public void generateBenchmark(int nrOfSPARQLQueries, final int minDepth, final int maxDepth,
        int minNrOfExamples) {
    Collection<OWLClass> classes = getClasses();
    ArrayList<OWLClass> classesList = new ArrayList<>(classes);
    Collections.shuffle(classesList, new Random(123));
    classes = classesList;
    //      classes = Sets.newHashSet(new OWLClassImpl(IRI.create("http://semantics.crl.ibm.com/univ-bench-dl.owl#TennisFan")));

    //      ExecutorService tp = Executors.newFixedThreadPool(threadCount);
    List<Path> allPaths = new ArrayList<>();

    //      ThreadPoolExecutor tp = new CustomFutureReturningExecutor(
    //            threadCount, threadCount,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<Runnable>(classes.size(), true));

    ExecutorService tp = Executors.newFixedThreadPool(threadCount);

    CompletionService<List<Path>> ecs = new ExecutorCompletionService<List<Path>>(tp);

    JDKRandomGenerator rndGen = new JDKRandomGenerator();
    rndGen.setSeed(123);

    int nrOfQueriesPerDepth = nrOfSPARQLQueries / (maxDepth - minDepth + 1);

    // for each depth <= maxDepth
    for (int depth = minDepth; depth <= maxDepth; depth++) {
        System.out.println("Generating " + nrOfQueriesPerDepth + " queries for depth " + depth);

        Iterator<OWLClass> iterator = classes.iterator();

        // generate paths of depths <= maxDepth
        List<Path> pathsForDepth = new ArrayList<>();

        while (pathsForDepth.size() < nrOfQueriesPerDepth && iterator.hasNext()) {

            Collection<Future<List<Path>>> futures = new ArrayList<>();

            try {
                int cnt = 0;
                while (iterator.hasNext() && (pathsForDepth.size() + ++cnt < nrOfQueriesPerDepth)) {
                    // pick next class
                    OWLClass cls = iterator.next();

                    //            int depth = rndGen.nextInt(maxDepth) + 1;

                    Future<List<Path>> future = ecs
                            .submit(new PathDetectionTask(dataDir, ks, schema, cls, depth, minNrOfExamples));
                    futures.add(future);
                }

                int n = futures.size();
                try {
                    for (int i = 0; i < n; ++i) {
                        Future<List<Path>> f = ecs.take();
                        if (!f.isCancelled()) {
                            List<Path> paths = f.get();

                            if (paths != null) {
                                for (int j = 0; j < Math.min(paths.size(), maxPathsPerClassAndDepth); j++) {
                                    pathsForDepth.add(paths.get(j));
                                }
                            }
                            //                        System.out.println("#Paths: " + paths.size());
                            //                        paths.forEach(p -> System.out.println(p));

                            if (pathsForDepth.size() >= nrOfQueriesPerDepth) {
                                break;
                            }
                        }
                    }
                } catch (InterruptedException | ExecutionException e) {
                    e.printStackTrace();
                }
            } finally {
                for (Future<List<Path>> f : futures) {
                    f.cancel(true);
                }
            }
        }

        allPaths.addAll(pathsForDepth);
    }

    //      for (Future<Path> future : futures) {
    //            try {
    //               Path path = future.get();
    //               if(path != null) {
    //                  paths.add(path);
    //               }
    //               if(paths.size() == nrOfSPARQLQueries) {
    //                  System.err.println("Benchmark generation finished. Stopping all running threads.");
    //                  tp.shutdownNow();
    //               }
    //         } catch (InterruptedException | ExecutionException e) {
    //            e.printStackTrace();
    //         }
    //            if(paths.size() == nrOfSPARQLQueries) {
    //               System.err.println("Benchmark generation finished. Stopping all running threads.");
    //               tp.shutdownNow();
    //            }
    //      }

    tp.shutdownNow();
    try {
        tp.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    //      try {
    //         tp.awaitTermination(1, TimeUnit.DAYS);
    //      } catch (InterruptedException e) {
    //         e.printStackTrace();
    //      }

    // write queries to disk
    String queries = "";
    for (Path path : allPaths) {
        System.out.println(path);
        queries += path.asSPARQLQuery(Var.alloc("s")) + "\n";
    }
    File file = new File(benchmarkDirectory,
            "queries_" + nrOfSPARQLQueries + "_" + minDepth + "-" + maxDepth + "_" + minNrOfExamples + ".txt");
    try {
        Files.write(queries, file, Charsets.UTF_8);
    } catch (IOException e) {
        e.printStackTrace();
    }
}