Example usage for java.util.concurrent ExecutorService awaitTermination

List of usage examples for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService awaitTermination.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
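
The typical idiom is to call shutdown() first and then block with awaitTermination; if the timeout elapses before the submitted tasks complete, the pool is usually cancelled with shutdownNow(). The following is a minimal, self-contained sketch of that pattern (the class name and task bodies are illustrative, not taken from the examples below).

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("task " + taskId + " running"));
        }
        // Stop accepting new tasks, then wait for the already submitted ones to finish.
        pool.shutdown();
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            // The timeout elapsed: cancel whatever is still running.
            pool.shutdownNow();
        }
    }
}

Note that awaitTermination does not itself stop the executor: without a preceding shutdown() call it simply blocks for the full timeout and returns false.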

Usage

From source file:org.deri.iris.performance.IRISPerformanceTest.java

/**
 * Executes a set of datalog queries using the given configuration
 * @param queries The set of Datalog queries
 * @param config The configuration for the test suite
 * @return a list of IRISTestCase objects with the result of the test campaign
 */
public List<IRISTestCase> executeTests(final List<String> queries, final TestConfiguration config) {

    // Get the logger
    LOGGER = Logger.getLogger(IRISPerformanceTest.class.getName());

    // Construct a valid IRIS+- program using the queries and the configuration file
    String program = "";

    // add the query and its IRIS execution command to the program
    program += "/// Query ///\n";
    for (final String s : queries) {
        program += s + "\n";
        program += "?-" + s.substring(0, s.indexOf(":-")) + ".\n";
    }
    program += "\n";

    // If reasoning is enabled, add the TBOX to the program
    program += "/// TBox ///\n";
    if (config.getReasoning()) {
        String tboxPath = config.getTestHomePath() + "/" + config.getDataset() + "/tbox";
        if (config.getExpressiveness().compareTo("RDFS") == 0) {
            tboxPath += "/rdfs";
        }
        if (config.getExpressiveness().compareTo("OWL-QL") == 0) {
            tboxPath += "/owlql";
        }
        final String tbox = loadFile(tboxPath + "/" + config.getDataset() + ".dtg");
        program += tbox + "\n";
    } else {
        program += "/// EMPTY ///\n";
    }

    // Add the SBox
    program += "/// SBox ///\n";
    String sboxPath = config.getTestHomePath() + "/" + config.getDataset() + "/sbox";
    if (config.getExpressiveness().compareTo("RDFS") == 0) {
        sboxPath += "/rdfs";
    }
    if (config.getExpressiveness().compareTo("OWL-QL") == 0) {
        sboxPath += "/owlql";
    }
    final String sbox = loadFile(sboxPath + "/" + config.getDataset() + ".dtg");
    program += sbox + "\n\n";

    LOGGER.debug(program);

    // Get the parser
    final Parser parser = new Parser();

    // Parse the program
    try {
        parser.parse(program);
    } catch (final ParserException e) {
        e.printStackTrace();
    }

    // Get the TGDs from the set of rules
    final List<IRule> tgds = RewritingUtils.getTGDs(parser.getRules(), parser.getQueries());

    // Get the query bodies
    final List<IRule> bodies = new ArrayList<IRule>(parser.getRules());
    final List<IRule> datalogQueries = RewritingUtils.getQueries(bodies, parser.getQueries());

    // Get the constraints from the set of rules
    final Set<IRule> constraints = RewritingUtils.getConstraints(parser.getRules(), parser.getQueries());

    // Get the SBox rules from the set of rules
    final List<IRule> storageRules = RewritingUtils.getSBoxRules(parser.getRules(), parser.getQueries());

    // Check that the TBox is FO-reducible
    IRuleSafetyProcessor ruleProc = new LinearReducibleRuleSafetyProcessor();
    try {
        ruleProc.process(tgds);
    } catch (final RuleUnsafeException e) {
        e.printStackTrace();
    }

    // Check that the SBox rules are Safe Datalog
    ruleProc = new StandardRuleSafetyProcessor();
    try {
        ruleProc.process(storageRules);
    } catch (final RuleUnsafeException e) {
        e.printStackTrace();
    }

    // Connect to the storage
    StorageManager.getInstance();
    try {
        StorageManager.connect(config.getDBVendor(), config.getDBProtocol(), config.getDBHost(),
                config.getDBPort(), config.getDBName(), config.getSchemaName(), config.getDBUsername(),
                config.getDBPassword());
    } catch (final SQLException e) {
        e.printStackTrace();
    }

    // Evaluate the queries
    final List<IRISTestCase> output = new LinkedList<IRISTestCase>();
    for (final IQuery q : parser.getQueries()) {
        // Generate a new test-case
        final IRISTestCase currentTest = new IRISTestCase();
        int nTask = -10;

        // Get the Factories
        final IRelationFactory rf = new RelationFactory();

        // Get the Rewriter Engine
        final ParallelRewriter rewriter = new ParallelRewriter(DecompositionStrategy.DECOMPOSE,
                RewritingLanguage.UCQ, SubCheckStrategy.TAIL, NCCheck.TAIL);

        // Get and log the rule corresponding to the query
        final IRule ruleQuery = getRuleQuery(q, datalogQueries);
        currentTest.setQuery(ruleQuery);

        final Map<Pair<IPosition, IPosition>, Set<List<IRule>>> deps = DepGraphUtils
                .computePositionDependencyGraph(tgds);

        final Set<Expressivity> exprs = RewritingUtils.getExpressivity(tgds);

        // Compute and log the FO-Rewriting
        LOGGER.info("Computing TBox Rewriting");
        float duration = -System.nanoTime();
        final Set<IRule> rewriting = rewriter.getRewriting(ruleQuery, tgds, constraints, deps, exprs);
        duration = ((duration + System.nanoTime()) / 1000000);
        currentTest.getTasks()
                .add(new Task(nTask++, "TBox Rewriting", duration, 0, 0, "ms", rewriting.toString()));
        LOGGER.info("done.");
        int count = 0;
        for (final IRule r : rewriting) {
            LOGGER.debug("(Qr" + ++count + ")" + r);
        }

        // Produce the rewriting according to the Nyaya Data Model
        final IQueryRewriter ndmRewriter = new NDMRewriter(storageRules);

        // Create a buffer for the output
        final IRelation outRelation = rf.createRelation();

        // Get the SBox rewriting
        try {
            LOGGER.info("Computing SBox Rewriting");
            final Set<IRule> sboxRewriting = new LinkedHashSet<IRule>();
            duration = -System.nanoTime();
            for (final IRule pr : rewriting) {
                sboxRewriting.addAll(ndmRewriter.getRewriting(pr));
            }
            duration = ((duration + System.nanoTime()) / 1000000);
            currentTest.getTasks()
                    .add(new Task(nTask++, "SBox Rewriting", duration, 0, 0, "ms", sboxRewriting.toString()));
            LOGGER.info("done.");
            count = 0;
            for (final IRule n : sboxRewriting) {
                LOGGER.debug("(Qn" + ++count + ")" + n);
            }

            // Produce the SQL rewriting for each query in the program
            final SQLRewriter sqlRewriter = new SQLRewriter(sboxRewriting);

            // Get the SQL rewriting as Union of Conjunctive Queries (UCQ)
            LOGGER.info("Computing SQL Rewriting");
            duration = -System.nanoTime();
            final List<String> ucqSQLRewriting = new LinkedList<String>();
            ucqSQLRewriting.add(sqlRewriter.getUCQSQLRewriting("", 10000, 0));
            duration = ((duration + System.nanoTime()) / 1000000);
            currentTest.getTasks()
                    .add(new Task(nTask++, "SQL Rewriting", duration, 0, 0, "ms", ucqSQLRewriting.toString()));
            LOGGER.info("done.");
            count = 0;
            for (final String s : ucqSQLRewriting) {
                LOGGER.debug("(Qs" + ++count + ") " + s);
            }

            // Execute the UCQ
            LOGGER.info("Executing SQL");

            // float ansConstructOverall = 0;

            // The synchronized structure to store the output tuples
            final Set<ITuple> result = Collections.synchronizedSet(new HashSet<ITuple>());

            /*
             * Prepare a set of runnable objects representing each partial rewriting to be executed in parallel
             */
            final List<RunnableQuery> rql = new LinkedList<RunnableQuery>();
            for (final String cq : ucqSQLRewriting) {
                // Construct a Runnable Query
                rql.add(new RunnableQuery(cq, result, currentTest.getTasks()));
            }

            // Get an executor; the pooled variant below (one thread per available processor, times five)
            // is commented out in favour of a single-threaded executor
            // ExecutorService queryExecutor =
            // Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()*5);
            final ExecutorService queryExecutor = Executors.newSingleThreadScheduledExecutor();

            // Execute all the partial rewritings in parallel
            float ucqExecOverall = -System.nanoTime();
            for (final RunnableQuery rq : rql) {
                queryExecutor.execute(rq);
            }
            queryExecutor.shutdown();
            if (queryExecutor.awaitTermination(1, TimeUnit.DAYS)) {
                LOGGER.info("done.");
            } else {
                throw new InterruptedException("Timeout occurred");
            }
            ucqExecOverall = ((ucqExecOverall + System.nanoTime()) / 1000000);
            StorageManager.disconnect();

            // begin addition
            float minTime = System.nanoTime();
            float maxTime = 0;
            float avgTime = 0;
            int n = 0;
            for (final Task t : currentTest.getTasks()) {
                if (t.getName().contains("Execution")) {
                    avgTime += (t.getFinalTime() - t.getInitTime()) / 1000000;
                    n++;
                    if (t.getFinalTime() > maxTime) {
                        maxTime = t.getFinalTime();
                    }
                    if (t.getInitTime() < minTime) {
                        minTime = t.getInitTime();
                    }
                }
            }
            ucqExecOverall = (maxTime - minTime) / 1000000;
            // end addition

            currentTest.getTasks()
                    .add(new Task(nTask++, "UCQ Overall Execution Time", ucqExecOverall, 0, 0, "ms"));

            // begin addition
            avgTime = (avgTime / n);
            System.out.println(n);
            currentTest.getTasks().add(new Task(nTask++, "UCQ Average Execution Time", avgTime, 0, 0, "ms"));
            Collections.sort(currentTest.getTasks());
            // end addition

            for (final ITuple t : result) {
                outRelation.add(t);
            }

        } catch (final SQLException e) {
            e.printStackTrace();
        } catch (final EvaluationException e) {
            e.printStackTrace();
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
        currentTest.setAnswer(outRelation);
        output.add(currentTest);
    }
    return (output);
}

From source file:com.blacklocus.jres.request.index.JresUpdateDocumentTest.java

@Test
public void testRetryOnConflict() throws InterruptedException {
    final String index = "JresUpdateDocumentTest.testRetryOnConflict".toLowerCase();
    final String type = "test";
    final String id = "warzone";

    final AtomicReference<String> error = new AtomicReference<String>();

    final int numThreads = 16, numIterations = 100;

    ExecutorService x = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        x.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    for (int j = 0; j < numIterations; j++) {
                        JresUpdateDocument req = new JresUpdateDocument(index, type, id,
                                ImmutableMap.of("value", 0));
                        req.setRetryOnConflict(numIterations * 10);
                        jres.quest(req);
                    }
                } catch (Exception e) {
                    error.set(e.getMessage());
                }
            }
        });
    }
    x.shutdown();
    x.awaitTermination(1, TimeUnit.MINUTES);

    Assert.assertNull("With so many retries, all of these should have gotten through without conflict error",
            error.get());
    jres.quest(new JresRefresh(index));
    JresGetDocumentReply getReply = jres.quest(new JresGetDocument(index, type, id));
    Map<String, Integer> doc = getReply.getSourceAsType(new TypeReference<Map<String, Integer>>() {
    });
    Assert.assertEquals("Should have been numThreads * numIterations versions committed",
            (Object) (numThreads * numIterations), getReply.getVersion());
}

From source file:org.onesec.core.provider.ProviderRegistryTest.java

@Test(timeOut = 180000)
public void test()
        throws InterruptedException, IOException, ProviderRegistryException, JtapiPeerUnavailableException {

    List<StateListenerConfiguration> listenersConfig = new ArrayList<StateListenerConfiguration>();
    listenersConfig.add(new StateListenerConfigurationImpl(new StateLogger(), null));

    StateListenersCoordinator listenersCoordinator = new StateListenersCoordinatorImpl(listenersConfig);

    final ProviderRegistry registry = new ProviderRegistryImpl(listenersCoordinator,
            LoggerFactory.getLogger(ProviderRegistry.class));

    final ProviderConfigurator configurator = new FileProviderConfigurator(
            new File(System.getProperty("user.home") + "/.onesec"),
            new ProviderConfiguratorListenersImpl(Arrays.asList((ProviderConfiguratorListener) registry)),
            LoggerFactory.getLogger(ProviderConfigurator.class));

    ProviderConfiguratorState state = configurator.getState();
    StateWaitResult res = state.waitForState(new int[] { ProviderConfiguratorState.CONFIGURATION_UPDATED },
            1000L);

    assertFalse(res.isWaitInterrupted());

    ExecutorService executor = Executors.newFixedThreadPool(1);
    executor.execute(new Runnable() {
        public void run() {
            try {
                boolean allControllersInService = false;
                while (!allControllersInService) {
                    allControllersInService = true;
                    for (ProviderController controller : registry.getProviderControllers())
                        if (controller.getState().getId() != ProviderControllerState.IN_SERVICE) {
                            allControllersInService = false;
                            break;
                        }
                    TimeUnit.SECONDS.sleep(1);
                }
            } catch (InterruptedException e) {

            }
        }
    });

    executor.shutdown();
    executor.awaitTermination(175, TimeUnit.SECONDS);

    List<String> numbers = FileUtils
            .readLines(new File(System.getProperty("user.home") + "/" + "/.onesec/numbers.txt"));

    for (String number : numbers) {
        ProviderController controller = registry.getProviderController(number);
        assertNotNull(controller);
        int intNumber = Integer.valueOf(number);
        assertTrue(intNumber >= controller.getFromNumber());
        assertTrue(intNumber <= controller.getToNumber());
    }

    registry.shutdown();
}

From source file:org.voyanttools.trombone.input.index.LuceneIndexer.java

public String index(List<StoredDocumentSource> storedDocumentSources) throws IOException {

    // let's check if we need to create new sources because of tokenization parameters
    if (parameters.getParameterValue("tokenization", "").isEmpty() == false) {
        StoredDocumentSourceStorage sourceDocumentSourceStorage = storage.getStoredDocumentSourceStorage();
        String params = parameters.getParameterValue("tokenization");
        for (int i = 0, len = storedDocumentSources.size(); i < len; i++) {
            StoredDocumentSource storedDocumentSource = storedDocumentSources.get(i);
            String id = storedDocumentSource.getId();
            String newId = DigestUtils.md5Hex(id + params);
            InputStream inputStream = sourceDocumentSourceStorage.getStoredDocumentSourceInputStream(id);
            DocumentMetadata metadata = storedDocumentSource.getMetadata();
            metadata.setLastTokenPositionIndex(TokenType.lexical, 0); // this is crucial to ensure that the document is re-analyzed and its metadata re-written
            InputSource inputSource = new InputStreamInputSource(newId, metadata, inputStream);
            storedDocumentSources.set(i, sourceDocumentSourceStorage.getStoredDocumentSource(inputSource));
            inputStream.close();
        }
    }

    List<String> ids = new ArrayList<String>();
    for (StoredDocumentSource storedDocumentSource : storedDocumentSources) {
        ids.add(storedDocumentSource.getId());
    }
    String corpusId = storage.storeStrings(ids);

    // determine if we need to modify the Lucene index
    Collection<StoredDocumentSource> storedDocumentSourceForLucene = new ArrayList<StoredDocumentSource>();
    if (storage.getLuceneManager().directoryExists()) {
        LeafReader reader = SlowCompositeReaderWrapper.wrap(storage.getLuceneManager().getDirectoryReader());
        Terms terms = reader.terms("id");
        if (terms == null) {
            storedDocumentSourceForLucene.addAll(storedDocumentSources);
        } else {
            TermsEnum termsEnum = terms.iterator();
            for (StoredDocumentSource storedDocumentSource : storedDocumentSources) {
                String id = storedDocumentSource.getId();
                if (!termsEnum.seekExact(new BytesRef(id))) {
                    storedDocumentSourceForLucene.add(storedDocumentSource);
                }
            }
        }
    } else {
        storedDocumentSourceForLucene.addAll(storedDocumentSources);
    }

    if (storedDocumentSourceForLucene.isEmpty() == false) {

        // index documents (or at least add the corpus to the document if not already there); we need to get a new writer
        IndexWriter indexWriter = storage.getLuceneManager().getIndexWriter();
        DirectoryReader indexReader = DirectoryReader.open(indexWriter, true);
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        boolean verbose = parameters.getParameterBooleanValue("verbose");
        int processors = Runtime.getRuntime().availableProcessors();
        ExecutorService executor;

        // index
        executor = Executors.newFixedThreadPool(processors);
        for (StoredDocumentSource storedDocumentSource : storedDocumentSourceForLucene) {
            Runnable worker = new StoredDocumentSourceIndexer(storage, indexWriter, indexSearcher,
                    storedDocumentSource, corpusId, verbose);
            executor.execute(worker);
        }
        executor.shutdown();
        try {
            if (!executor.awaitTermination(parameters.getParameterIntValue("luceneIndexingTimeout", 60 * 10),
                    TimeUnit.SECONDS)) { // default 10 minutes
                throw new InterruptedException("Lucene indexing has run out of time.");
            }
        } catch (InterruptedException e) {
            throw new RuntimeException("Lucene indexing has been interrupted.", e);
        } finally {

            try {
                indexWriter.commit();
            } catch (IOException e) {
                indexWriter.close(); // this may also throw an exception, but docs say to close on commit error
                throw e;
            }
        }

        // this should almost never be called
        if (parameters.containsKey("forceMerge")) {
            indexWriter.forceMerge(parameters.getParameterIntValue("forceMerge"));
        }

        indexReader = DirectoryReader.open(indexWriter, true);
        storage.getLuceneManager().setDirectoryReader(indexReader); // make sure it's available afterwards            

        // now determine which documents need to be analyzed
        Collection<StoredDocumentSource> storedDocumentSourceForAnalysis = new ArrayList<StoredDocumentSource>();
        for (StoredDocumentSource storedDocumentSource : storedDocumentSourceForLucene) {
            if (storedDocumentSource.getMetadata().getLastTokenPositionIndex(TokenType.lexical) == 0) { // don't re-analyze
                storedDocumentSourceForAnalysis.add(storedDocumentSource);
            }
        }

        if (storedDocumentSourceForAnalysis.isEmpty() == false) {
            indexSearcher = new IndexSearcher(indexReader);
            executor = Executors.newFixedThreadPool(processors);
            for (StoredDocumentSource storedDocumentSource : storedDocumentSourceForAnalysis) {
                if (storedDocumentSource.getMetadata().getLastTokenPositionIndex(TokenType.lexical) == 0) { // don't re-analyze
                    Runnable worker = new IndexedDocumentAnalyzer(storage, indexSearcher, storedDocumentSource,
                            corpusId, verbose);
                    executor.execute(worker);
                }
            }
            executor.shutdown();
            try {
                if (!executor.awaitTermination(
                        parameters.getParameterIntValue("luceneAnalysisTimeout", 60 * 10), TimeUnit.SECONDS)) { // default 10 minutes
                    throw new InterruptedException("Lucene analysis has run out of time.");
                }
            } catch (InterruptedException e) {
                throw new RuntimeException("Lucene document analysis has run out of time", e);
            }
        }

    }

    return corpusId;

}

From source file:com.thoughtworks.go.server.security.LdapAuthenticationTest.java

@Test
public void shouldAuthenticateConcurrently() throws Exception {
    ldapServer.addUser(employeesOrgUnit, "foleys", "some-password", "Shilpa Foley", "foleys@somecompany.com");

    ExecutorService pool = Executors.newFixedThreadPool(100);
    List<Callable<String>> allCallables = new ArrayList<Callable<String>>();

    for (int i = 0; i < 100; i++) {
        final boolean even = i % 2 == 0;

        allCallables.add(new Callable<String>() {
            @Override
            public String call() throws Exception {
                if (even) {
                    assertAuthenticationOfValidAdminUser("foleys", "some-password");
                } else {
                    assertFailedAuthentication("invalid_user", "");
                }

                return "";
            }
        });
    }

    List<Future<String>> futures = pool.invokeAll(allCallables);
    pool.shutdown();

    boolean finishedWithoutTimeout = pool.awaitTermination(10, TimeUnit.SECONDS);
    assertThat(finishedWithoutTimeout, is(true));

    // Assert no exceptions, by getting result.
    for (Future<String> future : futures) {
        future.get();
    }
}

From source file:org.geowebcache.sqlite.SqlitlePerf.java

/**
 * Retrieve the created tiles using the mbtiles blobstore.
 */
private static void mbtilesStore(File rootDirectory, File seedFile, long[][] tiles) throws Exception {
    // creating a new database by copying the seeded one
    File databaseFile = new File(rootDirectory,
            Utils.buildPath("grid", "layer", "image_png", "mbtiles_perf_test.sqlite"));
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Start mbtiles select from file '%s'.", databaseFile));
    }
    FileUtils.copyFile(seedFile, databaseFile);
    // submitting the select tasks
    ExecutorService executor = Executors.newFixedThreadPool(WORKERS);
    long startTime = System.currentTimeMillis();
    // mbtiles store configuration
    MbtilesConfiguration configuration = new MbtilesConfiguration();
    configuration.setRootDirectory(rootDirectory.getPath());
    configuration.setTemplatePath(Utils.buildPath("{grid}", "{layer}", "{format}", "mbtiles_perf_test.sqlite"));
    configuration.setUseCreateTime(false);
    // instantiate the mbtiles blobstore
    SqliteConnectionManager connectionManager = new SqliteConnectionManager(10, 2000);
    MbtilesBlobStore mbtilesBlobStore = new MbtilesBlobStore(configuration, connectionManager);
    for (int i = 0; i < tiles.length; i++) {
        long[] tile = tiles[i];
        executor.submit((Runnable) () -> {
            TileObject mbtile = TileObject.createQueryTileObject("layer", tile, "grid", "image/png", null);
            try {
                mbtilesBlobStore.get(mbtile);
            } catch (Exception exception) {
                throw Utils.exception(exception, "Error retrieving tile '%s'.", mbtile);
            }
        });
        if (i != 0 && i % 10000 == 0) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(String.format("Submitted %d select tasks.", i));
            }
        }
    }
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(String.format("Submitted %d select tasks.", TILES));
    }
    // let's wait for the workers to finish
    executor.shutdown();
    executor.awaitTermination(5, TimeUnit.MINUTES);
    // computing some stats
    long endTime = System.currentTimeMillis();
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Tiles mbtiles blobstore select time '%d'.", endTime - startTime));
    }
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info(String.format("Tiles mbtiles blobstore selected per second '%f'.",
                TILES / (float) (endTime - startTime) * 1000));
    }
    // clean everything
    connectionManager.reapAllConnections();
    connectionManager.stopPoolReaper();
    FileUtils.deleteQuietly(databaseFile);
}

From source file:org.apache.bookkeeper.tools.perf.dlog.PerfReader.java

@Override
protected void execute(Namespace namespace) throws Exception {
    List<Pair<Integer, DistributedLogManager>> managers = new ArrayList<>(flags.numLogs);
    for (int i = 0; i < flags.numLogs; i++) {
        String logName = String.format(flags.logName, i);
        managers.add(Pair.of(i, namespace.openLog(logName)));
    }
    log.info("Successfully open {} logs", managers.size());

    // register shutdown hook to aggregate stats
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        isDone.set(true);
        printAggregatedStats(cumulativeRecorder);
    }));

    ExecutorService executor = Executors.newFixedThreadPool(flags.numThreads);
    try {
        for (int i = 0; i < flags.numThreads; i++) {
            final int idx = i;
            final List<DistributedLogManager> logsThisThread = managers.stream()
                    .filter(pair -> pair.getLeft() % flags.numThreads == idx).map(pair -> pair.getRight())
                    .collect(Collectors.toList());
            executor.submit(() -> {
                try {
                    read(logsThisThread);
                } catch (Exception e) {
                    log.error("Encountered error while reading records", e);
                }
            });
        }
        log.info("Started {} read threads", flags.numThreads);
        reportStats();
    } finally {
        executor.shutdown();
        if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
            executor.shutdownNow();
        }
        managers.forEach(manager -> manager.getRight().asyncClose());
    }
}

From source file:org.apache.zeppelin.hopshive.HopsHiveInterpreter.java

private SqlCompleter createOrUpdateSqlCompleter(SqlCompleter sqlCompleter, final Connection connection,
        String propertyKey, final String buf, final int cursor) {
    String schemaFiltersKey = String.format("%s.%s", propertyKey, COMPLETER_SCHEMA_FILTERS_KEY);
    String sqlCompleterTtlKey = String.format("%s.%s", propertyKey, COMPLETER_TTL_KEY);
    final String schemaFiltersString = getProperty(schemaFiltersKey);
    int ttlInSeconds = Integer
            .valueOf(StringUtils.defaultIfEmpty(getProperty(sqlCompleterTtlKey), DEFAULT_COMPLETER_TTL));
    final SqlCompleter completer;
    if (sqlCompleter == null) {
        completer = new SqlCompleter(ttlInSeconds);
    } else {
        completer = sqlCompleter;
    }
    ExecutorService executorService = Executors.newFixedThreadPool(1);
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            completer.createOrUpdateFromConnection(connection, schemaFiltersString, buf, cursor);
        }
    });

    executorService.shutdown();

    try {
        // protection to release connection
        executorService.awaitTermination(3, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
    }
    return completer;
}

From source file:com.linkedin.pinot.tools.segment.converter.DictionaryToRawIndexConverter.java

/**
 * Method to perform the conversion for a set of segments in the {@link #_dataDir}
 *
 * @return True if successful, False otherwise
 * @throws Exception
 */
public boolean convert() throws Exception {
    if (_help) {
        printUsage();
        return true;
    }

    File dataDir = new File(_dataDir);
    File outputDir = new File(_outputDir);

    if (!dataDir.exists()) {
        LOGGER.error("Data directory '{}' does not exist.", _dataDir);
        return false;
    } else if (outputDir.exists()) {
        if (_overwrite) {
            LOGGER.info("Overwriting existing output directory '{}'", _outputDir);
            FileUtils.deleteQuietly(outputDir);
            outputDir = new File(_outputDir);
            outputDir.mkdir();
        } else {
            LOGGER.error("Output directory '{}' already exists, use -overwrite to overwrite", outputDir);
            return false;
        }
    }

    File[] segmentFiles = dataDir.listFiles();
    if (segmentFiles == null || segmentFiles.length == 0) {
        LOGGER.error("Empty data directory '{}'.", _dataDir);
        return false;
    }

    boolean ret = true;
    final File outDir = outputDir;
    ExecutorService executorService = Executors.newFixedThreadPool(_numThreads);
    for (final File segmentDir : segmentFiles) {
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    convertSegment(segmentDir, _columns.split("\\s*,\\s*"), outDir, _compressOutput);
                } catch (Exception e) {
                    LOGGER.error("Exception caught while converting segment {}", segmentDir.getName(), e);
                    e.printStackTrace();
                }
            }
        });
    }

    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.HOURS);
    return ret;
}

From source file:com.b2international.index.GroovyMemoryLeakTest.java

@Ignore
@Test
public void tryToGenerateMemoryLeak() throws Exception {
    final List<String> orderedItems = newArrayList();
    final Map<String, Data> documents = newHashMap();

    for (int i = 0; i < NUM_DOCS; i++) {
        String item = null;
        while (item == null || orderedItems.contains(item)) {
            item = RandomStringUtils.randomAlphabetic(10);
        }
        orderedItems.add(item);

        final Data data = new Data();
        data.setField1(item);
        data.setFloatField(100.0f - i);
        documents.put(Integer.toString(i), data);
    }

    indexDocuments(documents);

    ExecutorService executor = Executors.newFixedThreadPool(2);

    final Runnable theQuery = () -> {
        for (int i = 0; i < 10_000; i++) {
            final Query<Data> query = Query.select(Data.class)
                    .where(Expressions.scriptScore(Expressions.matchAll(), "floatField")).limit(NUM_DOCS)
                    .sortBy(SortBy.SCORE).build();
            search(query);
        }
    };

    // run 4 threads to simulate a bit higher load on the index
    executor.submit(theQuery, null);
    executor.submit(theQuery, null);
    executor.submit(theQuery, null);
    executor.submit(theQuery, null);

    executor.shutdown();
    // this won't pass at all, even if the fix is applied
    // the purpose of this test is to detect and verify the GC via external monitoring, thus it cannot be automated properly
    assertTrue(executor.awaitTermination(5, TimeUnit.MINUTES));
}