Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

This page collects example usage of java.util.concurrent.ExecutorService.shutdownNow().

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
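shutdownNow() makes only a best-effort attempt to stop tasks (typical implementations cancel via Thread.interrupt(), so tasks that never respond to interruption may never terminate), and it returns immediately without waiting. A common pattern, shown in this minimal self-contained sketch (class and task names are illustrative only), is to follow shutdownNow() with awaitTermination():

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // Interruptible tasks: Thread.sleep() throws InterruptedException on shutdownNow()
        for (int i = 0; i < 4; i++) {
            pool.submit(() -> {
                try {
                    Thread.sleep(60_000);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore the interrupt flag and exit
                }
            });
        }

        // With 2 threads and 4 tasks, the 2 queued tasks are drained and returned here
        List<Runnable> neverStarted = pool.shutdownNow();
        System.out.println(neverStarted.size() + " queued task(s) never started");

        // shutdownNow() does not wait, so give the interrupted tasks time to exit
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            System.err.println("pool did not terminate");
        }
    }
}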

Usage

From source file:com.spotify.docker.client.DefaultDockerClientTest.java
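In this test, shutdownNow() interrupts a worker thread blocked in waitContainer(); the test then asserts that the resulting ExecutionException wraps an InterruptedException and that the task observed the interrupt.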

@Test
public void interruptTest() throws Exception {

    // Pull image
    sut.pull(BUSYBOX_LATEST);

    // Create container
    final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
            .cmd("sh", "-c", "while :; do sleep 1; done").build();
    final String name = randomName();
    final ContainerCreation creation = sut.createContainer(config, name);
    final String id = creation.id();

    // Start container
    sut.startContainer(id);

    // Wait for container on a thread
    final ExecutorService executorService = Executors.newSingleThreadExecutor();
    final SettableFuture<Boolean> started = SettableFuture.create();
    final SettableFuture<Boolean> interrupted = SettableFuture.create();

    final Future<ContainerExit> exitFuture = executorService.submit(new Callable<ContainerExit>() {
        @Override
        public ContainerExit call() throws Exception {
            try {
                started.set(true);
                return sut.waitContainer(id);
            } catch (InterruptedException e) {
                interrupted.set(true);
                throw e;
            }
        }
    });

    // Interrupt waiting thread
    started.get();
    executorService.shutdownNow();
    try {
        exitFuture.get();
        fail();
    } catch (ExecutionException e) {
        assertThat(e.getCause(), instanceOf(InterruptedException.class));
    }

    // Verify that the thread was interrupted
    assertThat(interrupted.get(), is(true));
}

From source file:org.opencb.opencga.app.cli.analysis.VariantCommandExecutor.java
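Here a single-thread executor computes the expected variant count for a progress logger in the background; shutdown() lets that count complete normally, and shutdownNow() disposes of the executor once the export run has finished.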

private void query() throws Exception {

    AnalysisCliOptionsParser.QueryVariantCommandOptions cliOptions = variantCommandOptions.queryVariantCommandOptions;

    Map<Long, String> studyIds = getStudyIds(sessionId);
    Query query = VariantQueryCommandUtils.parseQuery(cliOptions, studyIds);
    QueryOptions queryOptions = VariantQueryCommandUtils.parseQueryOptions(cliOptions);

    VariantFetcher variantFetcher = new VariantFetcher(catalogManager, storageManagerFactory);

    if (cliOptions.count) {
        QueryResult<Long> result = variantFetcher.count(query, sessionId);
        System.out.println("Num. results\t" + result.getResult().get(0));
    } else if (StringUtils.isNotEmpty(cliOptions.groupBy)) {
        ObjectMapper objectMapper = new ObjectMapper();
        QueryResult groupBy = variantFetcher.groupBy(query, queryOptions, cliOptions.groupBy, sessionId);
        System.out.println(
                "groupBy = " + objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(groupBy));
    } else if (StringUtils.isNotEmpty(cliOptions.rank)) {
        ObjectMapper objectMapper = new ObjectMapper();
        QueryResult rank = variantFetcher.rank(query, queryOptions, cliOptions.rank, sessionId);
        System.out.println("rank = " + objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(rank));
    } else {
        final String outputFormat;
        if (StringUtils.isNotEmpty(cliOptions.outputFormat)) {
            outputFormat = cliOptions.outputFormat.toLowerCase();
        } else {
            outputFormat = "vcf";
        }

        try (OutputStream outputStream = VariantQueryCommandUtils.getOutputStream(cliOptions);
                VariantDBIterator iterator = variantFetcher.iterator(query, queryOptions, sessionId)) {

            StudyConfiguration studyConfiguration;
            final DataWriter<Variant> exporter;
            switch (VariantQueryCommandUtils.VariantOutputFormat.safeValueOf(outputFormat)) {
            case VCF:
                //                StudyConfigurationManager studyConfigurationManager = variantDBAdaptor.getStudyConfigurationManager();
                //                Map<Long, List<Sample>> samplesMetadata = variantFetcher.getSamplesMetadata(studyId, query, queryOptions, sessionId);
                //                QueryResult<StudyConfiguration> studyConfigurationResult = studyConfigurationManager.getStudyConfiguration(
                //                        query.getAsStringList(RETURNED_STUDIES.key()).get(0), null);
                studyConfiguration = variantFetcher.getStudyConfiguration(
                        query.getAsIntegerList(RETURNED_STUDIES.key()).get(0), null, sessionId);
                if (studyConfiguration != null) {
                    // Samples to be returned
                    if (query.containsKey(RETURNED_SAMPLES.key())) {
                        queryOptions.put(RETURNED_SAMPLES.key(), query.get(RETURNED_SAMPLES.key()));
                    }

                    //                        options.add("includeAnnotations", queryVariantsCommandOptions.includeAnnotations);
                    if (cliOptions.annotations != null) {
                        queryOptions.add("annotations", cliOptions.annotations);
                    }
                    //                            VariantVcfExporter.htsExport(iterator, studyConfiguration, outputStream, queryOptions);
                    long studyId = variantFetcher.getMainStudyId(query);
                    VariantSourceDBAdaptor sourceDBAdaptor = variantFetcher.getSourceDBAdaptor((int) studyId,
                            sessionId);
                    exporter = new VariantVcfExporter(studyConfiguration, sourceDBAdaptor, outputStream,
                            queryOptions);
                } else {
                    throw new IllegalArgumentException(
                            "No study found named " + query.getAsStringList(RETURNED_STUDIES.key()).get(0));
                }
                break;
            case JSON:
                // we know that it is JSON, otherwise we have not reached this point
                exporter = batch -> {
                    batch.forEach(variant -> {
                        try {
                            outputStream.write(variant.toJson().getBytes());
                            outputStream.write('\n');
                        } catch (IOException e) {
                            throw new UncheckedIOException(e);
                        }
                    });
                    return true;
                };

                break;
            case AVRO:
                String codecName = "";
                if (VariantQueryCommandUtils.VariantOutputFormat.isGzip(outputFormat)) {
                    codecName = "gzip";
                }
                if (outputFormat.endsWith("snappy")) {
                    codecName = "snappy";
                }
                exporter = new VariantAvroWriter(VariantAvro.getClassSchema(), codecName, outputStream);

                break;
            case STATS:
                studyConfiguration = variantFetcher.getStudyConfiguration(
                        query.getAsIntegerList(RETURNED_STUDIES.key()).get(0), null, sessionId);
                List<String> cohorts = new ArrayList<>(studyConfiguration.getCohortIds().keySet());
                cohorts.sort(String::compareTo);

                exporter = new VariantStatsTsvExporter(outputStream, studyConfiguration.getStudyName(),
                        cohorts);

                break;
            case CELLBASE:
                exporter = new VariantStatsPopulationFrequencyExporter(outputStream);
                break;
            default:
                throw new ParameterException("Unknown output format " + outputFormat);
            }

            ParallelTaskRunner.Task<Variant, Variant> progressTask;
            ExecutorService executor;
            if (VariantQueryCommandUtils.isStandardOutput(cliOptions)) {
                progressTask = batch -> batch;
                executor = null;
            } else {
                executor = Executors.newSingleThreadExecutor();
                Future<Long> future = executor.submit(() -> {
                    Long count = variantFetcher.count(query, sessionId).first();
                    count = Math.min(queryOptions.getLong(QueryOptions.LIMIT, Long.MAX_VALUE),
                            count - queryOptions.getLong(QueryOptions.SKIP, 0));
                    return count;
                });
                executor.shutdown();
                ProgressLogger progressLogger = new ProgressLogger("Export variants", future, 200);
                progressTask = batch -> {
                    progressLogger.increment(batch.size());
                    return batch;
                };
            }
            ParallelTaskRunner.Config config = ParallelTaskRunner.Config.builder().setNumTasks(1)
                    .setBatchSize(10).setAbortOnFail(true).build();
            ParallelTaskRunner<Variant, Variant> ptr = new ParallelTaskRunner<>(batchSize -> {
                List<Variant> variants = new ArrayList<>(batchSize);
                while (iterator.hasNext() && variants.size() < batchSize) {
                    variants.add(iterator.next());
                }
                return variants;
            }, progressTask, exporter, config);

            ptr.run();
            if (executor != null) {
                executor.shutdownNow();
            }
            logger.info(
                    "Time fetching data: " + iterator.getTimeFetching(TimeUnit.MILLISECONDS) / 1000.0 + "s");
            logger.info("Time converting data: " + iterator.getTimeConverting(TimeUnit.MILLISECONDS) / 1000.0
                    + "s");

        }
    }
}

From source file:org.apache.nifi.controller.scheduling.ProcessorLifecycleIT.java
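A stress test that fires thousands of concurrent start/stop calls at a processor from a 100-thread pool, then calls shutdownNow() to dispose of the pool once the countdown latch has been released.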

/**
 * Concurrency test that basically hammers on both the stop and start
 * operations, validating their idempotency.
 */
@Test
@Ignore
public void validateLifecycleOperationOrderWithConcurrentCallsToStartStop() throws Exception {
    final FlowManagerAndSystemBundle fcsb = this.buildFlowControllerForTest();
    flowManager = fcsb.getFlowManager();

    ProcessGroup testGroup = flowManager.createProcessGroup(UUID.randomUUID().toString());
    final ProcessorNode testProcNode = flowManager.createProcessor(TestProcessor.class.getName(),
            UUID.randomUUID().toString(), fcsb.getSystemBundle().getBundleDetails().getCoordinate());
    testProcNode.setProperties(properties);
    TestProcessor testProcessor = (TestProcessor) testProcNode.getProcessor();

    // sets the scenario for the processor to run
    this.noop(testProcessor);

    ExecutorService executor = Executors.newFixedThreadPool(100);
    int startCallsCount = 10000;
    final CountDownLatch countDownCounter = new CountDownLatch(startCallsCount);
    assertCondition(() -> ScheduledState.STOPPED == testProcNode.getScheduledState());
    final Random random = new Random();
    for (int i = 0; i < startCallsCount / 2; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                LockSupport.parkNanos(random.nextInt(9000000));
                processScheduler.stopProcessor(testProcNode);
                countDownCounter.countDown();
            }
        });
    }
    for (int i = 0; i < startCallsCount / 2; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                LockSupport.parkNanos(random.nextInt(9000000));
                processScheduler.startProcessor(testProcNode, true);
                countDownCounter.countDown();
            }
        });
    }
    assertTrue(countDownCounter.await(1000000, TimeUnit.MILLISECONDS));
    String previousOperation = null;
    for (String operationName : testProcessor.operationNames) {
        if (previousOperation == null || previousOperation.equals("@OnStopped")) {
            assertEquals("@OnScheduled", operationName);
        } else if (previousOperation.equals("@OnScheduled")) {
            assertEquals("@OnUnscheduled", operationName);
        } else if (previousOperation.equals("@OnUnscheduled")) {
            assertTrue(operationName.equals("@OnStopped") || operationName.equals("@OnScheduled"));
        }
        previousOperation = operationName;
    }
    executor.shutdownNow();
}

From source file:org.apache.nifi.controller.scheduling.TestProcessorLifecycle.java
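An earlier variant of the same NiFi lifecycle stress test, again ending with shutdownNow() after all start/stop operations have counted down the latch.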

/**
 * Concurrency test that basically hammers on both the stop and start
 * operations, validating their idempotency.
 */
@Test
@Ignore
public void validateLifecycleOperationOrderWithConcurrentCallsToStartStop() throws Exception {
    fc = this.buildFlowControllerForTest();
    ProcessGroup testGroup = fc.createProcessGroup(UUID.randomUUID().toString());
    this.setControllerRootGroup(fc, testGroup);
    final ProcessorNode testProcNode = fc.createProcessor(TestProcessor.class.getName(),
            UUID.randomUUID().toString());
    testProcNode.setProperties(properties);
    TestProcessor testProcessor = (TestProcessor) testProcNode.getProcessor();

    // sets the scenario for the processor to run
    this.noop(testProcessor);

    final ProcessScheduler ps = fc.getProcessScheduler();
    ExecutorService executor = Executors.newFixedThreadPool(100);
    int startCallsCount = 10000;
    final CountDownLatch countDownCounter = new CountDownLatch(startCallsCount);
    assertTrue(testProcNode.getScheduledState() == ScheduledState.STOPPED);
    final Random random = new Random();
    for (int i = 0; i < startCallsCount / 2; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                LockSupport.parkNanos(random.nextInt(9000000));
                ps.stopProcessor(testProcNode);
                countDownCounter.countDown();
            }
        });
    }
    for (int i = 0; i < startCallsCount / 2; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                LockSupport.parkNanos(random.nextInt(9000000));
                ps.startProcessor(testProcNode);
                countDownCounter.countDown();
            }
        });
    }
    assertTrue(countDownCounter.await(1000000, TimeUnit.MILLISECONDS));
    String previousOperation = null;
    for (String operationName : testProcessor.operationNames) {
        if (previousOperation == null || previousOperation.equals("@OnStopped")) {
            assertEquals("@OnScheduled", operationName);
        } else if (previousOperation.equals("@OnScheduled")) {
            assertEquals("@OnUnscheduled", operationName);
        } else if (previousOperation.equals("@OnUnscheduled")) {
            assertTrue(operationName.equals("@OnStopped") || operationName.equals("@OnScheduled"));
        }
        previousOperation = operationName;
    }
    executor.shutdownNow();
}

From source file:de.hybris.platform.servicelayer.tx.ItemModelTransactionTest.java
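shutdownNow() sits in the finally block so the single-thread pool is torn down whether the callable completes, fails, or times out.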

private <V> V runInOtherThread(final Callable<V> callable, final int timeoutSeconds) {
    final ExecutorService pool = Executors.newFixedThreadPool(1, new ThreadFactory() {
        final Tenant tenant = Registry.getCurrentTenantNoFallback();

        @Override
        public Thread newThread(final Runnable r) {
            return new Thread(r) {
                @Override
                public void run() {
                    try {
                        Registry.setCurrentTenant(tenant);
                        super.run();
                    } finally {
                        JaloSession.deactivate();
                        Registry.unsetCurrentTenant();
                    }
                }
            };
        }
    });
    try {
        final Future<V> future = pool.submit(callable);
        return future.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        fail("interrupted while waiting");
    } catch (final ExecutionException e) {
        fail("unexpected execution exception " + e.getCause());
    } catch (final TimeoutException e) {
        fail("callable " + callable + " did not finish within maximum " + timeoutSeconds + " seconds to wait");
    } finally {
        pool.shutdownNow();
    }
    return null;
}

From source file:cloudlens.notebook.JSInterpreter.java
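The pool's threads override interrupt() to call stop(), so the shutdownNow() in the finally block forcibly terminates a script that is still running.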

public InterpreterResult interpret(Callable<BlockObject> task, CL cl) {
    if (cl.out instanceof ByteArrayOutputStream) {
        ((ByteArrayOutputStream) cl.out).reset();
    }
    if (cl.err instanceof ByteArrayOutputStream) {
        ((ByteArrayOutputStream) cl.err).reset();
    }
    final ExecutorService executor = Executors.newCachedThreadPool(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r) {
                @Override
                public void interrupt() {
                    stop();
                }
            };
        }
    });
    cl.future = executor.submit(task);
    final Gson gson = new GsonBuilder().create();
    try {
        final BlockObject obj = cl.future.get();
        cl.outWriter.flush();
        cl.errWriter.flush();
        if (obj instanceof InterpreterResult) {
            return (InterpreterResult) obj;
        }
        if (cl.out instanceof ByteArrayOutputStream && ((ByteArrayOutputStream) cl.out).size() == 0) {
            if (null != obj && obj.isMapArray()) {
                final Map<String, Map<String, Object>> entries = obj.asMapArray();
                cl.outWriter.print("%table\n");
                int i = 0;
                for (final Map<?, ?> entry : entries.values()) {
                    cl.outWriter.print("\n");
                    if (++i > maxResult) {
                        cl.outWriter.println(
                                "%html <font color=red>Results are limited by zeppelin.cloudlens.maxResult = "
                                        + maxResult + ".</font>");
                        break;
                    }
                    for (final Map.Entry<?, ?> field : entry.entrySet()) {
                        cl.outWriter.print("%html <font color=blue>"
                                + StringEscapeUtils.escapeHtml4(field.getKey().toString()) + "</font>:"
                                + StringEscapeUtils.escapeHtml4(gson.toJson(field.getValue()).toString())
                                + "\t");
                    }
                }
            } else {
                cl.engine.bind("__Result__", obj);
                cl.engine.eval(
                        "print(JSON.stringify(__Result__, function(key, val) { if (typeof val === 'function') return val + ''; return val; }, 2))");
            }
        }
        // }
    } catch (final InterruptedException | ExecutionException e) {
        return new InterpreterResult(Code.ERROR, InterpreterUtils.getMostRelevantMessage(e));
    } finally {
        cl.outWriter.flush();
        cl.errWriter.flush();
        executor.shutdownNow();
    }
    return new InterpreterResult(Code.SUCCESS, cl.out.toString());
}

From source file:de.ingrid.mdek.quartz.jobs.URLValidatorJob.java
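shutdownNow() is the cancellation path here: if the job is flagged as cancelled mid-run, pending URL validations are abandoned; otherwise the executor is shut down gracefully with shutdown().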

@Override
protected void executeInternal(JobExecutionContext jobExecutionContext) throws JobExecutionException {

    ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
    JobDataMap mergedJobDataMap = jobExecutionContext.getMergedJobDataMap();
    Map<String, URLState> urlMap = (Map<String, URLState>) mergedJobDataMap.get(URL_MAP);
    List<URLValidator> validatorTasks = new ArrayList<URLValidator>(urlMap.size());

    Map<String, URLState> capabilitiesMap = (Map<String, URLState>) mergedJobDataMap.get(CAP_URL_MAP);
    List<CapabilitiesValidator> capabilitiesValidatorTasks = new ArrayList<CapabilitiesValidator>(
            capabilitiesMap.size());

    HttpClientParams httpClientParams = new HttpClientParams();
    httpClientParams.setConnectionManagerTimeout(0);
    httpClientParams.setSoTimeout(SOCKET_TIMEOUT);
    HttpConnectionParams httpParams = new HttpConnectionParams();
    httpParams.setConnectionTimeout(CONNECTION_TIMEOUT);
    httpClientParams.setDefaults(httpParams);
    HttpClient httpClient = new HttpClient(httpClientParams, new MultiThreadedHttpConnectionManager());
    if (System.getProperty("http.proxyHost") != null && System.getProperty("http.proxyPort") != null) {
        httpClient.getHostConfiguration().setProxy(System.getProperty("http.proxyHost"),
                Integer.parseInt(System.getProperty("http.proxyPort")));
    }
    for (URLState urlState : urlMap.values()) {
        validatorTasks.add(new URLValidator(httpClient, urlState));
    }
    for (URLState urlState : capabilitiesMap.values()) {
        capabilitiesValidatorTasks.add(new CapabilitiesValidator(httpClient, urlState));
    }

    log.debug("Starting url validation...");
    long startTime = System.currentTimeMillis();
    List<Future<URLState>> resultFutureList = new ArrayList<Future<URLState>>();
    for (URLValidator validator : validatorTasks) {
        resultFutureList.add(executorService.submit(validator));
    }
    for (CapabilitiesValidator validator : capabilitiesValidatorTasks) {
        resultFutureList.add(executorService.submit(validator));
    }

    for (Future<URLState> future : resultFutureList) {
        try {
            if (!cancelJob) {
                future.get();

            } else {
                log.debug("forcing shutdown of executor service...");
                executorService.shutdownNow();
                break;
            }

        } catch (Exception ex) {
            log.debug("Exception while fetching result from future.", ex);
        }
    }
    long endTime = System.currentTimeMillis();
    log.debug("URL Validation took " + (endTime - startTime) + " ms.");

    executorService.shutdown();

    // Only store if job was not cancelled
    if (!cancelJob) {
        Map<String, List<URLObjectReference>> map = new HashMap<String, List<URLObjectReference>>();
        map.put(MdekKeys.URL_RESULT, (List<URLObjectReference>) mergedJobDataMap.get(URL_OBJECT_REFERENCES));
        map.put(MdekKeys.CAP_RESULT, (List<URLObjectReference>) mergedJobDataMap.get(CAPABILITIES_REFERENCES));
        jobExecutionContext.setResult(map);
    }
}

From source file:org.openiam.idm.srvc.synch.srcadapter.RDBMSAdapter.java
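A JVM shutdown hook first attempts an orderly shutdown() with awaitTermination(), falling back to shutdownNow() and logging how many queued tasks were dropped.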

@Override
public SyncResponse startSynch(final SynchConfig config, SynchReviewEntity sourceReview,
        final SynchReviewEntity resultReview) {

    log.debug("RDBMS SYNCH STARTED ^^^^^^^^");

    SyncResponse res = new SyncResponse(ResponseStatus.SUCCESS);
    SynchReview review = null;
    if (sourceReview != null) {
        review = synchReviewDozerConverter.convertToDTO(sourceReview, false);
    }
    LineObject rowHeaderForReport = null;
    InputStream input = null;

    try {
        final ValidationScript validationScript = org.mule.util.StringUtils.isNotEmpty(
                config.getValidationRule()) ? SynchScriptFactory.createValidationScript(config, review) : null;
        final List<TransformScript> transformScripts = SynchScriptFactory.createTransformationScript(config,
                review);
        final MatchObjectRule matchRule = matchRuleFactory.create(config.getCustomMatchRule()); // check if matchRule exists

        if (validationScript == null || transformScripts == null || matchRule == null) {
            res = new SyncResponse(ResponseStatus.FAILURE);
            res.setErrorText("The problem in initialization of RDBMSAdapter, please check validationScript= "
                    + validationScript + ", transformScripts=" + transformScripts + ", matchRule=" + matchRule
                    + " all must be set!");
            res.setErrorCode(ResponseCode.INVALID_ARGUMENTS);
            return res;
        }

        if (sourceReview != null && !sourceReview.isSourceRejected()) {
            return startSynchReview(config, sourceReview, resultReview, validationScript, transformScripts,
                    matchRule);
        }

        if (!connect(config)) {
            SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
            resp.setErrorCode(ResponseCode.FAIL_SQL_ERROR);
            return resp;
        }

        java.util.Date lastExec = null;

        if (config.getLastExecTime() != null) {
            lastExec = config.getLastExecTime();
        }
        final String changeLog = config.getQueryTimeField();
        StringBuilder sql = new StringBuilder(config.getQuery());
        // if it's an incremental synch, add the change log parameter
        if (config.getSynchType().equalsIgnoreCase("INCREMENTAL")) {
            // execute the query
            if (StringUtils.isNotEmpty(sql.toString()) && (lastExec != null)) {

                String temp = sql.toString().toUpperCase();
                // strip off any trailing semicolons; not needed for JDBC
                temp = StringUtils.removeEnd(temp, ";");

                if (temp.contains("WHERE")) {
                    sql.append(" AND ");
                } else {
                    sql.append(" WHERE ");
                }
                sql.append(changeLog).append(" >= ?");
            }
        }

        log.debug("-SYNCH SQL=" + sql.toString());
        log.debug("-last processed record =" + lastExec);

        PreparedStatement ps = con.prepareStatement(sql.toString());
        if (config.getSynchType().equalsIgnoreCase("INCREMENTAL") && (lastExec != null)) {
            ps.setTimestamp(1, new Timestamp(lastExec.getTime()));
        }
        ResultSet rs = ps.executeQuery();

        // get the list of columns
        ResultSetMetaData rsMetadata = rs.getMetaData();
        DatabaseUtil.populateTemplate(rsMetadata, rowHeader);

        //Read Resultset to List
        List<LineObject> results = new LinkedList<LineObject>();
        while (rs.next()) {
            LineObject rowObj = rowHeader.copy();
            DatabaseUtil.populateRowObject(rowObj, rs, changeLog);
            results.add(rowObj);
        }

        // test
        log.debug("Result set contains following number of columns : " + rowHeader.getColumnMap().size());

        // Multithreading
        int allRowsCount = results.size();
        if (allRowsCount > 0) {
            int threadCount = THREAD_COUNT;
            int rowsInOneExecutors = allRowsCount / threadCount;
            int remains = rowsInOneExecutors > 0 ? allRowsCount % (rowsInOneExecutors * threadCount) : 0;
            if (remains != 0) {
                threadCount++;
            }
            log.debug("Thread count = " + threadCount + "; Rows in one thread = " + rowsInOneExecutors
                    + "; Remaining rows = " + remains);
            System.out.println("Thread count = " + threadCount + "; Rows in one thread = " + rowsInOneExecutors
                    + "; Remaining rows = " + remains);
            List<Future> threadResults = new LinkedList<Future>();
            // store the latest processed record by thread index
            final Map<String, Timestamp> recentRecordByThreadInx = new HashMap<String, Timestamp>();
            final ExecutorService service = Executors.newCachedThreadPool();
            for (int i = 0; i < threadCount; i++) {
                final int threadIndx = i;
                // Start index for the current thread
                final int startIndex = i * rowsInOneExecutors;
                int shiftIndex = threadCount > THREAD_COUNT && i == threadCount - 1 ? remains
                        : rowsInOneExecutors;
                // The slice of rows that this thread should process
                final List<LineObject> part = results.subList(startIndex, startIndex + shiftIndex);
                threadResults.add(service.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            Timestamp mostRecentRecord = proccess(config, resultReview, provService, part,
                                    validationScript, transformScripts, matchRule, resultReview, startIndex);
                            recentRecordByThreadInx.put("Thread_" + threadIndx, mostRecentRecord);
                        } catch (ClassNotFoundException e) {
                            log.error(e);
                            /*
                            synchStartLog.updateSynchAttributes("FAIL", ResponseCode.CLASS_NOT_FOUND.toString(), e.toString());
                            auditHelper.logEvent(synchStartLog);
                            */
                        }
                    }
                }));
                // Give each thread THREAD_DELAY_BEFORE_START to come up (load caches and begin its work)
                Thread.sleep(THREAD_DELAY_BEFORE_START);
            }
            Runtime.getRuntime().addShutdownHook(new Thread() {
                public void run() {
                    service.shutdown();
                    try {
                        if (!service.awaitTermination(SHUTDOWN_TIME, TimeUnit.MILLISECONDS)) { //optional *
                            log.warn("Executor did not terminate in the specified time."); //optional *
                            List<Runnable> droppedTasks = service.shutdownNow(); //optional **
                            log.warn("Executor was abruptly shut down. " + droppedTasks.size()
                                    + " tasks will not be executed."); //optional **
                        }
                    } catch (InterruptedException e) {
                        log.error(e);
                        /*
                        synchStartLog.updateSynchAttributes("FAIL", ResponseCode.INTERRUPTED_EXCEPTION.toString(), e.toString());
                        auditHelper.logEvent(synchStartLog);
                        */
                        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
                        resp.setErrorCode(ResponseCode.INTERRUPTED_EXCEPTION);
                    }
                }
            });
            waitUntilWorkDone(threadResults);

        }
    } catch (ClassNotFoundException cnfe) {
        log.error(cnfe);
        res = new SyncResponse(ResponseStatus.FAILURE);
        res.setErrorCode(ResponseCode.CLASS_NOT_FOUND);
        return res;
    } catch (FileNotFoundException fe) {
        fe.printStackTrace();
        log.error(fe);
        //            auditBuilder.addAttribute(AuditAttributeName.DESCRIPTION, "FileNotFoundException: "+fe.getMessage());
        //            auditLogProvider.persist(auditBuilder);
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.FILE_EXCEPTION);
        log.debug("RDBMS SYNCHRONIZATION COMPLETE WITH ERRORS ^^^^^^^^");
        return resp;
    } catch (IOException io) {
        io.printStackTrace();
        /*
        synchStartLog.updateSynchAttributes("FAIL", ResponseCode.IO_EXCEPTION.toString(), io.toString());
        auditHelper.logEvent(synchStartLog);
        */
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.IO_EXCEPTION);
        log.debug("RDBMS SYNCHRONIZATION COMPLETE WITH ERRORS ^^^^^^^^");
        return resp;

    } catch (SQLException se) {

        log.error(se);
        closeConnection();
        /*
        synchStartLog.updateSynchAttributes("FAIL", ResponseCode.SQL_EXCEPTION.toString(), se.toString());
        auditHelper.logEvent(synchStartLog);
        */
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.SQL_EXCEPTION);
        resp.setErrorText(se.toString());
        return resp;

    } catch (InterruptedException e) {
        log.error(e);
        SyncResponse resp = new SyncResponse(ResponseStatus.FAILURE);
        resp.setErrorCode(ResponseCode.INTERRUPTED_EXCEPTION);

    } finally {
        if (resultReview != null) {
            if (CollectionUtils.isNotEmpty(resultReview.getReviewRecords())) { // add header row
                resultReview.addRecord(generateSynchReviewRecord(rowHeader, true));
            }
        }

        closeConnection();
    }

    log.debug("RDBMS SYNCH COMPLETE.^^^^^^^^");
    return new SyncResponse(ResponseStatus.SUCCESS);

}

From source file:org.extensiblecatalog.ncip.v2.voyager.VoyagerLookupItemSetService.java
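After invokeAll() has returned (or failed), shutdownNow() in the finally block releases the threads used for the per-bib holdings lookups.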

/**
 * Handles an NCIP LookupItemSet service by returning data from Voyager.
 *
 * @param initData       the LookupItemSetInitiationData
 * @param serviceManager provides access to remote services
 * @return LookupItemSetResponseData
 */
@Override
public LookupItemSetResponseData performService(LookupItemSetInitiationData initData,
        ServiceContext serviceContext, RemoteServiceManager serviceManager) throws ServiceException {

    voyagerSvcMgr = (VoyagerRemoteServiceManager) serviceManager;
    LookupItemSetResponseData luisResponseData = new LookupItemSetResponseData();
    List<Problem> problems = new ArrayList<Problem>();
    Date sService = new Date();

    log.info("Performing LUIS service.");

    List<BibliographicId> bibIds = initData.getBibliographicIds();
    if (bibIds == null) {
        problems.addAll(ServiceHelper.generateProblems(Version1GeneralProcessingError.NEEDED_DATA_MISSING, null,
                null, "Missing Bib IDs"));
        luisResponseData.setProblems(problems);
        return luisResponseData;
    }

    List<HoldingsSet> holdingSets = new ArrayList<HoldingsSet>();
    List<String> holdingIds = null;
    List<String> itemIds = null;
    Document holdingsDocFromRestful = new Document();
    Document holdingsDocFromXml = new Document();

    int itemCount = 0;
    boolean reachedMaxItemCount = false;

    String token = initData.getNextItemToken();
    ItemToken nextItemToken = null;
    if (token != null) {
        nextItemToken = tokens.get(token);
        if (nextItemToken != null) {
            int index = getIndexOfBibId(bibIds, nextItemToken.getBibliographicId());
            if (index != -1) {
                bibIds.subList(0, index).clear();
            }
            // Remove token from memory hashmap
            tokens.remove(token);
        } else {
            problems.addAll(
                    ServiceHelper.generateProblems(Version1GeneralProcessingError.TEMPORARY_PROCESSING_FAILURE,
                            null, token, "Invalid nextItemToken"));
            luisResponseData.setProblems(problems);
            return luisResponseData;
        }
        log.debug("after removing already processed Bib ids = " + bibIds);
    }

    // Retrieve XML from vxws web services
    List<Document> holdingsDocsFromRestful = new ArrayList<Document>(bibIds.size());
    List<Document> holdingsDocsFromXml = new ArrayList<Document>(bibIds.size());
    int i;
    int numBibs = bibIds.size();

    // In a non-consortial environment, most LUIS calls will be on a single bib
    if (numBibs < 2) {
        for (i = 0; i < numBibs; i++) {
            BibliographicId bibId = bibIds.get(i);
            holdingsDocsFromRestful.add(i, getHoldingRecordsFromRestful(bibId));
            holdingsDocsFromXml.add(i, getHoldingRecordsFromXml(bibId));
        }
        // But in a consortial environment, there can be many bibs in a LUIS call
        // For now, let's look up each bib in a separate thread; perhaps later, we may need to get more sophisticated/smart w/r/t resources?
    } else {
        ExecutorService exec = Executors.newFixedThreadPool(numBibs);
        List<Future<HoldingInfoFromWeb>> futureList = new ArrayList<Future<HoldingInfoFromWeb>>(numBibs);
        List<Callable<HoldingInfoFromWeb>> callList = new ArrayList<Callable<HoldingInfoFromWeb>>(numBibs);

        for (i = 0; i < numBibs; i++) {
            BibliographicId bibId = bibIds.get(i);
            callList.add(new HoldingInfoFromWeb(i, bibId));
        }
        try {
            futureList = exec.invokeAll(callList);
            for (Future<HoldingInfoFromWeb> future : futureList) {
                HoldingInfoFromWeb result = future.get();
                holdingsDocsFromRestful.add(result.getIndex(), result.getRestful());
                holdingsDocsFromXml.add(result.getIndex(), result.getXml());
            }
        } catch (InterruptedException ex) {
            log.error("Error calling vxws services via ExecutorService: " + ex);
        } catch (ExecutionException ex) {
            log.error("Error calling vxws services via ExecutorService: " + ex);
        } finally {
            exec.shutdownNow();
        }
    }

    List<BibInformation> bibInformations = new ArrayList<BibInformation>();

    for (i = 0; i < numBibs; i++) {
        BibliographicId bibId = bibIds.get(i);

        String id = null;
        String itemAgencyId = null;

        id = bibId.getBibliographicRecordId().getBibliographicRecordIdentifier();
        itemAgencyId = bibId.getBibliographicRecordId().getAgencyId().getValue();

        try {
            BibInformation bibInformation = new BibInformation();
            bibInformation.setBibliographicId(bibId);

            if (!checkValidAgencyId(itemAgencyId)) {
                log.error("Unrecognized Bibliographic Record Agency Id: " + itemAgencyId);
                problems.addAll(
                        ServiceHelper.generateProblems(Version1GeneralProcessingError.NEEDED_DATA_MISSING, null,
                                null, "Unrecognized Bibliographic Record Agency Id"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue;
            }

            // Is the bib field empty?
            if (id.equals("") || itemAgencyId.equals("")) {
                log.error("Missing Bib Id or Agency Id");
                problems.addAll(
                        ServiceHelper.generateProblems(Version1GeneralProcessingError.NEEDED_DATA_MISSING, null,
                                null, "Missing Bib ID or item Agency Id"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue;
            }

            // We had already called these services in the beginning
            holdingsDocFromRestful = holdingsDocsFromRestful.get(i);
            holdingsDocFromXml = holdingsDocsFromXml.get(i);

            if (holdingsDocFromXml == null) {
                problems.addAll(ServiceHelper.generateProblems(
                        Version1GeneralProcessingError.TEMPORARY_PROCESSING_FAILURE, null, id,
                        "Problem contacting the vxws service"));
                luisResponseData.setProblems(problems);
                return luisResponseData;
            }

            if (!doesRecordExist(holdingsDocFromXml)) {
                log.error("Record does not exist");
                problems.addAll(ServiceHelper.generateProblems(Version1LookupItemProcessingError.UNKNOWN_ITEM,
                        null, id, "Record does not exist"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue;
            }

            // Get holding Ids belonging to this bib
            holdingIds = getHoldingIdsFromHoldingDoc(holdingsDocFromXml);

            if (nextItemToken != null) {
                int index = holdingIds.indexOf(nextItemToken.getHoldingsId());
                if (index != -1) {
                    holdingIds.subList(0, index).clear();
                }
            }

            if (holdingIds == null) {
                log.error("Bib does not have a holding record associated with it");
                problems.addAll(ServiceHelper.generateProblems(Version1LookupItemProcessingError.UNKNOWN_ITEM,
                        null, id, "Record does not have a holding record associated with it"));
                bibInformation.setProblems(problems);
                bibInformations.add(bibInformation);
                continue; // Bib record exists but has no Holding records
            }

            // Set bib desc
            BibliographicDescription bDesc = null;
            if (initData.getBibliographicDescriptionDesired()) {
                bDesc = getBibliographicDescriptionForBibId(holdingsDocFromXml);
                bibInformation.setBibliographicDescription(bDesc);
            }

            // title hold queue length
            // Ignoring in vxws release
            /*BigDecimal titleHoldQueue = voyagerSvcMgr.getTitleLevelHoldQueueLength(id);
            if (titleHoldQueue != null) {
            bibInformation.setTitleHoldQueueLength(titleHoldQueue);
            }*/

            holdingSets = new ArrayList<HoldingsSet>();

            // Build HoldingSet with items in it
            for (String holdingId : holdingIds) {
                log.debug("Processing Holding id = " + holdingId);
                itemIds = getItemIdsFromHoldingDoc(holdingId, holdingsDocFromRestful);
                log.debug("All itemIds: " + itemIds);

                // We need to distinguish between Holdings with actual Item Records vs. ones without.
                // This is so we may parse Item Record-less Holdings records differently, e.g.,
                // add item-like info to them later (sans ItemID, of course, since they don't refer to actual Item Records).
                // There are quite a few instances of Holdings without Item Records, but we still want to supply
                // useful information about them. The major (only?) difference will be the lack of ItemID.
                boolean hasItems = false;
                // Get Bib desc, holding set info only if items exist for that holdings
                if (itemIds != null && itemIds.size() > 0) {
                    hasItems = true;
                }

                if (nextItemToken != null) {
                    int index = itemIds.indexOf(nextItemToken.getItemId());
                    log.debug("Index of nextitem: " + index);
                    if (index != -1) {
                        itemIds.subList(0, index + 1).clear();
                    }
                    log.debug("after removing already processed item ids = " + itemIds);
                    if (itemIds.size() < 1) {
                        continue;
                    }
                }

                HoldingsSet holdingSet = new HoldingsSet();
                // Set Bib Id and holdings set id
                holdingSet.setHoldingsSetId(holdingId);

                //                    if (initData.getElectronicResourceDesired()) {
                ElectronicResource eResource = getElectronicResourceForHoldingId(holdingId, holdingsDocFromXml);
                if (eResource != null) {
                    holdingSet.setElectronicResource(eResource);
                }
                //                    }

                String callNumber = null;
                if (hasItems) {
                    callNumber = getCallNumberForHoldingDoc(holdingId, holdingsDocFromRestful);
                } else {
                    callNumber = getCallNumberForHoldingDocFromXml(holdingId, holdingsDocFromXml);
                }
                if (callNumber != null) {
                    holdingSet.setCallNumber(callNumber);
                }

                if (hasItems) {
                    int newItemCount = itemCount + itemIds.size();

                    if (newItemCount > MAX_ITEMS_TO_RETURN) {
                        itemIds = getItemIdSubset(itemIds, itemCount);
                        log.debug("Subset itemIds: " + itemIds);
                    }

                    Map<String, ItemInformation> itemInformations = new HashMap<String, ItemInformation>();

                    for (String itemId : itemIds) {
                        ItemInformation itemInformation = new ItemInformation();
                        ItemId item = new ItemId();
                        item.setItemIdentifierValue(itemId);
                        item.setAgencyId(new AgencyId(itemAgencyId));
                        itemInformation.setItemId(item);
                        itemInformations.put(itemId, itemInformation);
                    }

                    Map<String, String> statuses = null;
                    if (initData.getCirculationStatusDesired()) {
                        statuses = getCirculationStatusForItemIds(itemIds, holdingsDocFromRestful);
                    }

                    // TODO: Double check that this really isn't available through GetHoldings
                    /* Ignoring for vxws release
                    Map<String, BigDecimal> lengths = null;
                    if (initData.getHoldQueueLengthDesired()) {
                    lengths = voyagerSvcMgr.getHoldQueueLengthForItemIds(itemIds);
                    }
                    */

                    Map<String, ItemDescription> itemDescriptions = null;
                    //if (initData.getItemDescriptionDesired()) {
                    itemDescriptions = getItemDescriptionForItemIds(itemIds, holdingsDocFromRestful);
                    //}

                    Map<String, Location> locations = null;
                    if (initData.getLocationDesired()) {
                        locations = getLocationForItemIds(itemIds, holdingsDocFromRestful);
                    }

                    Map<String, String> copyNumbers = new HashMap<String, String>();
                    Iterator<String> itrId = itemDescriptions.keySet().iterator();
                    while (itrId.hasNext()) {
                        String key = itrId.next();
                        copyNumbers.put(itemDescriptions.get(key).getCopyNumber(), key);
                    }

                    Map<String, GregorianCalendar> dueDates = null;
                    dueDates = getDueDateForItemIds(itemIds, holdingsDocFromXml, copyNumbers, holdingId);

                    Iterator<String> itr = itemInformations.keySet().iterator();
                    while (itr.hasNext()) {
                        ItemOptionalFields iof = new ItemOptionalFields();
                        String key = itr.next();
                        if (statuses != null) {
                            String status = statuses.get(key);
                            log.debug("Status for key " + status);
                            try {
                                if (statuses.get(key) != null) {
                                    iof.setCirculationStatus(XcCirculationStatus
                                            .find(XcCirculationStatus.XC_CIRCULATION_STATUS, status));
                                }
                            } catch (ServiceException se) {
                                log.error("Unrecognized item status");
                            }
                        }

                        /*  Ignoring for vxws release
                        if (lengths != null) {
                        iof.setHoldQueueLength(lengths.get(key));
                        } */

                        if (itemDescriptions != null) {
                            iof.setItemDescription(itemDescriptions.get(key));
                        }

                        if (locations != null) {
                            //List<Location> tempLocations = locations.get(key);
                            List<Location> tempLocations = new ArrayList<Location>();
                            tempLocations.add(locations.get(key));
                            if (tempLocations != null) {
                                LocationNameInstance lni = tempLocations.get(0).getLocationName()
                                        .getLocationNameInstances().get(0);
                                iof.setLocations(tempLocations);
                            }
                        }

                        ItemInformation itemInformation = itemInformations.get(key);
                        itemInformation.setItemOptionalFields(iof);
                        if (dueDates != null) {
                            itemInformation.setDateDue(dueDates.get(key));
                        }
                        itemInformations.put(key, itemInformation);
                    }

                    holdingSet.setItemInformations(new ArrayList<ItemInformation>(itemInformations.values()));

                    itemCount = itemCount + itemIds.size();
                    log.debug("Item count: " + itemCount);

                    if (itemCount == MAX_ITEMS_TO_RETURN) {
                        // Set next item token
                        ItemToken itemToken = new ItemToken();
                        itemToken.setBibliographicId(itemAgencyId + "_" + id);
                        itemToken.setHoldingsId(holdingId);
                        itemToken.setItemId(itemIds.get(itemIds.size() - 1));
                        int newToken = random.nextInt();
                        itemToken.setNextToken(Integer.toString(newToken));
                        tokens.put(Integer.toString(newToken), itemToken);

                        luisResponseData.setNextItemToken(Integer.toString(newToken));

                        reachedMaxItemCount = true;

                        log.info("Adding new holding set");
                        holdingSets.add(holdingSet);

                        break;
                    }

                    // No Item Records, but we still want to return some Holdings info.
                } else {
                    itemCount = itemCount + 1;
                    log.debug("Item count: " + itemCount);

                    Map<String, ItemInformation> itemInformations = new HashMap<String, ItemInformation>();
                    ItemInformation itemInformation = new ItemInformation();
                    ItemId item = new ItemId();
                    item.setItemIdentifierValue("");
                    item.setAgencyId(new AgencyId(itemAgencyId));

                    itemInformation.setItemId(item);

                    /*** plug in item-like information for the non-item ***/
                    ItemOptionalFields iof2 = new ItemOptionalFields();
                    // Set location
                    Location location = null;
                    if (initData.getLocationDesired()) {
                        location = getLocationForHoldingDocFromXml(holdingId, holdingsDocFromXml);
                        if (location != null) {
                            List<Location> tempLocations2 = new ArrayList<Location>();
                            tempLocations2.add(location);
                            iof2.setLocations(tempLocations2);
                        }
                    }
                    itemInformation.setItemOptionalFields(iof2);
                    itemInformations.put("N/A", itemInformation);
                    holdingSet.setItemInformations(new ArrayList<ItemInformation>(itemInformations.values()));

                    if (itemCount == MAX_ITEMS_TO_RETURN) {
                        // Set next item token
                        ItemToken itemToken = new ItemToken();
                        itemToken.setBibliographicId(itemAgencyId + "_" + id);
                        itemToken.setHoldingsId(holdingId);
                        itemToken.setItemId("");
                        int newToken = random.nextInt();
                        itemToken.setNextToken(Integer.toString(newToken));
                        tokens.put(Integer.toString(newToken), itemToken);

                        luisResponseData.setNextItemToken(Integer.toString(newToken));

                        reachedMaxItemCount = true;

                        log.info("Adding new holding set");
                        holdingSets.add(holdingSet);

                        break;
                    }
                }

                log.info("Adding new holding set");
                holdingSets.add(holdingSet);
            }

            if (holdingIds.size() != 0) {
                bibInformation.setHoldingsSets(holdingSets);
            }

            bibInformations.add(bibInformation);

            if (reachedMaxItemCount) {
                break;
            }

        } catch (ILSException e) {
            Problem p = new Problem();
            p.setProblemType(new ProblemType("Processing error"));
            p.setProblemDetail(e.getMessage());
            problems.add(p);
            luisResponseData.setProblems(problems);
        }
    }

    Date eService = new Date();
    log.debug("LUIS Service time log : " + (eService.getTime() - sService.getTime()) + "  "
            + ((eService.getTime() - sService.getTime()) / 1000) + " sec");

    luisResponseData.setBibInformations(bibInformations);

    return luisResponseData;
}

From source file:andromache.hadoop.CassandraInputFormat.java
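Split computation is farmed out to a cached thread pool, one task per token range; once all futures have been collected, shutdownNow() in the finally block disposes of the pool.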

public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = CassandraConfigHelper.getInputKeyspace(context.getConfiguration());

    cfNames = CassandraConfigHelper.getInputColumnFamilies(context.getConfiguration());

    // TODO: [IS] make sure this partitioner matches what is set on each participating keyspace
    partitioner = CassandraConfigHelper.getInputPartitioner(context.getConfiguration());
    logger.debug("partitioner is " + partitioner);

    // canonical ranges, split into pieces, fetching the splits in parallel

    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        List<Future<List<CassandraSplit>>> splitfutures = new ArrayList<Future<List<CassandraSplit>>>();
        KeyRange jobKeyRange = CassandraConfigHelper.getInputKeyRange(conf);
        Range<Token> jobRange = null;
        if (jobKeyRange != null && jobKeyRange.start_token != null) {
            assert partitioner
                    .preservesOrder() : "ConfigHelper.setInputKeyRange(..) can only be used with an order preserving partitioner";
            assert jobKeyRange.start_key == null : "only start_token supported";
            assert jobKeyRange.end_key == null : "only end_token supported";
            jobRange = new Range<Token>(partitioner.getTokenFactory().fromString(jobKeyRange.start_token),
                    partitioner.getTokenFactory().fromString(jobKeyRange.end_token), partitioner);
        }

        for (TokenRange range : masterRangeNodes) {
            if (jobRange == null) {
                // for each range, pick a live owner and ask it to compute bite-sized splits

                splitfutures.add(executor.submit(new SplitCallable(range, conf)));
            } else {
                Range<Token> dhtRange = new Range<Token>(
                        partitioner.getTokenFactory().fromString(range.start_token),
                        partitioner.getTokenFactory().fromString(range.end_token), partitioner);

                if (dhtRange.intersects(jobRange)) {
                    for (Range<Token> intersection : dhtRange.intersectionWith(jobRange)) {
                        range.start_token = partitioner.getTokenFactory().toString(intersection.left);
                        range.end_token = partitioner.getTokenFactory().toString(intersection.right);
                        // for each range, pick a live owner and ask it to compute bite-sized splits
                        splitfutures.add(executor.submit(new SplitCallable(range, conf)));
                    }
                }
            }
        }

        // wait until we have all the results back
        for (Future<List<CassandraSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}