Example usage for java.util.concurrent ExecutorService shutdown

List of usage examples for java.util.concurrent ExecutorService shutdown

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService.shutdown().

Prototype

void shutdown();

Document

Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted.
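As a quick illustration of this contract, here is a minimal, self-contained sketch of the common shutdown-then-awaitTermination pattern. It is not taken from any of the projects below; the class name, pool size, and timeout are arbitrary choices made for illustration.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        // Pool size is an arbitrary choice for this sketch.
        ExecutorService executor = Executors.newFixedThreadPool(2);

        executor.submit(() -> System.out.println("previously submitted task still runs"));

        // Stop accepting new work; tasks already submitted are still executed.
        executor.shutdown();

        // shutdown() does not block, so bound the wait explicitly and
        // force-cancel anything still running after the timeout.
        if (!executor.awaitTermination(30, TimeUnit.SECONDS)) {
            executor.shutdownNow();
        }
    }
}

With the default rejection policy, anything submitted after shutdown() is rejected with a RejectedExecutionException, which is why the examples below only call shutdown() once all work has been submitted.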

Usage

From source file:com.linkedin.pinot.integration.tests.HybridClusterIntegrationTest.java

@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class
            .getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))),
            _tmpDir);

    _tmpDir.mkdirs();

    final List<File> avroFiles = getAllAvroFiles();

    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();

    // Create Pinot table
    addHybridTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn,
            invertedIndexColumns, null);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = "
            + invertedIndexColumns);

    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance",
            InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {
        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList,
                NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains("mytable")) {

                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;

                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }

                        if (onlinePartitionCount == offlineSegmentCount) {
                            System.out.println("Got " + offlineSegmentCount
                                    + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);

    // Wait until the Pinot event count matches with the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;

    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    h2RecordCount = rs.getInt(1);
    rs.close();

    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}

From source file:com.pliu.azuremgmtsdk.BasicFilter.java

private AuthenticationResult getAccessToken(AuthorizationCode authorizationCode, String currentUri)
        throws Throwable {
    String authCode = authorizationCode.getValue();
    ClientCredential credential = new ClientCredential(clientId, clientSecret);
    AuthenticationContext context;
    AuthenticationResult result = null;
    ExecutorService service = null;
    try {
        service = Executors.newFixedThreadPool(1);
        context = new AuthenticationContext(authority + userTenant + "/", true, service);
        Future<AuthenticationResult> future = context.acquireTokenByAuthorizationCode(authCode,
                new URI(currentUri), credential, apiEndpoint, null);
        result = future.get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        service.shutdown();
    }

    if (result == null) {
        throw new ServiceUnavailableException("authentication result was null");
    }
    return result;
}

From source file:com.pliu.azuremgmtsdk.BasicFilter.java

private AuthenticationResult getAccessTokenFromRefreshToken(String refreshToken) throws Throwable {
    AuthenticationContext context;
    AuthenticationResult result = null;
    ExecutorService service = null;
    try {
        service = Executors.newFixedThreadPool(1);
        context = new AuthenticationContext(authority + userTenant + "/", true, service);
        Future<AuthenticationResult> future = context.acquireTokenByRefreshToken(refreshToken,
                new ClientCredential(clientId, clientSecret), null, null);
        result = future.get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        service.shutdown();
    }

    if (result == null) {
        throw new ServiceUnavailableException("authentication result was null");
    }
    return result;
}

From source file:edu.lternet.pasta.portal.HarvesterServlet.java

/**
 * The doPost method of the servlet. <br>
 *
 * This method is called when a form has its method attribute set to post.
 * 
 * @param request
 *          the request sent by the client to the server
 * @param response
 *          the response sent by the server to the client
 * @throws ServletException
 *           if an error occurred
 * @throws IOException
 *           if an error occurred
 */
public void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    HttpSession httpSession = request.getSession();
    ServletContext servletContext = httpSession.getServletContext();
    ArrayList<String> documentURLs = null;
    File emlFile = null;
    String emlTextArea = null;
    Harvester harvester = null;
    String harvestId = null;
    String harvestListURL = null;
    String harvestReportId = null;
    boolean isDesktopUpload = false;
    boolean isEvaluate = false;
    String uid = (String) httpSession.getAttribute("uid");
    String urlTextArea = null;
    String warningMessage = "";

    try {
        if (uid == null) {
            throw new PastaAuthenticationException(LOGIN_WARNING);
        } else {
            /*
             * The "metadataSource" request parameter can have a value of
             * "emlText", "emlFile", "urlList", "harvestList", or
             * "desktopHarvester". It is set as a hidden input field in 
             * each of the harvester forms.
             */
            String metadataSource = request.getParameter("metadataSource");

            /*
             * "mode" can have a value of "evaluate" or "upgrade". It is set
             * as the value of the submit button in each of the harvester
             * forms.
             */
            String mode = request.getParameter("submit");
            if ((mode != null) && (mode.equalsIgnoreCase("evaluate"))) {
                isEvaluate = true;
            }

            if ((metadataSource != null) && (!metadataSource.equals("desktopHarvester"))) {
                harvestId = generateHarvestId();
                if (isEvaluate) {
                    harvestReportId = uid + "-evaluate-" + harvestId;
                } else {
                    harvestReportId = uid + "-upload-" + harvestId;
                }
            }

            if (metadataSource != null) {
                if (metadataSource.equals("emlText")) {
                    emlTextArea = request.getParameter("emlTextArea");
                    if (emlTextArea == null || emlTextArea.trim().isEmpty()) {
                        warningMessage = "<p class=\"warning\">Please enter the text of an EML document into the text area.</p>";
                    }
                } else if (metadataSource.equals("emlFile")) {
                    Collection<Part> parts = request.getParts();
                    for (Part part : parts) {
                        if (part.getContentType() != null) {
                            // save EML file to disk
                            emlFile = processUploadedFile(part);
                        } else {
                            /*
                             * Parse the request parameters.
                             */
                            String fieldName = part.getName();
                            String fieldValue = request.getParameter(fieldName);
                            if (fieldName != null && fieldValue != null) {
                                if (fieldName.equals("submit") && fieldValue.equalsIgnoreCase("evaluate")) {
                                    isEvaluate = true;
                                } else if (fieldName.equals("desktopUpload")
                                        && fieldValue.equalsIgnoreCase("1")) {
                                    isDesktopUpload = true;
                                }
                            }
                        }
                    }
                } else if (metadataSource.equals("urlList")) {
                    urlTextArea = request.getParameter("urlTextArea");
                    if (urlTextArea == null || urlTextArea.trim().isEmpty()) {
                        warningMessage = "<p class=\"warning\">Please enter one or more EML document URLs into the text area.</p>";
                    } else {
                        documentURLs = parseDocumentURLsFromTextArea(urlTextArea);
                        warningMessage = CHECK_BACK_LATER;
                    }
                } else if (metadataSource.equals("harvestList")) {
                    harvestListURL = request.getParameter("harvestListURL");
                    if (harvestListURL == null || harvestListURL.trim().isEmpty()) {
                        warningMessage = "<p class=\"warning\">Please enter the URL to a Metacat Harvest List.</p>";
                    } else {
                        documentURLs = parseDocumentURLsFromHarvestList(harvestListURL);
                        warningMessage = CHECK_BACK_LATER;
                    }
                }
                /*
                 * If the metadata source is "desktopHarvester", we already have the
                 * EML file stored in a session attribute. Now we need to retrieve
                 * the data files from the browser's form fields and write the
                 * data files to a URL accessible location.
                 */
                else if (metadataSource.equals("desktopHarvester")) {
                    emlFile = (File) httpSession.getAttribute("emlFile");
                    ArrayList<Entity> entityList = parseEntityList(emlFile);
                    harvestReportId = (String) httpSession.getAttribute("harvestReportId");
                    String dataPath = servletContext.getRealPath(DESKTOP_DATA_DIR);
                    String harvestPath = String.format("%s/%s", dataPath, harvestReportId);

                    Collection<Part> parts = request.getParts();
                    String objectName = null;
                    Part filePart = null;

                    for (Part part : parts) {
                        if (part.getContentType() != null) {
                            // save data file to disk
                            //processDataFile(part, harvestPath);
                            filePart = part;
                        } else {
                            /*
                             * Parse the request parameters.
                             */
                            String fieldName = part.getName();
                            String fieldValue = request.getParameter(fieldName);
                            if (fieldName != null && fieldValue != null) {
                                if (fieldName.equals("submit") && fieldValue.equalsIgnoreCase("evaluate")) {
                                    isEvaluate = true;
                                } else if (fieldName.startsWith("object-name-")) {
                                    objectName = fieldValue;
                                }
                            }
                        }

                        if (filePart != null && objectName != null) {
                            processDataFile(filePart, harvestPath, objectName);
                            objectName = null;
                            filePart = null;
                        }

                    }

                    emlFile = transformDesktopEML(harvestPath, emlFile, harvestReportId, entityList);
                }
            } else {
                throw new IllegalStateException("No value specified for request parameter 'metadataSource'");
            }

            if (harvester == null) {
                harvester = new Harvester(harvesterPath, harvestReportId, uid, isEvaluate);
            }

            if (emlTextArea != null) {
                harvester.processSingleDocument(emlTextArea);
            } else if (emlFile != null) {
                if (isDesktopUpload) {
                    ArrayList<Entity> entityList = parseEntityList(emlFile);
                    httpSession.setAttribute("entityList", entityList);
                    httpSession.setAttribute("emlFile", emlFile);
                    httpSession.setAttribute("harvestReportId", harvestReportId);
                    httpSession.setAttribute("isEvaluate", new Boolean(isEvaluate));
                } else {
                    harvester.processSingleDocument(emlFile);
                }
            } else if (documentURLs != null) {
                harvester.setDocumentURLs(documentURLs);
                ExecutorService executorService = Executors.newCachedThreadPool();
                executorService.execute(harvester);
                executorService.shutdown();
            }
        }
    } catch (Exception e) {
        handleDataPortalError(logger, e);
    }

    request.setAttribute("message", warningMessage);

    /*
     * If we have a new reportId, and either there is no warning message or
     * it's the "Check back later" message, set the harvestReportID session
     * attribute to the new reportId value.
     */
    if (harvestReportId != null && harvestReportId.length() > 0
            && (warningMessage.length() == 0 || warningMessage.equals(CHECK_BACK_LATER))) {
        httpSession.setAttribute("harvestReportID", harvestReportId);
    }

    if (isDesktopUpload) {
        RequestDispatcher requestDispatcher = request.getRequestDispatcher("./desktopHarvester.jsp");
        requestDispatcher.forward(request, response);
    } else if (warningMessage.length() == 0) {
        response.sendRedirect("./harvestReport.jsp");
    } else {
        RequestDispatcher requestDispatcher = request.getRequestDispatcher("./harvester.jsp");
        requestDispatcher.forward(request, response);
    }

}

From source file:gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java

/**
 * Get all pages in async mode.
 */
private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
        ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess) throws IOException {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);

    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
    int r = 0;
    while (r <= RETRY) {
        ++r;
        log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
        ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
        ExecutorService es = Executors.newFixedThreadPool(10, ExecutorsUtils
                .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));

        while (!toProcess.isEmpty()) {
            submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages, nextRound);
        }
        //wait for jobs to finish and start next round if necessary.
        try {
            es.shutdown();
            boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
            if (!terminated) {
                es.shutdownNow();
                log.warn(String.format(
                        "Timed out while getting all pages for country-%s at round %d. Next round now has size %d.",
                        country, r, nextRound.size()));
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }

        if (nextRound.isEmpty()) {
            break;
        }
        toProcess = nextRound;
    }
    if (r == RETRY) {
        throw new RuntimeException(String.format(
                "Getting all pages reaches the maximum number of retires %d. Date range: %s ~ %s. Country: %s.",
                RETRY, startDate, endDate, country));
    }
    return allPages;
}

From source file:fr.inria.lille.repair.nopol.NoPol.java

/**
 * Method used as proxy for runNopolProcessor to handle timeout
 */
private List<Patch> executeNopolProcessor(final List<TestResult> tests, final SourceLocation sourceLocation,
        final SpoonedClass spoonCl, final NopolProcessor nopolProcessor) {
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Future nopolExecution = executor.submit(new Callable() {
        @Override
        public Object call() throws Exception {
            return runNopolProcessor(tests, sourceLocation, spoonCl, nopolProcessor);
        }
    });
    try {
        executor.shutdown();
        return (List) nopolExecution.get(nopolContext.getMaxTimeEachTypeOfFixInMinutes(), TimeUnit.MINUTES);
    } catch (ExecutionException exception) {
        LoggerFactory.getLogger(this.getClass()).error("Error ExecutionException " + exception.toString());
        return Collections.emptyList();
    } catch (InterruptedException exception) {
        LoggerFactory.getLogger(this.getClass()).error("Repair interrupted");
        return Collections.emptyList();
    } catch (TimeoutException exception) {
        LoggerFactory.getLogger(this.getClass()).error("Timeout: execution time > "
                + nopolContext.getMaxTimeEachTypeOfFixInMinutes() + " " + TimeUnit.MINUTES, exception);
        return Collections.emptyList();
    }
}

From source file:com.microsoft.azure.servicebus.samples.topicsgettingstarted.TopicsGettingStarted.java

public void run(String connectionString) throws Exception {

    TopicClient sendClient;
    SubscriptionClient subscription1Client;
    SubscriptionClient subscription2Client;
    SubscriptionClient subscription3Client;

    // Create SubscriptionClient instances using the connection string builder
    // We set the receive mode to "PeekLock", meaning the message is delivered
    // under a lock and must be acknowledged ("completed") to be removed from the queue
    subscription1Client = new SubscriptionClient(
            new ConnectionStringBuilder(connectionString, "BasicTopic/subscriptions/Subscription1"),
            ReceiveMode.PEEKLOCK);
    subscription2Client = new SubscriptionClient(
            new ConnectionStringBuilder(connectionString, "BasicTopic/subscriptions/Subscription2"),
            ReceiveMode.PEEKLOCK);
    subscription3Client = new SubscriptionClient(
            new ConnectionStringBuilder(connectionString, "BasicTopic/subscriptions/Subscription3"),
            ReceiveMode.PEEKLOCK);

    ExecutorService executorService = Executors.newCachedThreadPool();
    registerMessageHandlerOnClient(subscription1Client, executorService);
    registerMessageHandlerOnClient(subscription2Client, executorService);
    registerMessageHandlerOnClient(subscription3Client, executorService);

    sendClient = new TopicClient(new ConnectionStringBuilder(connectionString, "BasicTopic"));
    sendMessagesAsync(sendClient).thenRunAsync(() -> sendClient.closeAsync());

    // wait for ENTER or 10 seconds elapsing
    waitForEnter(10);

    CompletableFuture.allOf(subscription1Client.closeAsync(), subscription2Client.closeAsync(),
            subscription3Client.closeAsync()).join();

    executorService.shutdown();
}

From source file:com.opentransport.rdfmapper.nmbs.ScrapeTrip.java

private void requestJsons(Map trainDelays) {
    String trainName;
    Iterator iterator = trainDelays.entrySet().iterator();

    ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_CONNECTIONS_TO_IRAIL_API);
    while (iterator.hasNext()) {
        Map.Entry mapEntry = (Map.Entry) iterator.next();
        trainName = returnCorrectTrainFormat((String) mapEntry.getKey());
        url = "https://api.irail.be/vehicle/?id=BE.NMBS." + trainName + "&format=json";
        System.out.println("HTTP GET - " + url);
        countConnections++;
        pool.submit(new DownloadDelayedTrains(trainName, url));
    }
    pool.shutdown();

    try {
        pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        // all tasks have now finished (unless an exception is thrown above)
    } catch (InterruptedException ex) {
        Logger.getLogger(ScrapeTrip.class.getName()).log(Level.SEVERE, null, ex);
        errorWriter.writeError(ex.toString());
    }
}

From source file:demo.vmware.commands.schemaspecific.CommandCreateData.java

@Override
public CommandResult run(ConfigurableApplicationContext mainContext, List<String> parameters) {
    List<String> messages = new ArrayList<String>();

    // lets us track cache growth
    int attributeCounter = 0;

    CommandTimer timer = new CommandTimer();
    LOG.info("creating test data for " + numCompanies + "," + numRegions + "," + numGroups + "," + numBranches);
    // use the Java executor service because of its awesome invokeAll method.
    // we have 4 cores but gemfire needs some. 2 is probably a more realistic number
    ExecutorService taskExecutor = Executors.newFixedThreadPool(2);
    Collection tasks = new ArrayList<CompanyHierarchyPopulator>();

    for (int i = 0; i < numCompanies; i++) {
        // add a task for each company we are creating
        CompanyHierarchyPopulator poper = new CompanyHierarchyPopulator();
        tasks.add(poper);
    }

    try {
        // run all the tasks in execution pool
        List<Future<?>> futures = taskExecutor.invokeAll(tasks);
        taskExecutor.shutdown();

        // aggregate the results from the call() method
        for (int i = 0; i < numCompanies; i++) {
            // should get resulting messages also -- sometime in the future
            attributeCounter += ((Integer) futures.get(i).get()).intValue();
        }

        timer.stop();
        messages.add("Created " + attributeCounter + " attributes and " + " took "
                + timer.getTimeDiffInSeconds() + " sec");
    } catch (ExecutionException e) {
        // this should never happen
        LOG.warn("something bad happend", e);
        messages.add("Something bad happend " + e.getMessage());
    } catch (InterruptedException e) {
        // this should never happen
        LOG.warn("something bad happend", e);
        messages.add("Something bad happend " + e.getMessage());
    }
    return new CommandResult(null, messages);
}

From source file:io.ecarf.core.cloud.task.processor.old.ProcessLoadTask1Old.java

@Override
public void run() throws IOException {

    log.info("START: processing files for bigquery import");

    EcarfGoogleCloudService cloudService = (EcarfGoogleCloudService) this.getCloudService();

    //String bucket = metadata.getBucket();

    // get the schema terms if provided
    //String schemaTermsFile = metadata.getSchemaTermsFile();
    Set<String> schemaTerms = cloudService.getSetFromCloudStorageFile(schemaTermsFile, bucket);

    //Set<String> files = metadata.getFiles();
    Set<String> filesSet = ObjectUtils.csvToSet(files);
    log.info("Loading files: " + filesSet);
    Map<String, Integer> count = new HashMap<>();
    List<ProcessFilesForBigQuerySubTask> tasks = new ArrayList<>();
    int processors = Runtime.getRuntime().availableProcessors();

    for (final String file : filesSet) {

        TermCounter counter = null;

        if (schemaTerms != null) {
            counter = new TermCounter();
            counter.setTermsToCount(schemaTerms);
        }

        ProcessFilesForBigQuerySubTask task = new ProcessFilesForBigQuerySubTask(file, bucket, bucket, counter,
                null, false, false, this.getCloudService());
        tasks.add(task);

    }

    // check if we only have one file to process
    if (tasks.size() == 1) {

        TermCounter counter = tasks.get(0).call();
        if (counter != null) {
            count = counter.getCount();
        }

    } else if (processors == 1) {
        // only one process then process synchronously
        for (ProcessFilesForBigQuerySubTask task : tasks) {
            TermCounter counter = task.call();
            if (counter != null) {
                Utils.mergeCountMaps(count, counter.getCount());
            }
        }

    } else {

        // multiple cores
        ExecutorService executor = Utils.createFixedThreadPool(processors);

        try {

            List<Future<TermCounter>> results = executor.invokeAll(tasks);

            for (Future<TermCounter> result : results) {
                TermCounter counter = result.get();
                if (counter != null) {
                    Utils.mergeCountMaps(count, counter.getCount());
                }
            }

        } catch (Exception e) {
            log.error("Failed to process multiple files", e);
            throw new IOException(e);

        } finally {
            executor.shutdown();
        }
    }
    // write term stats to file and upload
    if ((count != null) && !count.isEmpty()) {
        log.info("Saving terms stats");
        String countStatsFile = Utils.TEMP_FOLDER + cloudService.getInstanceId() + Constants.DOT_JSON;
        FileUtils.objectToJsonFile(countStatsFile, count);

        cloudService.uploadFileToCloudStorage(countStatsFile, bucket);
    }

    log.info("FINISH: All files are processed and uploaded successfully");
}