Example usage for java.util.concurrent.Future.isDone()

List of usage examples for java.util.concurrent.Future.isDone()

Introduction

This page lists example usages of java.util.concurrent.Future.isDone(), collected from open-source projects.

Prototype

boolean isDone();

Document

Returns true if this task completed. Completion may be due to normal termination, an exception, or cancellation; in all of these cases, this method returns true.
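Because isDone() also returns true when a task was cancelled or failed, callers typically poll it to avoid blocking and then call get() for the result. Below is a minimal, self-contained sketch of that pattern; the task body and timings are invented for illustration.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class IsDoneExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // Hypothetical long-running computation, used only for illustration.
        Future<Integer> future = executor.submit(() -> {
            Thread.sleep(500);
            return 42;
        });

        // Poll isDone() so the caller can do other work or report progress
        // instead of blocking immediately in get().
        while (!future.isDone()) {
            System.out.println("Still computing ...");
            Thread.sleep(100);
        }

        // isDone() == true does not imply success: get() may still throw
        // CancellationException or ExecutionException here.
        System.out.println("Result: " + future.get());
        executor.shutdown();
    }
}

Several of the examples below use this pattern, usually combined with a watchdog timeout that cancels the future if it runs too long.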

Usage

From source file:ubic.gemma.core.analysis.expression.diff.LinearModelAnalyzer.java

/**
 * Important bit. Run the analysis.
 *
 * @return results
 */
private Map<String, LinearModelSummary> runAnalysis(
        final DoubleMatrix<CompositeSequence, BioMaterial> namedMatrix,
        final DoubleMatrix<String, String> sNamedMatrix, DesignMatrix designMatrix,
        final DoubleMatrix1D librarySize, final DifferentialExpressionAnalysisConfig config) {

    final Map<String, LinearModelSummary> rawResults = new ConcurrentHashMap<>();

    Future<?> f = this.runAnalysisFuture(designMatrix, sNamedMatrix, rawResults, librarySize, config);

    StopWatch timer = new StopWatch();
    timer.start();
    long lastTime = 0;

    // this analysis should take just 10 or 20 seconds for most data sets.
    double MAX_ANALYSIS_TIME = 60 * 1000 * 30; // 30 minutes.
    double updateIntervalMillis = 60 * 1000;// 1 minute
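    // Poll until the analysis future completes; log progress and enforce a hard timeout.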
    while (!f.isDone()) {
        try {
            Thread.sleep(1000);

            if (timer.getTime() - lastTime > updateIntervalMillis) {
                LinearModelAnalyzer.log.info(String.format("Analysis running, %.1f minutes elapsed ...",
                        timer.getTime() / 60000.00));
                lastTime = timer.getTime();
            }

        } catch (InterruptedException e) {
            LinearModelAnalyzer.log.warn("Analysis interrupted!");
            return rawResults;
        }

        if (timer.getTime() > MAX_ANALYSIS_TIME) {
            LinearModelAnalyzer.log
                    .error("Analysis is taking too long, something bad must have happened; cancelling");
            f.cancel(true);
            throw new RuntimeException("Analysis was taking too long, it was cancelled");
        }
    }

    if (timer.getTime() > updateIntervalMillis) {
        LinearModelAnalyzer.log
                .info(String.format("Analysis finished in %.1f minutes.", timer.getTime() / 60000.00));
    }

    try {
        f.get();
    } catch (InterruptedException e) {
        LinearModelAnalyzer.log.warn("Job was interrupted");
        return rawResults;
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }

    assert rawResults.size() == namedMatrix.rows() : "expected " + namedMatrix.rows() + " results, got "
            + rawResults.size();
    return rawResults;
}

From source file:org.apache.flume.api.NettyAvroRpcClient.java

private void append(Event event, long timeout, TimeUnit tu) throws EventDeliveryException {

    assertReady();

    final CallFuture<Status> callFuture = new CallFuture<Status>();

    final AvroFlumeEvent avroEvent = new AvroFlumeEvent();
    avroEvent.setBody(ByteBuffer.wrap(event.getBody()));
    avroEvent.setHeaders(toCharSeqMap(event.getHeaders()));

    Future<Void> handshake;
    try {
        // due to AVRO-1122, avroClient.append() may block
        handshake = callTimeoutPool.submit(new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                avroClient.append(avroEvent, callFuture);
                return null;
            }
        });
    } catch (RejectedExecutionException ex) {
        throw new EventDeliveryException(this + ": Executor error", ex);
    }

    try {
        handshake.get(connectTimeout, TimeUnit.MILLISECONDS);
    } catch (TimeoutException ex) {
        throw new EventDeliveryException(this + ": Handshake timed out after " + connectTimeout + " ms", ex);
    } catch (InterruptedException ex) {
        throw new EventDeliveryException(this + ": Interrupted in handshake", ex);
    } catch (ExecutionException ex) {
        throw new EventDeliveryException(this + ": RPC request exception", ex);
    } catch (CancellationException ex) {
        throw new EventDeliveryException(this + ": RPC request cancelled", ex);
    } finally {
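        // Whether we succeeded or are propagating an error, do not leave the handshake task running.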
        if (!handshake.isDone()) {
            handshake.cancel(true);
        }
    }

    waitForStatusOK(callFuture, timeout, tu);
}

From source file:de.hopmann.msc.slave.service.PackageInstallationBean.java

public Future<InstallationContext> acquireInstallation(PackageResolved packageResolved,
        PackageInstallerHolder packageInstallerHolder) throws PackageNotFoundException {
    // TODO exception, close context to rollback on error

    final InstallationContext context = new InstallationContext();

    final Future<PackageInstallationEntity> installationFuture = acquireInstallation(packageResolved,
            packageInstallerHolder, context);

    return new Future<InstallationContext>() {
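        // Delegate to installationFuture, wrapping its results in the surrounding InstallationContext.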

        @Override
        public boolean cancel(boolean mayInterruptIfRunning) {
            boolean cancelled = installationFuture.cancel(mayInterruptIfRunning);
            try {
                // Close installation context to release resources
                context.close();
            } catch (Exception e) {
                log.log(Level.WARNING, "Could not close installation context", e);
            }
            return cancelled;
        }

        @Override
        public InstallationContext get() throws InterruptedException, ExecutionException {
            context.setInstallation(installationFuture.get());
            return context;
        }

        @Override
        public InstallationContext get(long timeout, TimeUnit unit)
                throws InterruptedException, ExecutionException, TimeoutException {
            context.setInstallation(installationFuture.get(timeout, unit));
            return context;
        }

        @Override
        public boolean isCancelled() {
            return installationFuture.isCancelled();
        }

        @Override
        public boolean isDone() {
            return installationFuture.isDone();
        }
    };

}

From source file:org.apache.flume.api.NettyAvroRpcClient.java

private void appendBatch(List<Event> events, long timeout, TimeUnit tu) throws EventDeliveryException {

    assertReady();

    Iterator<Event> iter = events.iterator();
    final List<AvroFlumeEvent> avroEvents = new LinkedList<AvroFlumeEvent>();

    // send multiple batches... bail if there is a problem at any time
    while (iter.hasNext()) {
        avroEvents.clear();

        for (int i = 0; i < batchSize && iter.hasNext(); i++) {
            Event event = iter.next();
            AvroFlumeEvent avroEvent = new AvroFlumeEvent();
            avroEvent.setBody(ByteBuffer.wrap(event.getBody()));
            avroEvent.setHeaders(toCharSeqMap(event.getHeaders()));
            avroEvents.add(avroEvent);
        }

        final CallFuture<Status> callFuture = new CallFuture<Status>();

        Future<Void> handshake;
        try {
            // due to AVRO-1122, avroClient.appendBatch() may block
            handshake = callTimeoutPool.submit(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    avroClient.appendBatch(avroEvents, callFuture);
                    return null;
                }
            });
        } catch (RejectedExecutionException ex) {
            throw new EventDeliveryException(this + ": Executor error", ex);
        }

        try {
            handshake.get(connectTimeout, TimeUnit.MILLISECONDS);
        } catch (TimeoutException ex) {
            throw new EventDeliveryException(this + ": Handshake timed out after " + connectTimeout + "ms", ex);
        } catch (InterruptedException ex) {
            throw new EventDeliveryException(this + ": Interrupted in handshake", ex);
        } catch (ExecutionException ex) {
            throw new EventDeliveryException(this + ": RPC request exception", ex);
        } catch (CancellationException ex) {
            throw new EventDeliveryException(this + ": RPC request cancelled", ex);
        } finally {
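            // As in append(), make sure the handshake task is cancelled if it has not completed.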
            if (!handshake.isDone()) {
                handshake.cancel(true);
            }
        }

        waitForStatusOK(callFuture, timeout, tu);
    }
}

From source file:org.elasticsearch.client.sniff.SnifferTests.java

public void testTaskCancelling() throws Exception {
    RestClient restClient = mock(RestClient.class);
    HostsSniffer hostsSniffer = mock(HostsSniffer.class);
    Scheduler noOpScheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            return null;
        }

        @Override
        public void shutdown() {
        }
    };
    Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        int numIters = randomIntBetween(50, 100);
        for (int i = 0; i < numIters; i++) {
            Sniffer.Task task = sniffer.new Task(0L);
            TaskWrapper wrapper = new TaskWrapper(task);
            Future<?> future;
            if (rarely()) {
                future = executor.schedule(wrapper, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
            } else {
                future = executor.submit(wrapper);
            }
            Sniffer.ScheduledTask scheduledTask = new Sniffer.ScheduledTask(task, future);
            boolean skip = scheduledTask.skip();
            try {
                assertNull(future.get());
            } catch (CancellationException ignore) {
                assertTrue(future.isCancelled());
            }

            if (skip) {
                //the task was either cancelled before starting, in which case it will never start (thanks to Future#cancel),
                //or skipped, in which case it will run but do nothing (thanks to Task#skip).
                //Here we want to make sure that whenever skip returns true, the task either won't run or it won't do anything,
                //otherwise we may end up with parallel sniffing tracks given that each task schedules the following one. We need to
                // make sure that onFailure takes scheduling over while at the same time ordinary rounds don't go on.
                assertFalse(task.hasStarted());
                assertTrue(task.isSkipped());
                assertTrue(future.isCancelled());
                assertTrue(future.isDone());
            } else {
                //if a future is cancelled when its execution has already started, future#get throws CancellationException before
                //completion. The execution continues though so we use a latch to try and wait for the task to be completed.
                //Here we want to make sure that whenever skip returns false, the task will be completed, otherwise we may be
                //missing to schedule the following round, which means no sniffing will ever happen again besides on failure sniffing.
                assertTrue(wrapper.await());
                //the future may or may not be cancelled but the task has for sure started and completed
                assertTrue(task.toString(), task.hasStarted());
                assertFalse(task.isSkipped());
                assertTrue(future.isDone());
            }
            //subsequent cancel calls return false for sure
            int cancelCalls = randomIntBetween(1, 10);
            for (int j = 0; j < cancelCalls; j++) {
                assertFalse(scheduledTask.skip());
            }
        }
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }
}

From source file:com.alibaba.wasp.master.FMaster.java

/**
 * We do the following in a different thread. If it is not completed in time,
 * we will time it out and assume it is not easy to recover.
 *
 * 1. Create a new ZK session (since our current one is expired).
 * 2. Try to become a primary master again.
 * 3. Initialize all ZK-based system trackers.
 * 4. Assign root and meta (they are already assigned, but we need to update
 * our internal memory state to reflect it).
 * 5. Process any RIT encountered during the course of our recovery.
 *
 * @return True if we could successfully recover from ZK session expiry.
 * @throws InterruptedException
 * @throws java.io.IOException
 * @throws org.apache.zookeeper.KeeperException
 * @throws java.util.concurrent.ExecutionException
 */
private boolean tryRecoveringExpiredZKSession()
        throws InterruptedException, IOException, KeeperException, ExecutionException {

    this.zooKeeper.reconnectAfterExpiration();

    Callable<Boolean> callable = new Callable<Boolean>() {
        public Boolean call() throws InterruptedException, IOException, KeeperException {
            MonitoredTask status = TaskMonitor.get().createStatus("Recovering expired ZK session");
            try {
                if (!becomeActiveMaster(status)) {
                    return Boolean.FALSE;
                }
                initialized = false;
                finishInitialization(status, true);
                return Boolean.TRUE;
            } finally {
                status.cleanup();
            }
        }
    };

    long timeout = conf.getLong("wasp.master.zksession.recover.timeout", 300000);
    java.util.concurrent.ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<Boolean> result = executor.submit(callable);
    executor.shutdown();
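    // Only trust the outcome if the executor finished within the timeout and the future actually completed.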
    if (executor.awaitTermination(timeout, TimeUnit.MILLISECONDS) && result.isDone()) {
        Boolean recovered = result.get();
        if (recovered != null) {
            return recovered.booleanValue();
        }
    }
    executor.shutdownNow();
    return false;
}

From source file:ubic.gemma.analysis.expression.diff.LinearModelAnalyzer.java

/**
 * Important bit. Run the analysis.
 * 
 * @param namedMatrix
 * @param factorNameMap
 * @param modelFormula
 * @param interactionFactorLists
 * @param interceptFactor
 * @param designMatrix
 * @param baselineConditions
 * @param quantitationType
 * @return results
 */
private Map<String, LinearModelSummary> runAnalysis(
        final DoubleMatrix<CompositeSequence, BioMaterial> namedMatrix,
        final DoubleMatrix<String, String> sNamedMatrix,
        final Map<String, Collection<ExperimentalFactor>> factorNameMap, final String modelFormula,
        DesignMatrix designMatrix, ExperimentalFactor interceptFactor, List<String[]> interactionFactorLists,
        Map<ExperimentalFactor, FactorValue> baselineConditions, QuantitationType quantitationType) {

    final Map<String, LinearModelSummary> rawResults = new ConcurrentHashMap<String, LinearModelSummary>();

    Future<?> f = runAnalysisFuture(designMatrix, sNamedMatrix, rawResults, quantitationType);

    StopWatch timer = new StopWatch();
    timer.start();
    long lasttime = 0;

    // this analysis should take just 10 or 20 seconds for most data sets.
    double MAX_ANALYSIS_TIME = 60 * 1000 * 20; // 20 minutes.
    double updateIntervalMillis = 60 * 1000;// 1 minute
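    // Poll until the analysis future completes; log progress and enforce a hard timeout.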
    while (!f.isDone()) {
        try {
            Thread.sleep(1000);

            if (timer.getTime() - lasttime > updateIntervalMillis) {
                log.info(String.format("Analysis running, %.1f minutes elapsed ...",
                        timer.getTime() / 60000.00));
                lasttime = timer.getTime();
            }

        } catch (InterruptedException e) {
            log.warn("Analysis interrupted!");
            return rawResults;
        }

        if (timer.getTime() > MAX_ANALYSIS_TIME) {
            log.error("Analysis is taking too long, something bad must have happened; cancelling");
            f.cancel(true);
            throw new RuntimeException("Analysis was taking too long, it was cancelled");
        }
    }

    if (timer.getTime() > updateIntervalMillis) {
        log.info(String.format("Analysis finished in %.1f minutes.", timer.getTime() / 60000.00));
    }

    try {
        f.get();
    } catch (InterruptedException e) {
        log.warn("Job was interrupted");
        return rawResults;
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }

    assert rawResults.size() == namedMatrix.rows() : "expected " + namedMatrix.rows() + " results, got "
            + rawResults.size();
    return rawResults;
}

From source file:org.opentox.jaqpot3.www.services.PredictionService.java

@Override
public void run() {

    /*
     * Change the status of the task from QUEUED to RUNNING
     * The task has ALREADY been registered (see ModelResource)
     */
    predictor.getTask().setStatus(Status.RUNNING);
    predictor.getTask().getMeta()
            .setDate(new LiteralValue(new Date(System.currentTimeMillis()), XSDDatatype.XSDdate));
    if (predictor.getModel() != null && predictor.getModel().getUri() != null) {
        predictor.getTask().getMeta()
                .addHasSource(new ResourceValue(predictor.getModel().getUri(), OTClasses.model()));
    }

    UpdateTask updater = new UpdateTask(predictor.getTask());
    updater.setUpdateTaskStatus(true);
    updater.setUpdateMeta(true);
    try {
        updater.update();// update the task (QUEUED --> RUNNING)
    } catch (DbException ex) {
        logger.error("Cannot update task to RUNNING", ex);
    } finally {
        if (updater != null) {
            try {
                updater.close();
            } catch (DbException ex) {
                logger.error("TaskUpdater is uncloseable", ex);
            }
        }
    }

    String datasetUri = clientInput.getFirstValue("dataset_uri");
    try {
        this.parametrize(clientInput);
        predictor.parametrize(clientInput);
        VRI datasetURI = new VRI(datasetUri);
        isSubstanceDataset = datasetURI.getOpenToxType() == SubstanceDataset.class;
        Future<VRI> future;
        String debug = GenericUtils.getPropertyValue("debug");
        if (StringUtils.equals(debug, "true")) {
            String dbgFn;
            if (isSubstanceDataset) {
                /* GET THE PREDICTIONS FROM THE PREDICTOR */
                predictor.predictEnm(datasetURI);
                dbgFn = GenericUtils.getDebugFilename(predictor.getModel(), true);
            } else {
                /* GET THE PREDICTIONS FROM THE PREDICTOR */
                predictor.predict(datasetURI);
                dbgFn = GenericUtils.getDebugFilename(predictor.getModel(), false);
            }
            Instances inst = predictor.getPredictedInstances();
            WekaInstancesProcess.toCSV(inst, dbgFn);

            predictor.getTask().setHttpStatus(200).setResultUri(datasetURI).setPercentageCompleted(100.0f)
                    .setStatus(Status.COMPLETED);
            UpdateTask updateTask = new UpdateTask(predictor.getTask());
            updateTask.setUpdateHttpStatus(true);
            updateTask.setUpdateTaskStatus(true);
            updateTask.setUpdateResultUri(true);
            updateTask.update();
            updateTask.close();
        } else {
            if (isSubstanceDataset) {
                /* GET THE PREDICTIONS FROM THE PREDICTOR */
                SubstanceDataset output = predictor.predictEnm(datasetURI);
                future = output.publish(datasetServiceUri, token);
            } else {
                /* GET THE PREDICTIONS FROM THE PREDICTOR */
                Dataset output = predictor.predict(datasetURI);
                future = output.publish(datasetServiceUri, token);
            }
            /* */

            float counter = 1;
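            // Poll the publication future, nudging the reported completion percentage toward 100%.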
            while (!future.isDone()) {
                try {
                    Thread.sleep(1000);
                    float prc = 100f - (50.0f / (float) Math.sqrt(counter));
                    predictor.getTask().setPercentageCompleted(prc);
                    UpdateTask updateTask = new UpdateTask(predictor.getTask());
                    updateTask.setUpdateMeta(true);
                    updateTask.setUpdatePercentageCompleted(true);
                    updateTask.update();
                    updateTask.close();
                    counter++;
                } catch (InterruptedException ex) {
                    logger.error("Interrupted", ex);
                    throw new JaqpotException("UnknownCauseOfException", ex);
                }
            }
            try {
                VRI resultUri = future.get();

                if (isSubstanceDataset) {
                    //TODO: API EXT custom enanomapper and datasetURI must be returned
                    //In enanomapper publishing a dataset is available through posting to /substance
                    //this returns a substance.
                    //In order to get the dataset, the ownerUUID from the substance is retrieved 
                    String host = SubstanceDataset.getHostFromVRI(datasetUri);
                    String ownerName = Substance.getSubstanceKey(token, resultUri.getUri(), "ownerUUID");
                    resultUri = new VRI(host + "/substanceowner/" + ownerName + "/dataset");
                }

                predictor.getTask().setHttpStatus(200).setPercentageCompleted(100.0f).setResultUri(resultUri)
                        .setStatus(Status.COMPLETED);
                UpdateTask updateTask = new UpdateTask(predictor.getTask());
                updateTask.setUpdateHttpStatus(true);
                updateTask.setUpdateTaskStatus(true);
                updateTask.setUpdateResultUri(true);
                updateTask.update();
                updateTask.close();

            } catch (InterruptedException ex) {
                logger.error("Task update was abnormally interrupted", ex);
                throw new JaqpotException("UnknownCauseOfException", ex);
            }
        }
    } catch (URISyntaxException ex) {
        logger.trace(null, ex);
        updateFailedTask(predictor.getTask(), ex,
                "The parameter 'dataset_uri' provided by the user cannot be " + "cast as a valid URI", 400,
                predictor.getTask().getCreatedBy().getUid());
    } catch (BadParameterException ex) {
        logger.trace(null, ex);
        updateFailedTask(predictor.getTask(), ex, "Task failed due to illegal parametrization. ", 400,
                predictor.getTask().getCreatedBy().getUid());
    } catch (Throwable ex) {
        logger.error(null, ex);
        updateFailedTask(predictor.getTask(), ex, "", 500, Configuration.BASE_URI);
    }

}

From source file:de.unisb.cs.st.javalanche.mutation.runtime.testDriver.MutationTestDriver.java

/**
 * Runs the given test in a new thread with the specified timeout
 * (DEFAULT_TIMEOUT_IN_SECONDS) and stores the results in the given testResult.
 *
 * @param r
 *            the test to be run
 * @return the time needed for executing the test
 */
protected long runWithTimeoutOld(MutationTestRunnable r) {
    // ArrayList<Thread> threadsPre = ThreadUtil.getThreads();
    ExecutorService service = Executors.newSingleThreadExecutor();
    Future<?> future = service.submit(r);
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    service.shutdown();
    String exceptionMessage = null;
    Throwable capturedThrowable = null;
    try {
        logger.debug("Start  test: ");
        boolean terminated = service.awaitTermination(timeout, TimeUnit.SECONDS);
        logger.debug("First timeout");
        long time1 = stopWatch.getTime();
        if (!terminated) {
            service.shutdownNow();
        }
        future.get(1, TimeUnit.SECONDS);
        logger.debug("Second timeout");
        long time2 = stopWatch.getTime();
        if (time2 - time1 > 1000) {
            logger.info("Process got some extra time: " + (time2 - time1) + "  " + time2);
        }
        future.cancel(true);

    } catch (InterruptedException e) {
        capturedThrowable = e;
    } catch (ExecutionException e) {
        capturedThrowable = e;
    } catch (TimeoutException e) {
        exceptionMessage = "Mutation causes test timeout";
        capturedThrowable = e;
    } catch (Throwable t) {
        capturedThrowable = t;
    } finally {
        if (capturedThrowable != null) {
            if (exceptionMessage == null) {
                exceptionMessage = "Exception caught during test execution.";
            }
            r.setFailed(exceptionMessage, capturedThrowable);
        }
    }
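    // If the test is still running after both timeouts, record it as failed and switch off the mutation.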
    if (!future.isDone()) {
        r.setFailed("Mutated Thread is still running after timeout.", null);
        switchOfMutation(future);
    }
    stopWatch.stop();

    if (!r.hasFinished()) {
        shutDown(r, stopWatch);
    }
    logger.debug("End timed test, it took " + stopWatch.getTime() + " ms");
    return stopWatch.getTime();
}

From source file:com.hygenics.parser.KVParser.java

public void run() {
    log.info("Starting Parse @ " + Calendar.getInstance().getTime().toString());
    ForkJoinPool fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procs);
    Set<Callable<ArrayList<String>>> collection;
    List<Future<ArrayList<String>>> futures;
    ArrayList<String> data = new ArrayList<String>((commitsize + 10));
    ArrayList<String> outdata = new ArrayList<String>(((commitsize + 10) * 3));

    int currpos = 0;
    boolean run = true;

    while (run) {
        log.info("Getting Pages");
        // get pages
        String query = select;

        if (data.size() > 0) {
            data.clear();
        }

        // Append the optional extra condition exactly once.
        if (extracondition != null) {
            query += " WHERE " + extracondition + " AND ";
        } else {
            query += " WHERE ";
        }

        collection = new HashSet<Callable<ArrayList<String>>>(qnums);
        for (int i = 0; i < qnums; i++) {

            if (currpos + (Math.round(commitsize / qnums * (i + 1))) < currpos + commitsize) {
                collection.add(new SplitQuery((query + pullid + " >= "
                        + Integer.toString(currpos + (Math.round(commitsize / qnums * (i)))) + " AND " + pullid
                        + " < " + Integer.toString(currpos + (Math.round(commitsize / qnums * (i + 1)))))));
            } else {
                collection.add(new SplitQuery((query + pullid + " >= "
                        + Integer.toString(currpos + (Math.round(commitsize / qnums * (i)))) + " AND " + pullid
                        + " < " + Integer.toString(currpos + commitsize))));
            }
        }

        currpos += commitsize;

        if (collection.size() > 0) {

            futures = fjp.invokeAll(collection);

            int w = 0;

            while (!fjp.isQuiescent() && fjp.getActiveThreadCount() > 0) {
                w++;
            }

            for (Future<ArrayList<String>> f : futures) {
                try {
                    ArrayList<String> darr = f.get();
                    if (darr != null && darr.size() > 0) {
                        data.addAll(darr);
                    }
                } catch (NullPointerException e) {
                    log.info("Some Data Returned Null");
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            }

        }

        if (data.size() == 0 && checkString != null) {
            collection = new HashSet<Callable<ArrayList<String>>>(1);
            collection.add(new SplitQuery(checkString));

            futures = fjp.invokeAll(collection);
            int w = 0;
            while (!fjp.isQuiescent() && fjp.getActiveThreadCount() > 0) {
                w++;
            }

            for (Future<ArrayList<String>> f : futures) {
                try {
                    ArrayList<String> arr = f.get();

                    if (arr != null) {
                        for (String a : arr) {
                            if (a != null) {
                                data.add(a);
                            }
                        }
                    }
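                    // Cancel any query future that somehow has not completed so its thread is freed.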
                    if (!f.isDone()) {
                        f.cancel(true);
                    }
                    f = null;
                } catch (NullPointerException e) {
                    log.info("Some Data Returned Null");
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            }
        }

        // parse pages
        if (data.size() > 0) {
            log.info("Parsing " + Integer.toString(data.size()) + " Records");
            collection = new HashSet<Callable<ArrayList<String>>>(data.size());

            for (String json : data) {
                Map<String, Object> jmap = Json.read(json).asMap();

                // for each table in the tags Map which is a key
                for (String k : tags.keySet()) {

                    collection.add(new Parser(tags.get(k), jmap.get(htmlColumn).toString(), replacePattern,
                            replacement, jmap.get(hashColumn).toString(), hashColumn, k));

                    if (collection.size() + 1 == data.size()
                            || (collection.size() % commitsize == 0 && collection.size() >= commitsize)) {
                        log.info("Waiting for Tasks to Complete");
                        futures = fjp.invokeAll(collection);

                        // post data
                        int w = 0;
                        while (!fjp.isQuiescent() && fjp.getActiveThreadCount() > 0) {
                            w++;
                        }

                        for (Future<ArrayList<String>> future : futures) {
                            try {
                                outdata.addAll(future.get());
                            } catch (NullPointerException e) {
                                log.info("Some Data Returned Null");
                            } catch (InterruptedException e) {
                                e.printStackTrace();
                            } catch (ExecutionException e) {
                                e.printStackTrace();
                            }
                        }

                        log.info("Parsed " + outdata.size() + " records!");
                        // post data
                        int cp = 0;
                        if (outdata.size() > 0) {
                            checkTables(outdata);
                            this.sendToDb(outdata, true);
                            outdata = new ArrayList<String>(commitsize);
                        }

                    }

                }
            }
            data = new ArrayList<String>(commitsize);
        } else {
            log.info("No Records Found. Terminating!");
            run = false;
        }

    }

    if (outdata.size() > 0) {
        log.info("Posting Last Records");
        // post remaining pages for the iteration
        checkTables(outdata);
        this.sendToDb(outdata, true);
        data.clear();
        outdata.clear();
    }

    // shutdown
    log.info("Complete! Shutting Down FJP.");
    fjp.shutdownNow();

    log.info("Finished Parse @ " + Calendar.getInstance().getTime().toString());
}