Example usage for java.util.concurrent Future cancel

Introduction

On this page you can find example usage for java.util.concurrent Future.cancel.

Prototype

boolean cancel(boolean mayInterruptIfRunning);

Document

Attempts to cancel execution of this task.
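
Before the project examples, here is a minimal, self-contained sketch of the typical pattern: submit a task, wait for it with a timeout, and cancel it with mayInterruptIfRunning set to true if it takes too long. The class name, executor setup, and timings are illustrative, not taken from any of the projects below.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class FutureCancelExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // A task that responds to interruption, so cancel(true) can actually stop it.
        Future<String> future = pool.submit(() -> {
            Thread.sleep(10_000); // stands in for long-running work; throws if interrupted
            return "done";
        });

        try {
            // Wait up to two seconds for the result.
            System.out.println(future.get(2, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            // true = interrupt the worker thread if the task has already started.
            boolean cancelled = future.cancel(true);
            System.out.println("Cancelled: " + cancelled);
        } catch (ExecutionException e) {
            e.printStackTrace();
        } finally {
            pool.shutdownNow();
        }
    }
}

Note that cancel(true) only delivers an interrupt; the task must cooperate (by blocking in an interruptible method such as Thread.sleep, or by checking Thread.interrupted()) for the interruption to have any effect. cancel returns false if the task could not be cancelled, typically because it has already completed.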

Usage

From source file:ubic.gemma.core.analysis.expression.diff.LinearModelAnalyzer.java
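A watchdog loop polls f.isDone() once a second and calls f.cancel(true) if the analysis exceeds a 30-minute ceiling.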

/**
 * Important bit. Run the analysis.
 *
 * @return results
 */
private Map<String, LinearModelSummary> runAnalysis(
        final DoubleMatrix<CompositeSequence, BioMaterial> namedMatrix,
        final DoubleMatrix<String, String> sNamedMatrix, DesignMatrix designMatrix,
        final DoubleMatrix1D librarySize, final DifferentialExpressionAnalysisConfig config) {

    final Map<String, LinearModelSummary> rawResults = new ConcurrentHashMap<>();

    Future<?> f = this.runAnalysisFuture(designMatrix, sNamedMatrix, rawResults, librarySize, config);

    StopWatch timer = new StopWatch();
    timer.start();
    long lastTime = 0;

    // this analysis should take just 10 or 20 seconds for most data sets.
    double MAX_ANALYSIS_TIME = 60 * 1000 * 30; // 30 minutes.
    double updateIntervalMillis = 60 * 1000;// 1 minute
    while (!f.isDone()) {
        try {
            Thread.sleep(1000);

            if (timer.getTime() - lastTime > updateIntervalMillis) {
                LinearModelAnalyzer.log.info(String.format("Analysis running, %.1f minutes elapsed ...",
                        timer.getTime() / 60000.00));
                lastTime = timer.getTime();
            }

        } catch (InterruptedException e) {
            LinearModelAnalyzer.log.warn("Analysis interrupted!");
            return rawResults;
        }

        if (timer.getTime() > MAX_ANALYSIS_TIME) {
            LinearModelAnalyzer.log
                    .error("Analysis is taking too long, something bad must have happened; cancelling");
            f.cancel(true);
            throw new RuntimeException("Analysis was taking too long, it was cancelled");
        }
    }

    if (timer.getTime() > updateIntervalMillis) {
        LinearModelAnalyzer.log
                .info(String.format("Analysis finished in %.1f minutes.", timer.getTime() / 60000.00));
    }

    try {
        f.get();
    } catch (InterruptedException e) {
        LinearModelAnalyzer.log.warn("Job was interrupted");
        return rawResults;
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }

    assert rawResults.size() == namedMatrix.rows() : "expected " + namedMatrix.rows() + " results, got "
            + rawResults.size();
    return rawResults;
}

From source file:com.hygenics.parser.KVParser.java
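This parser drains query futures with f.get() and defensively calls f.cancel(true) on any future that still reports it is not done before discarding it.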

public void run() {
    log.info("Starting Parse @ " + Calendar.getInstance().getTime().toString());
    ForkJoinPool fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procs);
    Set<Callable<ArrayList<String>>> collection;
    List<Future<ArrayList<String>>> futures;
    ArrayList<String> data = new ArrayList<String>((commitsize + 10));
    ArrayList<String> outdata = new ArrayList<String>(((commitsize + 10) * 3));

    int currpos = 0;
    boolean run = true;

    while (run) {
        log.info("Getting Pages");
        // get pages
        String query = select;

        if (data.size() > 0) {
            data.clear();
        }

        // build the WHERE clause, including any extra condition
        if (extracondition != null) {
            query += " WHERE " + extracondition + " AND ";
        } else {
            query += " WHERE ";
        }

        collection = new HashSet<Callable<ArrayList<String>>>(qnums);
        for (int i = 0; i < qnums; i++) {

            if (currpos + (Math.round(commitsize / qnums * (i + 1))) < currpos + commitsize) {
                collection.add(new SplitQuery((query + pullid + " >= "
                        + Integer.toString(currpos + (Math.round(commitsize / qnums * (i)))) + " AND " + pullid
                        + " < " + Integer.toString(currpos + (Math.round(commitsize / qnums * (i + 1)))))));
            } else {
                collection.add(new SplitQuery((query + pullid + " >= "
                        + Integer.toString(currpos + (Math.round(commitsize / qnums * (i)))) + " AND " + pullid
                        + " < " + Integer.toString(currpos + commitsize))));
            }
        }

        currpos += commitsize;

        if (collection.size() > 0) {

            futures = fjp.invokeAll(collection);

            int w = 0;

            // busy-wait until the fork/join pool quiesces; w just counts spin cycles
            while (fjp.isQuiescent() == false && fjp.getActiveThreadCount() > 0) {
                w++;
            }

            for (Future<ArrayList<String>> f : futures) {
                try {
                    ArrayList<String> darr = f.get();
                    if (darr != null && darr.size() > 0) {
                        data.addAll(darr);
                    }
                } catch (NullPointerException e) {
                    log.info("Some Data Returned Null");
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            }

        }

        if (data.size() == 0 && checkString != null) {
            collection = new HashSet<Callable<ArrayList<String>>>(1);
            collection.add(new SplitQuery(checkString));

            futures = fjp.invokeAll(collection);
            int w = 0;
            while (fjp.isQuiescent() == false && fjp.getActiveThreadCount() > 0) {
                w++;
            }

            for (Future<ArrayList<String>> f : futures) {
                try {
                    ArrayList<String> arr = f.get();

                    if (arr != null) {
                        for (String a : arr) {
                            if (a != null) {
                                data.add(a);
                            }
                        }
                    }
                    if (!f.isDone()) {
                        f.cancel(true);
                    }
                    f = null;
                } catch (NullPointerException e) {
                    log.info("Some Data Returned Null");
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            }
        }

        // parse pages
        if (data.size() > 0) {
            log.info("Parsing " + Integer.toString(data.size()) + " Records");
            collection = new HashSet<Callable<ArrayList<String>>>(data.size());

            for (String json : data) {
                Map<String, Object> jmap = Json.read(json).asMap();

                // for each table in the tags Map which is a key
                for (String k : tags.keySet()) {

                    collection.add(new Parser(tags.get(k), jmap.get(htmlColumn).toString(), replacePattern,
                            replacement, jmap.get(hashColumn).toString(), hashColumn, k));

                    if (collection.size() + 1 == data.size()
                            || (collection.size() % commitsize == 0 && collection.size() >= commitsize)) {
                        log.info("Waiting for Tasks to Complete");
                        futures = fjp.invokeAll(collection);

                        // post data
                        int w = 0;
                        while (fjp.isQuiescent() == false && fjp.getActiveThreadCount() > 0) {
                            w++;
                        }

                        for (Future<ArrayList<String>> future : futures) {
                            try {
                                outdata.addAll(future.get());
                            } catch (NullPointerException e) {
                                log.info("Some Data Returned Null");
                            } catch (InterruptedException e) {
                                e.printStackTrace();
                            } catch (ExecutionException e) {
                                e.printStackTrace();
                            }
                        }

                        log.info("Parsed " + outdata.size() + " records!");
                        // post data
                        if (outdata.size() > 0) {
                            checkTables(outdata);
                            this.sendToDb(outdata, true);
                            outdata = new ArrayList<String>(commitsize);
                        }

                    }

                }
            }
            data = new ArrayList<String>(commitsize);
        } else {
            log.info("No Records Found. Terminating!");
            run = false;
        }

    }

    if (outdata.size() > 0) {
        log.info("Posting Last Records");
        // post remaining pages for the iteration
        checkTables(outdata);
        this.sendToDb(outdata, true);
        data.clear();
        outdata.clear();
    }

    // shutdown
    log.info("Complete! Shutting Down FJP.");
    fjp.shutdownNow();

    log.info("Finished Parse @ " + Calendar.getInstance().getTime().toString());
}

From source file:com.numenta.core.service.DataSyncService.java
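An Android sync service that submits its downloads to an I/O thread pool and calls pendingIO.cancel(true) when the wait is interrupted or the task fails.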

/**
 * This method is executed periodically and updates {@link com.numenta.core.data.CoreDatabase}
 * with new data from the server.
 */
protected void synchronizeWithServer() throws IOException {
    Log.i(TAG, "synchronizeWithServer");

    if (_synchronizingWithServer) {
        return;
    }
    if (!NetUtils.isConnected()) {
        // Not connected, skip until we connect
        return;
    }

    final CoreDatabase database = HTMApplication.getDatabase();
    if (database == null) {
        return;
    }
    synchronized (this) {
        if (_synchronizingWithServer) {
            return;
        }
        _synchronizingWithServer = true;
    }
    String result = null;
    try {
        // Guard against blocking the UI Thread
        if (Looper.myLooper() == Looper.getMainLooper()) {
            throw new IllegalStateException("You should not access the database from the UI thread");
        }

        fireRefreshStateEvent(_synchronizingWithServer);

        final Context context = _service.getApplicationContext();
        final long now = System.currentTimeMillis();

        // Check if enough time has passed since we checked for new data
        SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(context);
        final long lastConnectedTime = prefs.getLong(PREF_LAST_CONNECTED_TIME, 0);
        if (now - lastConnectedTime < DataUtils.METRIC_DATA_INTERVAL) {
            return;
        }

        // Calculate hours since last update. This information will be
        // passed to the user together with error message
        final CharSequence hoursSinceData = DateUtils.getRelativeTimeSpanString(database.getLastTimestamp(),
                now, DateUtils.MINUTE_IN_MILLIS);

        Future<?> pendingIO = null;
        try {
            // Try to connect to server
            if (_htmClient == null) {
                _htmClient = _service.connectToServer();
            }
            if (_htmClient == null) {
                throw new IOException("Unable to connect to server");
            }

            // Update last connected time
            SharedPreferences.Editor editor = prefs.edit();
            editor.putLong(PREF_LAST_CONNECTED_TIME, now);
            editor.apply();

            // Start by downloading all the metrics available from backend
            // in a background IO thread
            pendingIO = _service.getIOThreadPool().submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {

                    try {
                        // First load metrics
                        loadAllMetrics();

                        // Load all annotations after metrics
                        loadAllAnnotations();

                        // Load all data after annotations
                        loadAllData();

                        // Synchronize notifications after data
                        synchronizeNotifications();

                        // Synchronize application data last
                        HTMApplication.getInstance().loadApplicationData(_htmClient);

                    } catch (android.database.sqlite.SQLiteFullException e) {
                        // Try to delete old records to make room if possible
                        Log.e(TAG, "Failed to save data into database", e);
                        database.deleteOldRecords();
                    }
                    return null;
                }
            });
            // Wait for metric data to finish
            pendingIO.get();
        } catch (InterruptedException e) {
            // Cancel pending tasks
            if (!pendingIO.isDone()) {
                pendingIO.cancel(true);
            }
            Log.w(TAG, "Interrupted while loading data");
        } catch (ExecutionException e) {
            // Cancel pending tasks
            if (!pendingIO.isDone()) {
                pendingIO.cancel(true);
            }
            Throwable original = e.getCause();
            if (original instanceof AuthenticationException) {
                _service.fireAuthenticationFailedEvent();
            } else if (original instanceof ObjectNotFoundException) {
                Log.e(TAG, "Error loading data", e);
                result = context.getString(R.string.refresh_update_error, hoursSinceData);
            } else if (original instanceof IOException) {
                Log.e(TAG, "Unable to connect", e);
                result = context.getString(R.string.refresh_server_unreachable, hoursSinceData);
            } else {
                Log.e(TAG, "Error loading data", e);
                result = context.getString(R.string.refresh_update_error, hoursSinceData);
            }
        } catch (AuthenticationException e) {
            _service.fireAuthenticationFailedEvent();
        } catch (HTMException e) {
            Log.e(TAG, "Error loading data", e);
            result = context.getString(R.string.refresh_update_error, hoursSinceData);
        } catch (IOException e) {
            Log.e(TAG, "Unable to connect", e);
            result = context.getString(R.string.refresh_server_unreachable, hoursSinceData);
        }
    } finally {
        _synchronizingWithServer = false;
        fireRefreshStateEvent(_synchronizingWithServer, result);
    }
}

From source file:ubic.gemma.analysis.expression.diff.LinearModelAnalyzer.java
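An earlier version of the Gemma analyzer shown above, using the same poll-and-cancel watchdog with a 20-minute ceiling.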

/**
 * Important bit. Run the analysis.
 * 
 * @param namedMatrix
 * @param factorNameMap
 * @param modelFormula
 * @param interactionFactorLists
 * @param interceptFactor
 * @param designMatrix
 * @param baselineConditions
 * @param quantitationType
 * @return results
 */
private Map<String, LinearModelSummary> runAnalysis(
        final DoubleMatrix<CompositeSequence, BioMaterial> namedMatrix,
        final DoubleMatrix<String, String> sNamedMatrix,
        final Map<String, Collection<ExperimentalFactor>> factorNameMap, final String modelFormula,
        DesignMatrix designMatrix, ExperimentalFactor interceptFactor, List<String[]> interactionFactorLists,
        Map<ExperimentalFactor, FactorValue> baselineConditions, QuantitationType quantitationType) {

    final Map<String, LinearModelSummary> rawResults = new ConcurrentHashMap<String, LinearModelSummary>();

    Future<?> f = runAnalysisFuture(designMatrix, sNamedMatrix, rawResults, quantitationType);

    StopWatch timer = new StopWatch();
    timer.start();
    long lasttime = 0;

    // this analysis should take just 10 or 20 seconds for most data sets.
    double MAX_ANALYSIS_TIME = 60 * 1000 * 20; // 20 minutes.
    double updateIntervalMillis = 60 * 1000;// 1 minute
    while (!f.isDone()) {
        try {
            Thread.sleep(1000);

            if (timer.getTime() - lasttime > updateIntervalMillis) {
                log.info(String.format("Analysis running, %.1f minutes elapsed ...",
                        timer.getTime() / 60000.00));
                lasttime = timer.getTime();
            }

        } catch (InterruptedException e) {
            log.warn("Analysis interrupted!");
            return rawResults;
        }

        if (timer.getTime() > MAX_ANALYSIS_TIME) {
            log.error("Analysis is taking too long, something bad must have happened; cancelling");
            f.cancel(true);
            throw new RuntimeException("Analysis was taking too long, it was cancelled");
        }
    }

    if (timer.getTime() > updateIntervalMillis) {
        log.info(String.format("Analysis finished in %.1f minutes.", timer.getTime() / 60000.00));
    }

    try {
        f.get();
    } catch (InterruptedException e) {
        log.warn("Job was interrupted");
        return rawResults;
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }

    assert rawResults.size() == namedMatrix.rows() : "expected " + namedMatrix.rows() + " results, got "
            + rawResults.size();
    return rawResults;
}

From source file:com.splout.db.dnode.DNodeHandler.java
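A deploy controller that bounds a deployment with future.get(timeout, TimeUnit.SECONDS) and calls future.cancel(true) in the finally block so an unfinished deploy is interrupted.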

/**
 * Here we instantiate a Thread for waiting for the deploy so that we
 * are able to implement deploy timeout... If the deploy takes too much
 * then we cancel it. We achieve this by using Java asynchronous Future
 * objects.
 */
protected Thread getDeployControllerThread(final List<DeployAction> deployActions, final long version) {
    return new Thread() {
        public void run() {
            Future<?> future = deployExecutor.submit(newDeployRunnable(deployActions, version));
            deploysBeingExecuted.put(version, future);
            try {
                // This line makes the wait thread wait for the deploy as long as
                // the configuration tells
                // If the timeout passes a TimeoutException is thrown
                future.get(config.getInt(DNodeProperties.DEPLOY_TIMEOUT_SECONDS), TimeUnit.SECONDS);
            } catch (CancellationException e) {
                log.info("Cancelation when waiting for local deploy to finish - killing deployment "
                        + "version[" + version + "]");
                markDeployAsAborted(version, ExceptionUtils.getStackTrace(e));
            } catch (InterruptedException e) {
                log.info("Interrupted exception waiting for local deploy to finish - killing deployment"
                        + "version[" + version + "]");
                markDeployAsAborted(version, ExceptionUtils.getStackTrace(e));
            } catch (ExecutionException e) {
                log.warn("Execution exception waiting for local deploy to finish - killing deployment."
                        + "version[" + version + "]", e);
                markDeployAsAborted(version, ExceptionUtils.getStackTrace(e));
            } catch (TimeoutException e) {
                log.info("Timeout waiting for local deploy to finish - killing deployment." + "version["
                        + version + "]", e);
                markDeployAsAborted(version, "Timeout reached - "
                        + config.getInt(DNodeProperties.DEPLOY_TIMEOUT_SECONDS) + " seconds");
                lastDeployTimedout.set(true);
            } finally {
                // If the future didn't end, we just send an interrupt signal to it.
                future.cancel(true);
                deploysBeingExecuted.remove(version);
            }
        }
    };
}

From source file:com.netflix.curator.framework.recipes.locks.TestReaper.java
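A Curator test whose cleanup cancels a watcher future in the finally block, if one was created.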

@Test
public void testSparseUseNoReap() throws Exception {
    final int THRESHOLD = 3000;

    Timing timing = new Timing();
    Reaper reaper = null;
    Future<Void> watcher = null;
    CuratorFramework client = makeClient(timing, null);
    try {
        client.start();
        client.create().creatingParentsIfNeeded().forPath("/one/two/three");

        Assert.assertNotNull(client.checkExists().forPath("/one/two/three"));

        final Queue<Reaper.PathHolder> holders = new ConcurrentLinkedQueue<Reaper.PathHolder>();
        final ExecutorService pool = Executors.newCachedThreadPool();
        ScheduledExecutorService service = new ScheduledThreadPoolExecutor(1) {
            @Override
            public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
                final Reaper.PathHolder pathHolder = (Reaper.PathHolder) command;
                holders.add(pathHolder);
                final ScheduledFuture<?> f = super.schedule(command, delay, unit);
                pool.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        f.get();
                        holders.remove(pathHolder);
                        return null;
                    }
                });
                return f;
            }
        };

        reaper = new Reaper(client, service, THRESHOLD);
        reaper.start();
        reaper.addPath("/one/two/three");

        long start = System.currentTimeMillis();
        boolean emptyCountIsCorrect = false;
        while (((System.currentTimeMillis() - start) < timing.forWaiting().milliseconds())
                && !emptyCountIsCorrect) // need to loop as the Holder can go in/out of the Reaper's DelayQueue
        {
            for (Reaper.PathHolder holder : holders) {
                if (holder.path.endsWith("/one/two/three")) {
                    emptyCountIsCorrect = (holder.emptyCount > 0);
                    break;
                }
            }
            Thread.sleep(1);
        }
        Assert.assertTrue(emptyCountIsCorrect);

        client.create().forPath("/one/two/three/foo");

        Thread.sleep(2 * (THRESHOLD / Reaper.EMPTY_COUNT_THRESHOLD));
        Assert.assertNotNull(client.checkExists().forPath("/one/two/three"));
        client.delete().forPath("/one/two/three/foo");

        Thread.sleep(THRESHOLD);
        timing.sleepABit();

        Assert.assertNull(client.checkExists().forPath("/one/two/three"));
    } finally {
        if (watcher != null) {
            watcher.cancel(true);
        }
        IOUtils.closeQuietly(reaper);
        IOUtils.closeQuietly(client);
    }
}

From source file:com.hygenics.parser.ParseDispatcher.java
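A larger Fork/Join parsing pipeline that repeatedly drains futures with get() and cancels any future that is not yet done before discarding it.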

/**
 * Fork/Join pool solution maximizes speed; JSON increases ease of use.
 */
public void run() {
    log.info("Starting Clock and Parsing @" + Calendar.getInstance().getTime().toString());
    long t = Calendar.getInstance().getTimeInMillis();
    int pid = 0;
    int id = 0;
    int checkattempts = 0;
    String add = null;

    this.schema = Properties.getProperty(this.schema);
    this.select = Properties.getProperty(this.select);
    this.extracondition = Properties.getProperty(this.extracondition);
    this.column = Properties.getProperty(this.column);

    ArrayList<String> parsedrows = new ArrayList<String>();

    Set<Callable<String>> collect = new HashSet<Callable<String>>();
    List<Future<String>> futures;

    List<Future<ArrayList<String>>> qfutures;
    Set<Callable<ArrayList<String>>> qcollect = new HashSet<Callable<ArrayList<String>>>(4);

    ForkJoinPool fjp = new ForkJoinPool((int) Math.ceil(Runtime.getRuntime().availableProcessors() * procnum));

    if (schema != null) {
        createTables();
    }

    boolean run = true;
    String condition;
    int w = 0;
    int start = offset;
    int chunksize = (int) Math.ceil(pullsize / qnum);

    // attempt to query the database from multiple threads
    do {
        // query for pages
        pages = new ArrayList<String>(pullsize);
        log.info("Looking for Pages.");
        for (int conn = 0; conn < qnum; conn++) {
            // create condition
            condition = " WHERE " + pullid + " >= " + (start + (conn * chunksize)) + " AND " + pullid + " < "
                    + Integer.toString(start + (chunksize * (conn + 1)));

            if (extracondition != null) {
                condition += " " + extracondition.trim();
            }

            // get queries
            qcollect.add(new SplitQuery(template, (select + condition)));
            log.info("Fetching " + select + condition);
        }
        start += (chunksize * qnum);

        qfutures = fjp.invokeAll(qcollect);

        w = 0;
        while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
            w++;
        }
        log.info("Waited for " + w + " cycles");

        for (Future<ArrayList<String>> f : qfutures) {
            try {

                ArrayList<String> test = f.get();
                if (test != null) {
                    if (test.size() > 0) {
                        pages.addAll(test);
                    }
                }

                if (f.isDone() == false) {
                    f.cancel(true);
                }

                f = null;
            } catch (Exception e) {
                log.warn("Encoding Error!");
                e.printStackTrace();
            }
        }
        qcollect = new HashSet<Callable<ArrayList<String>>>(4);
        qfutures = null;
        log.info("Finished Getting Pages");

        // if no records then get records that may have been dropped
        if (pages.size() == 0 && checkstring != null && checkstring.trim().length() > 0
                && checkattempts < reattempts) {
            checkattempts += 1;
            log.info("Checking for Drops");
            qcollect.add(new SplitQuery(template, (checkstring)));
            qfutures = fjp.invokeAll(qcollect);

            w = 0;
            while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
                w++;
            }
            log.info("Waited for " + w + " cycles");

            for (Future<ArrayList<String>> f : qfutures) {
                try {

                    ArrayList<String> test = f.get();
                    if (test != null) {
                        if (test.size() > 0) {
                            pages.addAll(test);
                        }
                    }

                    if (f.isDone() == false) {
                        f.cancel(true);
                    }

                    f = null;
                } catch (Exception e) {
                    log.warn("Encoding Error!");
                    e.printStackTrace();
                }
            }
            qfutures = null;
            qcollect = new HashSet<Callable<ArrayList<String>>>(4);

        } else if (checkattempts >= reattempts) {
            pages.clear();
        }

        log.info("Found " + pages.size() + " records!");

        // get hashes if necessary
        if (getHash) {
            log.info("Hashing " + pages.size() + " Records");

            ArrayList<String> hashedrows = new ArrayList<String>();
            for (String row : pages) {

                collect.add(new CreateHash(row, pid));
                pid++;

            }

            log.info("Invoking");
            futures = fjp.invokeAll(collect);

            w = 0;
            while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
                w++;
            }

            log.info("Waited " + w + " Cycles!");

            for (Future<String> f : futures) {
                if (f != null) {
                    String json;
                    try {
                        json = f.get(termtime, TimeUnit.MILLISECONDS);

                        if (json != null) {
                            hashedrows.add(json);
                        }

                    } catch (Exception e) {
                        log.warn("Encoding Error!");
                        e.printStackTrace();
                    }
                }

            }
            log.info("Hashed " + hashedrows.size() + " Records!");
            pages = hashedrows;

            collect = new HashSet<Callable<String>>(pullsize);
            futures.clear();
            log.info("Completed Hashing");
        }

        log.info("Performing Regex");
        // handle single patterns
        int i = 0;
        if (singlepats != null) {

            log.info("Found Singlepats");
            int subs = 0;
            int rows = 0;
            for (String row : pages) {
                rows += 1;
                String inrow = row;
                try {

                    inrow = inrow.replaceAll("\t|\r|\r\n|\n", "");

                    Map<String, Json> jmap = Json.read(inrow).asJsonMap();

                    if (singlepats.containsKey("table")) {
                        subs += 1;

                        if (fjp.isShutdown()) {
                            fjp = new ForkJoinPool((Runtime.getRuntime().availableProcessors() * procnum));
                        }

                        if (jmap.get(column) != null) {

                            if (test) {
                                System.out.println("//////////////////////HTML////////////////////////\n"
                                        + jmap.get(column).asString()
                                        + "\n///////////////////////////////END///////////////////////////\n\n");
                            }

                            if (mustcontain != null) {
                                if (jmap.get(column).asString().contains(mustcontain)) {
                                    if (cannotcontain != null) {
                                        if (jmap.get(column).asString().contains(cannotcontain) == false)
                                            collect.add(new ParsePage(unescape, replacementPattern,
                                                    singlepats.get("table"),
                                                    jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                                    singlepats, Calendar.getInstance().getTime().toString(),
                                                    jmap.get("offenderhash").asString()));
                                    } else {
                                        collect.add(new ParsePage(unescape, replacementPattern,
                                                singlepats.get("table"),
                                                jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                                singlepats, Calendar.getInstance().getTime().toString(),
                                                jmap.get("offenderhash").asString()));
                                    }
                                }
                            } else if (cannotcontain != null) {
                                if (jmap.get(column).asString().contains(cannotcontain) == false) {
                                    collect.add(
                                            new ParsePage(unescape, replacementPattern, singlepats.get("table"),
                                                    jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                                    singlepats, Calendar.getInstance().getTime().toString(),
                                                    jmap.get("offenderhash").asString()));
                                }
                            } else {
                                collect.add(new ParsePage(unescape, replacementPattern, singlepats.get("table"),
                                        jmap.get(column).asString().replaceAll("\\s\\s", " "), singlepats,
                                        Calendar.getInstance().getTime().toString(),
                                        jmap.get("offenderhash").asString()));
                            }
                        }
                    }
                    i++;

                    if (((i % commit_size) == 0 && i != 0) || i == pages.size()
                            || (pages.size() == 1 && singlepats != null)) {
                        log.info("Getting Regex Results");

                        log.info("Getting Tasks");

                        futures = fjp.invokeAll(collect);

                        w = 0;

                        while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
                            w++;
                        }

                        log.info("Waited for " + w + " cycles");

                        for (Future<String> r : futures) {
                            try {

                                add = r.get();
                                if (add.contains("No Data") == false) {
                                    parsedrows.add(add);
                                }

                                add = null;

                            } catch (Exception e) {
                                log.warn("Encoding Error!");
                                e.printStackTrace();
                            }
                        }

                        futures = null;
                        collect = new HashSet<Callable<String>>();

                        if (parsedrows.size() >= commit_size) {
                            log.info("INSERTING " + parsedrows.size() + " records!");
                            if (parsedrows.size() >= SPLITSIZE) {
                                sendToDb(parsedrows, true);
                            } else {
                                sendToDb(parsedrows, false);
                            }

                            parsedrows = new ArrayList<String>(pullsize);
                        }

                        // hint to the GC in case it actually pays off
                        System.gc();
                        Runtime.getRuntime().gc();
                    }
                } catch (Exception e) {
                    log.warn("Encoding Error!");
                    e.printStackTrace();
                }
            }
            log.info("Submitted " + subs + " records. Found " + rows + " rows");
        }

        log.info("REMAINING ROWS TO COMMIT " + parsedrows.size());
        log.info("Rows Left" + parsedrows.size());
        if (parsedrows.size() > 0) {

            if (parsedrows.size() >= SPLITSIZE) {
                sendToDb(parsedrows, true);
            } else {
                sendToDb(parsedrows, false);
            }

            parsedrows = new ArrayList<String>();
        }

        // handle multi patterns
        if (multipats != null) {
            // parse multiple pages for the run
            int subs = 0;
            for (String row : pages) {
                try {
                    for (String k : multipats.keySet()) {
                        if (fjp.isShutdown()) {

                            fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
                        }

                        Map<String, Json> jmap = Json.read(row).asJsonMap();

                        if (jmap.get(column) != null) {
                            subs += 1;
                            if (test) {
                                System.out.println("//////////////////////HTML////////////////////////\n"
                                        + jmap.get(column).asString()
                                        + "\n///////////////////////////////END///////////////////////////\n\n");
                            }

                            if (mustcontain != null) {
                                if (jmap.get(column).asString().contains(mustcontain)) {
                                    if (cannotcontain != null) {
                                        if (jmap.get(column).asString().contains(cannotcontain) == false) {
                                            collect.add(
                                                    new ParseMultiPage(unescape, replacementPattern, k,
                                                            jmap.get(column).asString().replaceAll("\\s\\s",
                                                                    " "),
                                                            jmap.get("offenderhash").asString(),
                                                            Calendar.getInstance().getTime().toString(),
                                                            multipats.get(k)));
                                        }
                                    } else {
                                        collect.add(new ParseMultiPage(unescape, replacementPattern, k,
                                                jmap.get(column).asString(),
                                                jmap.get("offenderhash").asString().replaceAll("\\s\\s", " "),
                                                Calendar.getInstance().getTime().toString(), multipats.get(k)));
                                    }
                                }
                            } else if (cannotcontain != null) {
                                if (jmap.get(column).asString().contains(cannotcontain) == false) {
                                    collect.add(new ParseMultiPage(unescape, replacementPattern, k,
                                            jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                            jmap.get("offenderhash").asString(),
                                            Calendar.getInstance().getTime().toString(), multipats.get(k)));
                                }

                            } else {
                                collect.add(new ParseMultiPage(unescape, replacementPattern, k,
                                        jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                        jmap.get("offenderhash").asString(),
                                        Calendar.getInstance().getTime().toString(), multipats.get(k)));
                            }
                        }

                        i++;
                        if (((i % commit_size) == 0 && i != 0) || i == pages.size()
                                || (pages.size() == 1 && multipats != null)) {
                            futures = fjp.invokeAll(collect);
                            w = 0;
                            while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
                                w++;
                            }

                            log.info("Waited " + w + " Cycles");

                            for (Future<String> r : futures) {
                                try {
                                    add = r.get();

                                    if (add.contains("No Data") == false) {

                                        for (String js : add.split("~")) {
                                            parsedrows.add(js);
                                        }
                                    }
                                    add = null;

                                    if (r.isDone() == false) {
                                        r.cancel(true);
                                    }
                                    r = null;

                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                } catch (ExecutionException e) {
                                    e.printStackTrace();
                                }
                            }

                            futures = null;
                            collect = new HashSet<Callable<String>>();

                            if (parsedrows.size() >= commit_size) {
                                log.info("INSERTING " + parsedrows.size() + " records!");
                                if (parsedrows.size() >= SPLITSIZE) {
                                    sendToDb(parsedrows, true);
                                } else {
                                    sendToDb(parsedrows, false);
                                }
                                parsedrows = new ArrayList<String>(pullsize);
                            }

                            // hint to the gc in case it actually pays off
                            System.gc();
                            Runtime.getRuntime().gc();
                        }
                    }

                } catch (Exception e) {
                    log.warn("Encoding Error!");
                }

            }
            log.info("Submitted " + subs + " records.");
        }

        // handle looped patterns
        if (loopedpats != null) {
            log.info("Looped Patterns Found");
            int subs = 0;
            if (fjp.isShutdown()) {
                fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procnum);
            }

            for (String row : pages) {
                try {

                    for (String k : loopedpats.keySet()) {
                        if (fjp.isShutdown()) {
                            fjp = new ForkJoinPool(Runtime.getRuntime().availableProcessors() * procnum);
                        }
                        Map<String, Json> jmap = Json.read(row).asJsonMap();

                        if (jmap.get(column) != null) {
                            subs += 1;
                            if (mustcontain != null) {
                                if (jmap.get(column).asString().contains(mustcontain)) {
                                    if (cannotcontain != null) {
                                        if (jmap.get(column).asString().contains(cannotcontain) == false) {
                                            collect.add(
                                                    new LoopRegex(unescape,
                                                            jmap.get(column).asString().replaceAll("\\s\\s",
                                                                    " "),
                                                            jmap.get("offenderhash").asString(),
                                                            Calendar.getInstance().getTime().toString(), k,
                                                            replacementPattern, loopedpats.get(k), test));
                                        }
                                    } else {
                                        collect.add(new LoopRegex(unescape,
                                                jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                                jmap.get("offenderhash").asString(),
                                                Calendar.getInstance().getTime().toString(), k,
                                                replacementPattern, loopedpats.get(k), test));
                                    }
                                }
                            } else if (cannotcontain != null) {
                                if (jmap.get(column).asString().contains(cannotcontain) == false) {
                                    collect.add(new LoopRegex(unescape,
                                            jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                            jmap.get("offenderhash").asString(),
                                            Calendar.getInstance().getTime().toString(), k, replacementPattern,
                                            loopedpats.get(k), test));
                                }
                            } else {
                                collect.add(new LoopRegex(unescape,
                                        jmap.get(column).asString().replaceAll("\\s\\s", " "),
                                        jmap.get("offenderhash").asString(),
                                        Calendar.getInstance().getTime().toString(), k, replacementPattern,
                                        loopedpats.get(k), test));
                            }
                            jmap.remove(k);
                        }
                        i++;
                        // check pages.size() == 1 before the modulo to avoid division by zero
                        if (((i % commit_size) == 0 && i != 0) || pages.size() == 1
                                || (i % (pages.size() - 1)) == 0) {

                            futures = fjp.invokeAll(collect);

                            w = 0;

                            while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
                                w++;
                            }
                            log.info("Waited " + w + " Cycles");

                            for (Future<String> r : futures) {
                                try {
                                    add = r.get();
                                    if (add.contains("No Data") == false) {
                                        for (String toarr : add.split("~")) {
                                            parsedrows.add(toarr);
                                        }
                                    }

                                    if (r.isDone() == false) {
                                        r.cancel(true);
                                    }
                                    add = null;

                                } catch (Exception e) {
                                    log.warn("Encoding Error!");
                                    e.printStackTrace();
                                }
                            }

                            futures = null;
                            collect = new HashSet<Callable<String>>();

                            // hint to the gc in case it actually pays off
                            System.gc();
                            Runtime.getRuntime().gc();
                        }
                    }

                    if (parsedrows.size() >= this.commit_size) {
                        log.info("INSERTING " + parsedrows.size() + " records!");
                        if (parsedrows.size() >= SPLITSIZE) {
                            sendToDb(parsedrows, true);
                        } else {
                            sendToDb(parsedrows, false);
                        }

                        parsedrows = new ArrayList<String>(pullsize);
                    }

                } catch (Exception e) {
                    log.warn("Encoding Error!");
                }
            }
            log.info("Submitted " + subs + " records.");
        }

        if (collect.size() > 0) {
            log.info("Getting Last Regex Results for Iteration");

            log.info("Getting Tasks");

            futures = fjp.invokeAll(collect);

            w = 0;

            while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
                w++;
            }

            log.info("Waited for " + w + " cycles");

            for (Future<String> r : futures) {
                try {

                    add = r.get();
                    if (add.contains("No Data") == false) {
                        parsedrows.add(add);
                    }

                    add = null;

                } catch (Exception e) {
                    log.warn("Encoding Error!");
                    e.printStackTrace();
                }
            }

            futures = null;
            collect = new HashSet<Callable<String>>(pullsize);
            // hint to the GC in case it actually pays off
            System.gc();
            Runtime.getRuntime().gc();
        }

        log.info("REMAINING ROWS TO COMMIT " + parsedrows.size());
        log.info("Rows Left" + parsedrows.size());
        if (parsedrows.size() > 0) {

            if (parsedrows.size() >= SPLITSIZE) {
                sendToDb(parsedrows, true);
            } else {
                sendToDb(parsedrows, false);
            }

            parsedrows = new ArrayList<String>();
        }

    } while (pages != null && pages.size() > 0);

    // ensure that nothing is still caught in limbo
    // final parser to ensure that nothing is left out
    if (collect.size() > 0) {
        log.info("More Rows Caught in FJP, Completing Process");
        futures = fjp.invokeAll(collect);

        w = 0;

        while (fjp.getActiveThreadCount() > 0 && fjp.isQuiescent() == false) {
            w++;
        }
        log.info("Waited " + w + " Cycles");

        for (Future<String> r : futures) {
            try {
                add = r.get();

                if (add.contains("No Data") == false) {

                    for (String js : add.split("~")) {
                        parsedrows.add(js);
                    }
                }
                add = null;

                if (r.isDone() == false) {
                    r.cancel(true);
                }
                r = null;

            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }

        futures = null;
        collect = null;
    }

    // send any remaining parsed rows to the db
    if (parsedrows.size() > 0) {

        if (parsedrows.size() >= SPLITSIZE) {
            sendToDb(parsedrows, true);
        } else {
            sendToDb(parsedrows, false);
        }

        parsedrows = new ArrayList<String>();
    }

    log.info("Shutting Down Fork Join Pool");
    if (fjp.isShutdown() == false) {
        fjp.shutdownNow();
    }

    fjp = null;

    log.info("Complete @" + Calendar.getInstance().getTime().toString());
    log.info("Total Runtime(seconds): "
            + Double.toString((double) (Calendar.getInstance().getTimeInMillis() - t) / 1000));

    // hint to the gc in case it actually pays off
    System.gc();
    Runtime.getRuntime().gc();
}

From source file:org.apache.marmotta.platform.sparql.services.sparql.SparqlServiceImpl.java
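This service evaluates a SPARQL query on an executor and calls future.cancel(true) when the configured timeout expires or the wait is interrupted.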

@Override
@Deprecated
public void query(final QueryLanguage queryLanguage, final String query,
        final TupleQueryResultWriter tupleWriter, final BooleanQueryResultWriter booleanWriter,
        final SPARQLGraphResultWriter graphWriter, int timeoutInSeconds)
        throws MarmottaException, MalformedQueryException, QueryEvaluationException, TimeoutException {

    log.debug("executing SPARQL query:\n{}", query);

    Future<Boolean> future = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            long start = System.currentTimeMillis();
            try {
                RepositoryConnection connection = sesameService.getConnection();
                try {
                    connection.begin();
                    Query sparqlQuery = connection.prepareQuery(queryLanguage, query,
                            configurationService.getBaseUri());

                    if (sparqlQuery instanceof TupleQuery) {
                        query((TupleQuery) sparqlQuery, tupleWriter);
                    } else if (sparqlQuery instanceof BooleanQuery) {
                        query((BooleanQuery) sparqlQuery, booleanWriter);
                    } else if (sparqlQuery instanceof GraphQuery) {
                        query((GraphQuery) sparqlQuery, graphWriter.getOutputStream(), graphWriter.getFormat());
                    } else {
                        connection.rollback();
                        throw new InvalidArgumentException(
                                "SPARQL query type " + sparqlQuery.getClass() + " not supported!");
                    }

                    connection.commit();
                } catch (Exception ex) {
                    connection.rollback();
                    throw ex;
                } finally {
                    connection.close();
                }
            } catch (RepositoryException e) {
                log.error("error while getting repository connection: {}", e);
                throw new MarmottaException("error while getting repository connection", e);
            } catch (QueryEvaluationException e) {
                log.error("error while evaluating query: {}", e.getMessage());
                throw new MarmottaException("error while writing query result in format ", e);
            }

            log.debug("SPARQL execution took {}ms", System.currentTimeMillis() - start);

            return Boolean.TRUE;
        }
    });

    try {
        future.get(timeoutInSeconds, TimeUnit.SECONDS);
    } catch (InterruptedException | TimeoutException e) {
        log.info("SPARQL query execution aborted due to timeout");
        future.cancel(true);
        throw new TimeoutException("SPARQL query execution aborted due to timeout ("
                + configurationService.getIntConfiguration("sparql.timeout", 60) + "s)");
    } catch (ExecutionException e) {
        log.info("SPARQL query execution aborted due to exception");
        log.debug("exception details", e);
        if (e.getCause() instanceof MarmottaException) {
            throw (MarmottaException) e.getCause();
        } else if (e.getCause() instanceof MalformedQueryException) {
            throw (MalformedQueryException) e.getCause();
        } else {
            throw new MarmottaException("unknown exception while evaluating SPARQL query", e.getCause());
        }
    }
}

From source file:org.apache.marmotta.platform.sparql.services.sparql.SparqlServiceImpl.java
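A second overload of the same service, applying the identical timeout-and-cancel pattern while writing results to an OutputStream.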

@Override
public void query(final QueryLanguage language, final String query, final OutputStream output,
        final String format, int timeoutInSeconds)
        throws MarmottaException, TimeoutException, MalformedQueryException {
    log.debug("executing SPARQL query:\n{}", query);
    Future<Boolean> future = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            long start = System.currentTimeMillis();
            try {
                RepositoryConnection connection = sesameService.getConnection();
                try {
                    connection.begin();
                    Query sparqlQuery = connection.prepareQuery(language, query,
                            configurationService.getBaseUri());

                    if (sparqlQuery instanceof TupleQuery) {
                        query((TupleQuery) sparqlQuery, output, format);
                    } else if (sparqlQuery instanceof BooleanQuery) {
                        query((BooleanQuery) sparqlQuery, output, format);
                    } else if (sparqlQuery instanceof GraphQuery) {
                        query((GraphQuery) sparqlQuery, output, format);
                    } else {
                        throw new InvalidArgumentException(
                                "SPARQL query type " + sparqlQuery.getClass() + " not supported!");
                    }

                    connection.commit();
                } catch (Exception ex) {
                    connection.rollback();
                    throw ex;
                } finally {
                    connection.close();
                }
            } catch (RepositoryException e) {
                log.error("error while getting repository connection: {}", e);
                throw new MarmottaException("error while getting repository connection", e);
            } catch (QueryEvaluationException e) {
                log.error("error while evaluating query: {}", e);
                throw new MarmottaException("error while evaluating query ", e);
            } catch (MalformedQueryException e) {
                log.error("error because malformed query: {}", e);
                throw new MarmottaException("error because malformed query", e);
            }

            log.debug("SPARQL execution took {}ms", System.currentTimeMillis() - start);
            return Boolean.TRUE;
        }
    });

    try {
        future.get(timeoutInSeconds, TimeUnit.SECONDS);
    } catch (InterruptedException | TimeoutException e) {
        log.info("SPARQL query execution aborted due to timeout");
        future.cancel(true);
        throw new TimeoutException("SPARQL query execution aborted due to timeout ("
                + configurationService.getIntConfiguration("sparql.timeout", 60) + "s)");
    } catch (ExecutionException e) {
        log.info("SPARQL query execution aborted due to exception");
        log.debug("exception details", e);
        if (e.getCause() instanceof MarmottaException) {
            throw (MarmottaException) e.getCause();
        } else if (e.getCause() instanceof MalformedQueryException) {
            throw (MalformedQueryException) e.getCause();
        } else {
            throw new MarmottaException("unknown exception while evaluating SPARQL query", e.getCause());
        }
    }

}

From source file:edu.mayo.cts2.framework.webapp.rest.controller.MethodTimingAspect.java
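An AOP aspect that enforces a per-request time limit: on timeout it flags the worker thread, schedules a delayed cleanup of the flag, and calls future.cancel(true) so the worker is interrupted and can observe the flag.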

/**
 * Execute.
 *
 * @param pjp the pjp
 * @return the object
 * @throws Throwable the throwable
 */
@Around("execution(public *"
        + " edu.mayo.cts2.framework.webapp.rest.controller.*.*(..,edu.mayo.cts2.framework.webapp.rest.command.QueryControl,..))")
public Object execute(final ProceedingJoinPoint pjp) throws Throwable {

    QueryControl queryControl = null;

    //this should never happen
    if (ArrayUtils.isEmpty(pjp.getArgs())) {
        throw new IllegalStateException("Pointcut failure!");
    }

    for (Object arg : pjp.getArgs()) {
        if (arg.getClass() == QueryControl.class) {
            queryControl = (QueryControl) arg;
            break;
        }
    }

    //this also should never happen
    if (queryControl == null) {
        throw new IllegalStateException("Pointcut failure!");
    }

    final AtomicLong threadId = new AtomicLong(-1);

    Future<Object> future = this.executorService.submit(new Callable<Object>() {

        @Override
        public Object call() {
            try {
                threadId.set(Thread.currentThread().getId());

                /*
                 * The model here is that we clear any previous timeout before we launch the job. A design flaw is that we
                 * can't tell if we are clearing a previous timeout that simply hadn't been cleaned up yet, or if we are
                 * clearing a timeout meant for this thread that happened before this thread even launched. The second scenario 
                 * seems unlikely as the minimum timeout is 1 second - hard to believe it would take more than 1 second to 
                 * launch this thread. Plus, this thread would have to launch in the exact window in between the timeout and 
                 * the future.cancel()
                 * 
                 * If the above scenario did defy all odds and happen , it shouldn't cause much harm, as the end result would
                 * be that this thread wouldn't see the cancelled flag - and would churn away for no reason, wasting some cpu
                 * cycles, but doing no other harm.
                 */

                Timeout.clearThreadFlag(threadId.get());
                return pjp.proceed();
            } catch (Throwable e) {

                if (e instanceof Error) {
                    throw (Error) e;
                }

                if (e instanceof RuntimeException) {
                    throw (RuntimeException) e;
                }

                throw new RuntimeException(e);
            }
        }
    });

    long time = queryControl.getTimelimit();

    try {
        if (time < 0) {
            return future.get();
        } else {
            return future.get(time, TimeUnit.SECONDS);
        }
    } catch (ExecutionException e) {
        throw e.getCause();
    } catch (TimeoutException e) {
        try {
            //Set the flag for the processing thread to read
            Timeout.setTimeLimitExceeded(threadId.get());

            //Schedule another future to make sure we don't cause a memory leak if the thread IDs aren't being reused (though, they should be)
            //and therefore don't get cleared up by the next run.  Give the running thread 30 seconds to see the cancelled flag before this 
            //cleanup takes place.
            this.scheduledExecutorService.schedule(new Runnable() {
                @Override
                public void run() {
                    Timeout.clearThreadFlag(threadId.get());
                }
            }, 30, TimeUnit.SECONDS);

            //Interrupt the processing thread so it has an opportunity to check the flag and stop.
            future.cancel(true);
        } catch (Exception e1) {
            // don't think this is possible, but just in case...
        }
        throw ExceptionFactory.createTimeoutException(e.getMessage());
    }
}