Example usage for java.lang.InterruptedException.getMessage()

Introduction

On this page you can find usage examples for java.lang.InterruptedException.getMessage().

Prototype

public String getMessage() 

Document

Returns the detail message string of this throwable.

Usage
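
The examples below share a common pattern: a blocking call throws InterruptedException, the handler logs e.getMessage() for diagnostics, and the thread either restores its interrupt status or exits its loop. The following minimal sketch illustrates that pattern; the Worker class and its logger are illustrative names only and are not taken from any of the projects listed below. Note that InterruptedException is often constructed without a message, so getMessage() may return null and is best treated as supplementary detail.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.logging.Level;
import java.util.logging.Logger;

public class Worker implements Runnable {

    private static final Logger LOG = Logger.getLogger(Worker.class.getName());

    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            try {
                // take() blocks until work arrives and may throw InterruptedException
                String work = queue.take();
                LOG.info("Processing: " + work);
            } catch (InterruptedException e) {
                // Log the detail message (frequently null for InterruptedException)
                LOG.log(Level.WARNING, "Worker interrupted: " + e.getMessage(), e);
                // Restore the interrupt status so the loop condition can observe it
                Thread.currentThread().interrupt();
            }
        }
    }
}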

From source file: cx.fbn.nevernote.threads.IndexRunner.java

@Override
public void run() {
    thread().setPriority(Thread.MIN_PRIORITY);
    noteSignal = new NoteSignal();
    resourceSignal = new NoteResourceSignal();
    signal = new IndexSignal();
    logger.log(logger.EXTREME, "Starting index thread ");
    while (keepRunning) {
        idle = true;
        try {
            conn.commitTransaction();
            uncommittedCount = 0;
            String work = workQueue.take();
            idle = false;
            if (work.startsWith("SCAN")) {
                guid = null;
                interrupt = false;
                indexType = SCAN;
            }
            if (work.startsWith("REINDEXALL")) {
                guid = null;
                indexType = REINDEXALL;
            }
            if (work.startsWith("REINDEXNOTE")) {
                work = work.replace("REINDEXNOTE ", "");
                guid = work;
                indexType = REINDEXNOTE;
            }
            if (work.startsWith("STOP")) {
                keepRunning = false;
                guid = null;
            }
            logger.log(logger.EXTREME, "Type:" + indexType);
            if (indexType == SCAN && keepRunning) {
                logger.log(logger.MEDIUM, "Scanning for unindexed notes & resources");
                scanUnindexed();
                setIndexType(0);
            }
            if (indexType == REINDEXALL && keepRunning) {
                logger.log(logger.MEDIUM, "Marking all for reindex");
                reindexAll();
                setIndexType(0);
            }
            if (indexType == REINDEXNOTE && keepRunning) {
                reindexNote();
            }
        } catch (InterruptedException e) {
            logger.log(logger.LOW, "Thread interrupted exception: " + e.getMessage());
        }
    }
    logger.log(logger.EXTREME, "Shutting down database");
    conn.dbShutdown();
    logger.log(logger.EXTREME, "Database shut down.  Exiting thread");
}

From source file: pt.lunacloud.http.AmazonHttpClient.java

/**
 * Exponential sleep on failed request to avoid flooding a service with
 * retries.
 *
 * @param retries
 *            Current retry count.
 * @param previousException
 *            Exception information for the previous attempt, if any.
 */
private void pauseExponentially(int retries, LunacloudServiceException previousException,
        CustomBackoffStrategy backoffStrategy) {
    long delay = 0;
    if (backoffStrategy != null) {
        delay = backoffStrategy.getBackoffPeriod(retries);
    } else {
        long scaleFactor = 300;
        if (isThrottlingException(previousException)) {
            scaleFactor = 500 + random.nextInt(100);
        }
        delay = (long) (Math.pow(2, retries) * scaleFactor);
    }

    delay = Math.min(delay, MAX_BACKOFF_IN_MILLISECONDS);
    if (log.isDebugEnabled()) {
        log.debug("Retriable error detected, " + "will retry in " + delay + "ms, attempt number: " + retries);
    }

    try {
        Thread.sleep(delay);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new LunacloudClientException(e.getMessage(), e);
    }
}

From source file: com.auditbucket.engine.service.TrackService.java

private MetaHeader waitOnInitialSearchResult(MetaHeader metaHeader) {

    if (metaHeader.isSearchSuppressed() || metaHeader.getSearchKey() != null)
        return metaHeader; // Nothing to wait for as we're suppressing searches for this metaHeader

    int timeOut = 100;
    int i = 0;

    while (metaHeader.getSearchKey() == null && i < timeOut) {
        i++;
        try {
            Thread.sleep(300);
        } catch (InterruptedException e) {
            logger.error(e.getMessage());
        }
        metaHeader = getHeader(metaHeader.getId());
    }
    if (metaHeader.getSearchKey() == null)
        logger.error("Timeout waiting for the initial search document to be created [{}]",
                metaHeader.getMetaKey());
    return metaHeader;

}

From source file: com.heliosphere.demeter.base.runner.AbstractRunner.java

@SuppressWarnings("nls")
@Override
public void start() throws RunnerException {
    log.info(String.format("Runner started: dispatching [%1d] context(s) across [%2d] thread(s).",
            contexts.size(), threadCount));
    log.info(" ");

    ExecutorService executor = Executors.newFixedThreadPool(this.threadCount);
    for (IContext context : contexts) {
        callables.add(context.getProcessor());
    }

    try {
        futures = executor.invokeAll(callables);
    } catch (InterruptedException e) {
        throw new RunnerException("An error occurred due to: " + e.getMessage(), e);
    }

    log.info(
            "*********************************************************************************************************");
    log.info("EXECUTION SUMMARY:");
    log.info(" ");
    log.info(String.format(" Thread pool size..: [%1d]", threadCount));
    log.info(String.format(" Configuration file: [%1s]", configuration.getResource().getFile().getName()));
    log.info(String.format(" Execution file....: [%1s]", execution.getResource().getFile().getName()));
    log.info(String.format("        Description: %1s", execution.getHeader().getDescription()));
    log.info(String.format("       Parameter(s):"));
    IParameterConfiguration configuration = null;
    for (IParameterExecution p : execution.getContent().getElements()) {
        configuration = p.getConfiguration();
        log.info(String.format("               type:[%1s], name:[%2s], value:[%3s], description:[%4s]",
                p.getType(), p.getName(), p.getValue(), configuration.getDescription()));
    }
    log.info(" ");

    for (Future<IExecutionResult> future : futures) {
        try {
            IExecutionResult result = future.get();

            // Dump the execution result of the execution of a processor.
            String message = String.format("Context name:[%1s], status:[%2s], execution:[%4s]",
                    StringUtils.abbreviateMiddle(result.getName(), "...", 50), result.getStatus().toString(),
                    result.getElapsed());
            log.error(message);

            // If process has failed, then dump the exceptions!
            if (result.getStatus() == ExecutionStatusType.FAILED) {
                for (Exception exception : result.getExceptions()) {
                    log.error(String.format("   Exception caught -> %1s", exception.getMessage()), exception);
                }
            }
        } catch (InterruptedException | ExecutionException e) {
            throw new RunnerException("An error occurred due to: " + e.getMessage(), e);
        }
    }

    executor.shutdown();
    watch.stop();

    log.info(" ");
    log.info(String.format("Runner finished processing: [%1d] context(s) in a total of: [%2s]", contexts.size(),
            watch.toString()));
    log.info(
            "*********************************************************************************************************");
}

From source file: net.sf.ehcache.distribution.RMIAsynchronousCacheReplicator.java

/**
 * RemoteDebugger method for the replicationQueue thread.
 * <p/>
 * Note that the replicationQueue thread locks the cache for the entire time it is writing elements to the disk.
 */
private void replicationThreadMain() {
    while (true) {
        // Wait for elements in the replicationQueue
        while (alive() && replicationQueue != null && replicationQueue.size() == 0) {
            try {
                Thread.sleep(asynchronousReplicationInterval);
            } catch (InterruptedException e) {
                LOG.debug("Spool Thread interrupted.");
                return;
            }
        }
        if (notAlive()) {
            return;
        }
        try {
            if (replicationQueue.size() != 0) {
                flushReplicationQueue();
            }
        } catch (Throwable e) {
            LOG.warn("Exception on flushing of replication queue: " + e.getMessage() + ". Continuing...", e);
        }
    }
}

From source file: com.couchbase.lite.DocumentTest.java

/**
 * Unit Test for https://github.com/couchbase/couchbase-lite-java-core/issues/472
 * <p/>
 * Tries to reproduce the scenario which is described in following comment.
 * https://github.com/couchbase/couchbase-lite-net/issues/388#issuecomment-77637583
 */
public void testUpdateConflict() throws Exception {
    Document document = database.getDocument("testUpdateConflict");
    Map<String, Object> properties = new HashMap<String, Object>();
    properties.put("title", "testUpdateConflict");
    document.putProperties(properties);

    final String title1 = "testUpdateConflict - 1";
    final String text1 = "notes - 1";

    final String title2 = "testUpdateConflict - 2";
    final String notes2 = "notes - 2";

    final CountDownLatch latch1 = new CountDownLatch(1);
    final CountDownLatch latch2 = new CountDownLatch(1);

    // Another thread to update document
    // This thread pretends to be Pull replicator update logic
    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            Log.w(TAG, "Thread.run() start");

            // wait till main thread finishes to create newRevision
            Log.w(TAG, "Thread.run() latch1.await()");
            try {
                latch1.await();
            } catch (InterruptedException e) {
                Log.e(TAG, e.getMessage());
            }

            Log.w(TAG, "Thread.run() exit from latch1.await()");

            Document document1 = database.getDocument("testUpdateConflict");
            Map<String, Object> properties1 = new HashMap<String, Object>();
            properties1.putAll(document1.getProperties());
            properties1.put("title", title1);
            properties1.put("text", text1);
            try {
                document1.putProperties(properties1);
            } catch (CouchbaseLiteException e) {
                Log.e(TAG, "[Thread.run()] " + e.getMessage());
            }

            Log.w(TAG, "Thread.run() latch2.countDown()");
            latch2.countDown();

            Log.w(TAG, "Thread.run() end");
        }
    });
    thread.start();

    // main thread to update document
    document.update(new Document.DocumentUpdater() {
        @Override
        public boolean update(UnsavedRevision newRevision) {

            Log.w(TAG, "DocumentUpdater.update() start");

            // after created newRevision wait till other thread to update document.
            Log.w(TAG, "DocumentUpdater.update() latch1.countDown()");
            latch1.countDown();

            Log.w(TAG, "DocumentUpdater.update() latch2.await()");
            try {
                latch2.await();
            } catch (InterruptedException e) {
                Log.e(TAG, "[DocumentUpdater.update()]" + e.getMessage());
            }

            Map<String, Object> properties2 = newRevision.getUserProperties();
            properties2.put("title", title2);
            properties2.put("notes", notes2);
            newRevision.setUserProperties(properties2);

            Log.w(TAG, "DocumentUpdater.update() end");
            return true;
        }
    });

    Document document4 = database.getDocument("testUpdateConflict");
    Log.w(TAG, "" + document4.getProperties());
    assertEquals(title2, document4.getProperties().get("title"));
    assertEquals(notes2, document4.getProperties().get("notes"));
    assertEquals(text1, document4.getProperties().get("text"));
}

From source file: cn.ctyun.amazonaws.http.AmazonHttpClient.java

/**
 * Exponential sleep on failed request to avoid flooding a service with
 * retries.
 *
 * @param retries
 *            Current retry count.
 * @param previousException
 *            Exception information for the previous attempt, if any.
 */
private void pauseExponentially(int retries, AmazonServiceException previousException,
        CustomBackoffStrategy backoffStrategy) {
    long delay = 0;
    if (backoffStrategy != null) {
        delay = backoffStrategy.getBackoffPeriod(retries);
    } else {
        long scaleFactor = 300;
        if (isThrottlingException(previousException)) {
            scaleFactor = 500 + random.nextInt(100);
        }
        delay = (long) (Math.pow(2, retries) * scaleFactor);
    }

    delay = Math.min(delay, MAX_BACKOFF_IN_MILLISECONDS);
    if (log.isDebugEnabled()) {
        log.debug("Retriable error detected, " + "will retry in " + delay + "ms, attempt number: " + retries);
    }

    try {
        Thread.sleep(delay);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AmazonClientException(e.getMessage(), e);
    }
}

From source file: com.baifendian.swordfish.execserver.runner.flow.FlowRunner.java

/**
 * Update the status of nodes that have not finished yet, optionally marking them as killed.
 */
private void updateUnfinishNodeStatus(boolean updateKilled) {
    Date now = new Date();

    // Check every node runner that is still registered as active.
    for (Map.Entry<NodeRunner, Future<Boolean>> entry : activeNodeRunners.entrySet()) {
        NodeRunner nodeRunner = entry.getKey();
        Future<Boolean> future = entry.getValue();

        // The node has not finished yet.
        if (!future.isDone()) {
            // Mark it as killed when requested, or when it is neither a scheduled nor a complement-data run.
            if (updateKilled || (nodeRunner.getExecType() != ExecType.SCHEDULER
                    && nodeRunner.getExecType() != ExecType.COMPLEMENT_DATA)) {
                ExecutionNode executionNode = nodeRunner.getExecutionNode();
                updateNodeToKilled(executionNode);
            }
        } else {
            // The node has finished; inspect its result.
            Boolean value = false;

            try {
                value = future.get();

                if (value) {
                    ExecutionNode executionNode = nodeRunner.getExecutionNode();

                    executionNode.setStatus(FlowStatus.SUCCESS);
                    executionNode.setEndTime(now);

                    flowDao.updateExecutionNode(executionNode);
                }
            } catch (InterruptedException e) {
                logger.error(e.getMessage(), e);
            } catch (ExecutionException e) {
                logger.error(e.getMessage(), e);
            } catch (CancellationException e) { // the task was cancelled
                logger.error("task has been cancel, name:{}", nodeRunner.getNodename());
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            } finally {
                if (!value) {
                    if (updateKilled || (nodeRunner.getExecType() != ExecType.SCHEDULER
                            && nodeRunner.getExecType() != ExecType.COMPLEMENT_DATA)) {
                        ExecutionNode executionNode = nodeRunner.getExecutionNode();
                        updateNodeToKilled(executionNode);
                    }
                }
            }
        }
    }
}

From source file: com.github.hdl.tensorflow.yarn.app.ApplicationMaster.java

@VisibleForTesting
protected boolean finish() {
    // wait for completion.
    while (!done && (numCompletedContainers.get() != numTotalContainers)) {
        try {
            Thread.sleep(200);
        } catch (InterruptedException ex) {
        }
    }

    // Join all launched threads
    // needed for when we time out
    // and we need to release containers
    for (Thread launchThread : launchThreads) {
        try {
            launchThread.join(10000);
        } catch (InterruptedException e) {
            LOG.info("Exception thrown in thread join: " + e.getMessage());
            e.printStackTrace();
        }
    }

    // When the application completes, it should stop all running containers
    LOG.info("Application completed. Stopping running containers");
    nmClientAsync.stop();

    // When the application completes, it should send a finish application
    // signal to the RM
    LOG.info("Application completed. Signalling finish to RM");

    FinalApplicationStatus appStatus;
    String appMessage = null;
    boolean success = true;
    if (numCompletedContainers.get() - numFailedContainers.get() >= numTotalContainers) {
        appStatus = FinalApplicationStatus.SUCCEEDED;
    } else {
        appStatus = FinalApplicationStatus.FAILED;
        appMessage = "Diagnostics." + ", total=" + numTotalContainers + ", completed="
                + numCompletedContainers.get() + ", allocated=" + numAllocatedContainers.get() + ", failed="
                + numFailedContainers.get();
        LOG.info(appMessage);
        success = false;
    }
    try {
        amRMClient.unregisterApplicationMaster(appStatus, appMessage, null);
    } catch (YarnException ex) {
        LOG.error("Failed to unregister application", ex);
    } catch (IOException e) {
        LOG.error("Failed to unregister application", e);
    }

    amRMClient.stop();

    return success;
}

From source file: grakn.core.daemon.executor.Storage.java

/**
 * Attempt to start Storage and perform periodic polling until it is ready. The readiness check is performed with nodetool.
 * <p>
 * A {@link GraknDaemonException} will be thrown if Storage does not start after a timeout specified
 * in the 'WAIT_INTERVAL_SECOND' field.
 *
 * @throws GraknDaemonException
 */
private void start() {
    System.out.print("Starting " + DISPLAY_NAME + "...");
    System.out.flush();

    // Consume configuration from Grakn config file into Cassandra config file
    initialiseConfig();

    Future<Executor.Result> result = daemonExecutor.executeAsync(storageCommand(), graknHome.toFile());

    LocalDateTime timeout = LocalDateTime.now().plusSeconds(STORAGE_STARTUP_TIMEOUT_SECOND);

    while (LocalDateTime.now().isBefore(timeout) && !result.isDone()) {
        System.out.print(".");
        System.out.flush();

        if (storageStatus().equals("running")) {
            System.out.println("SUCCESS");
            return;
        }

        try {
            Thread.sleep(WAIT_INTERVAL_SECOND * 1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    try {
        System.out.println("FAILED!");
        System.err.println("Unable to start " + DISPLAY_NAME + ".");
        String errorMessage = "Process exited with code '" + result.get().exitCode() + "': '"
                + result.get().stderr() + "'";
        System.err.println(errorMessage);
        throw new GraknDaemonException(errorMessage);
    } catch (InterruptedException | ExecutionException e) {
        throw new GraknDaemonException(e.getMessage(), e);
    }
}