Example usage for java.lang.Thread interrupted()

List of usage examples for java.lang.Thread.interrupted()

Introduction

On this page you can find example usage for java.lang.Thread.interrupted().

Prototype

public static boolean interrupted() 

Document

Tests whether the current thread has been interrupted. The interrupted status of the thread is cleared by this method, so if it were called twice in succession, the second call would return false (unless the thread was interrupted again in between).
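
Unlike Thread.currentThread().isInterrupted(), which leaves the flag untouched, interrupted() consumes it. The minimal, self-contained sketch below is our own illustration (the class name InterruptedDemo is invented; it is not taken from any of the source files that follow) and shows both the worker-loop idiom used in several examples on this page and the flag-clearing side effect.

public class InterruptedDemo {
    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            // Common idiom: run until this thread is interrupted.
            while (!Thread.interrupted()) {
                Thread.yield(); // ... do a unit of work ...
            }
            // interrupted() cleared the flag, so a second call returns false.
            System.out.println("still interrupted? " + Thread.interrupted());
        });
        worker.start();
        worker.interrupt(); // request cancellation
        worker.join();
    }
}

This flag-clearing behavior is also why several examples below call Thread.interrupted() in a catch or finally block: they deliberately clear a pending interrupt so it does not leak into the next unit of work.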

Usage

From source file:itdelatrisu.opsu.Utils.java

/**
 * Returns the contents of a URL as a string.
 * @param url the remote URL
 * @return the contents as a string, or null if any error occurred
 * @author Roland Illig (http://stackoverflow.com/a/4308662)
 * @throws IOException if an I/O exception occurs
 */
public static String readDataFromUrl(URL url) throws IOException {
    // open connection
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setConnectTimeout(Download.CONNECTION_TIMEOUT);
    conn.setReadTimeout(Download.READ_TIMEOUT);
    conn.setUseCaches(false);
    try {
        conn.connect();
    } catch (SocketTimeoutException e) {
        Log.warn("Connection to server timed out.", e);
        throw e;
    }

    if (Thread.interrupted())
        return null;

    // read contents
    try (InputStream in = conn.getInputStream()) {
        BufferedReader rd = new BufferedReader(new InputStreamReader(in));
        StringBuilder sb = new StringBuilder();
        int c;
        while ((c = rd.read()) != -1)
            sb.append((char) c);
        return sb.toString();
    } catch (SocketTimeoutException e) {
        Log.warn("Connection to server timed out.", e);
        throw e;
    }
}

From source file:org.apache.hadoop.hbase.coordination.ZkSplitLogWorkerCoordination.java

/**
 * try to grab a 'lock' on the task zk node to own and execute the task.
 * <p>
 * @param path zk node for the task
 */
private void grabTask(String path) {
    Stat stat = new Stat();
    byte[] data;
    synchronized (grabTaskLock) {
        currentTask = path;
        workerInGrabTask = true;
        if (Thread.interrupted()) {
            return;
        }
    }
    try {
        try {
            if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) {
                SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.incrementAndGet();
                return;
            }
        } catch (KeeperException e) {
            LOG.warn("Failed to get data for znode " + path, e);
            SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
            return;
        }
        SplitLogTask slt;
        try {
            slt = SplitLogTask.parseFrom(data);
        } catch (DeserializationException e) {
            LOG.warn("Failed parse data for znode " + path, e);
            SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
            return;
        }
        if (!slt.isUnassigned()) {
            SplitLogCounters.tot_wkr_failed_to_grab_task_owned.incrementAndGet();
            return;
        }

        currentVersion = attemptToOwnTask(true, watcher, server.getServerName(), path, slt.getMode(),
                stat.getVersion());
        if (currentVersion < 0) {
            SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.incrementAndGet();
            return;
        }

        if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
            ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
            splitTaskDetails.setTaskNode(currentTask);
            splitTaskDetails.setCurTaskZKVersion(new MutableInt(currentVersion));

            endTask(new SplitLogTask.Done(server.getServerName(), slt.getMode()),
                    SplitLogCounters.tot_wkr_task_acquired_rescan, splitTaskDetails);
            return;
        }

        LOG.info("worker " + server.getServerName() + " acquired task " + path);
        SplitLogCounters.tot_wkr_task_acquired.incrementAndGet();
        getDataSetWatchAsync();

        submitTask(path, slt.getMode(), currentVersion, reportPeriod);

        // after a successful submit, sleep a little bit to allow other RSs to grab the rest tasks
        try {
            int sleepTime = RandomUtils.nextInt(500) + 500;
            Thread.sleep(sleepTime);
        } catch (InterruptedException e) {
            LOG.warn("Interrupted while yielding for other region servers", e);
            Thread.currentThread().interrupt();
        }
    } finally {
        synchronized (grabTaskLock) {
            workerInGrabTask = false;
            // clear the interrupt from stopTask() otherwise the next task will
            // suffer
            Thread.interrupted();
        }
    }
}

From source file:tw.edu.sju.ee.eea.module.iepe.project.window.IepeRealtimeVoltageElement.java

@Override
public void run() {
    Thread.currentThread().setPriority(Thread.MIN_PRIORITY);
    while (!Thread.interrupted()) {
        if (!hold) {
            for (int i = 0; i < list.size(); i++) {
                list.get(i).update(t);
                //                    channels[i].update(t);
            }
            //                channels[0].series.setNotify(true);
            //                channels[0].series.setNotify(false);
            new Thread() {

                @Override
                public void run() {
                    ((SourceChannel) list.get(0)).series.changed();
                }

            }.start();
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ex) {
            Exceptions.printStackTrace(ex);
        }
    }
}

From source file:com.alibaba.otter.shared.arbitrate.impl.setl.monitor.MainstemMonitor.java

/**
 * Checks whether the mainstem is running on the current node.
 */
public boolean check() {
    String path = StagePathUtils.getMainStem(getPipelineId());
    try {
        byte[] bytes = zookeeper.readData(path);
        Long nid = ArbitrateConfigUtils.getCurrentNid();
        MainStemEventData eventData = JsonUtils.unmarshalFromByte(bytes, MainStemEventData.class);
        activeData = eventData;
        // compare the mainstem's nid with the current node's nid
        boolean result = nid.equals(eventData.getNid());
        if (!result) {
            logger.warn("mainstem is running in node[{}] , but not in node[{}]", eventData.getNid(), nid);
        }
        return result;
    } catch (ZkNoNodeException e) {
        logger.warn("mainstem is not run any in node");
        return false;
    } catch (ZkInterruptedException e) {
        logger.warn("mainstem check is interrupt");
        Thread.interrupted(); // clear the pending interrupt before retrying
        return check();
    } catch (ZkException e) {
        logger.warn("mainstem check is failed");
        return false;
    }
}

From source file:org.archive.io.RecordingInputStream.java

/**
 * Read all of a stream (Or read until we timeout or have read to the max).
 * @param softMaxLength Maximum length to read; if zero or < 0, then no 
 * limit. If met, return normally.
 * @throws IOException failed read.
 * @throws RecorderLengthExceededException
 * @throws RecorderTimeoutException
 * @throws InterruptedException
 * @deprecated
 */
public void readFullyOrUntil(long softMaxLength)
        throws IOException, RecorderLengthExceededException, RecorderTimeoutException, InterruptedException {
    // Check we're open before proceeding.
    if (!isOpen()) {
        // TODO: should this be a noisier exception-raising error? 
        return;
    }

    long totalBytes = 0L;
    long bytesRead = -1L;
    long maxToRead = -1;
    while (true) {
        try {
            // read no more than soft max
            maxToRead = (softMaxLength <= 0) ? drainBuffer.length
                    : Math.min(drainBuffer.length, softMaxLength - totalBytes);
            // nor more than hard max
            maxToRead = Math.min(maxToRead, recordingOutputStream.getRemainingLength());
            // but always at least 1 (to trigger hard max exception)
            maxToRead = Math.max(maxToRead, 1);

            bytesRead = read(drainBuffer, 0, (int) maxToRead);
            if (bytesRead == -1) {
                break;
            }
            totalBytes += bytesRead;

            if (Thread.interrupted()) {
                throw new InterruptedException("Interrupted during IO");
            }
        } catch (SocketTimeoutException e) {
            // A socket timeout is just a transient problem, meaning
            // nothing was available in the configured  timeout period,
            // but something else might become available later.
            // Take this opportunity to check the overall 
            // timeout (below).  One reason for this timeout is 
            // servers that keep up the connection, 'keep-alive', even
            // though we asked them to not keep the connection open.
            if (logger.isLoggable(Level.FINE)) {
                logger.log(Level.FINE, "socket timeout", e);
            }
            // check for interrupt
            if (Thread.interrupted()) {
                throw new InterruptedException("Interrupted during IO");
            }
            // check for overall timeout
            recordingOutputStream.checkLimits();
        } catch (SocketException se) {
            throw se;
        } catch (NullPointerException e) {
            // [ 896757 ] NPEs in Andy's Th-Fri Crawl.
            // A crawl was showing NPE's in this part of the code but can
            // not reproduce.  Adding this rethrowing catch block w/
            // diagnostics to help should we come across the problem in the
            // future.
            throw new NullPointerException(
                    "Stream " + this.in + ", " + e.getMessage() + " " + Thread.currentThread().getName());
        }

        // if have read 'enough', just finish
        if (softMaxLength > 0 && totalBytes >= softMaxLength) {
            break; // return
        }
    }
}

From source file:org.apache.tomee.jul.handler.rotating.ArchivingTest.java

private void withRetry(final int countDown, final long timeout, final Runnable assertCallback) {
    try {
        assertCallback.run();
    } catch (final AssertionError e) {
        if (countDown < 1) {
            throw e;
        }
        try {
            TimeUnit.SECONDS.sleep(timeout);
        } catch (final InterruptedException e1) {
            Thread.interrupted();
        }
        withRetry(countDown - 1, timeout, assertCallback);
    }
}

From source file:com.netflix.suro.input.thrift.MessageSetProcessor.java

public void shutdown(long timeout) {
    log.info("MessageQueue is shutting down");
    isRunning = false;
    try {
        executors.shutdown();
        executors.awaitTermination(timeout, TimeUnit.MILLISECONDS);
        if (!executors.isTerminated()) {
            log.error("MessageDispatcher was not shut down gracefully");
        }
        executors.shutdownNow();
    } catch (InterruptedException e) {
        Thread.interrupted();
    }
}

From source file:org.apache.jackrabbit.core.query.lucene.IndexMerger.java

/**
 * Implements the index merging.
 */
public void run() {
    for (;;) {
        boolean isIdle = false;
        if (mergeTasks.size() == 0) {
            mergerIdle.release();
            isIdle = true;
        }
        Merge task = (Merge) mergeTasks.remove();
        if (task == QUIT) {
            mergerIdle.release();
            break;
        }
        if (isIdle) {
            try {
                mergerIdle.acquire();
            } catch (InterruptedException e) {
                Thread.interrupted();
                log.warn("Unable to acquire mergerIdle sync");
            }
        }

        log.debug("accepted merge request");

        // reset deleted documents
        deletedDocuments.clear();

        // get readers
        String[] names = new String[task.indexes.length];
        for (int i = 0; i < task.indexes.length; i++) {
            names[i] = task.indexes[i].name;
        }
        try {
            log.debug("create new index");
            PersistentIndex index = multiIndex.getOrCreateIndex(null);
            boolean success = false;
            try {

                log.debug("get index readers from MultiIndex");
                IndexReader[] readers = multiIndex.getIndexReaders(names, this);
                try {
                    // do the merge
                    long time = System.currentTimeMillis();
                    index.addIndexes(readers);
                    time = System.currentTimeMillis() - time;
                    int docCount = 0;
                    for (int i = 0; i < readers.length; i++) {
                        docCount += readers[i].numDocs();
                    }
                    log.info("merged " + docCount + " documents in " + time + " ms into " + index.getName()
                            + ".");
                } finally {
                    for (int i = 0; i < readers.length; i++) {
                        try {
                            Util.closeOrRelease(readers[i]);
                        } catch (IOException e) {
                            log.warn("Unable to close IndexReader: " + e);
                        }
                    }
                }

                // inform multi index
                // if we cannot get the sync immediately we have to quit
                if (!indexReplacement.attempt(0)) {
                    log.debug("index merging canceled");
                    break;
                }
                try {
                    log.debug("replace indexes");
                    multiIndex.replaceIndexes(names, index, deletedDocuments);
                } finally {
                    indexReplacement.release();
                }

                success = true;

            } finally {
                if (!success) {
                    // delete index
                    log.debug("deleting index " + index.getName());
                    multiIndex.deleteIndex(index);
                }
            }
        } catch (Throwable e) {
            log.error("Error while merging indexes: ", e);
        }
    }
    log.info("IndexMerger terminated");
}

From source file:org.exoplatform.services.jcr.impl.core.query.lucene.IndexMerger.java

/**
 * Implements the index merging.
 */
public void run() {
    for (;;) {
        boolean isIdle = false;
        if (mergeTasks.size() == 0) {
            mergerIdle.release();
            isIdle = true;
        }
        Merge task = (Merge) mergeTasks.remove();
        if (task == QUIT) // NOSONAR
        {
            mergerIdle.release();
            break;
        }
        if (isIdle) {
            try {
                mergerIdle.acquire();
            } catch (InterruptedException e) {
                Thread.interrupted();
                log.warn("Unable to acquire mergerIdle sync");
            }
        }

        log.debug("accepted merge request");

        // reset deleted documents
        deletedDocuments.clear();

        // get readers
        String[] names = new String[task.indexes.length];
        for (int i = 0; i < task.indexes.length; i++) {
            names[i] = task.indexes[i].name;
        }
        try {
            log.debug("create new index");
            PersistentIndex index = multiIndex.getOrCreateIndex(null);
            boolean success = false;
            try {

                log.debug("get index readers from MultiIndex");
                IndexReader[] readers = multiIndex.getIndexReaders(names, this);
                try {
                    // do the merge
                    long time = System.currentTimeMillis();
                    index.addIndexes(readers);
                    time = System.currentTimeMillis() - time;
                    int docCount = 0;
                    for (int i = 0; i < readers.length; i++) {
                        docCount += readers[i].numDocs();
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("merged " + docCount + " documents in " + time + " ms into " + index.getName()
                                + ".");
                    }
                } finally {
                    for (int i = 0; i < readers.length; i++) {
                        try {
                            Util.closeOrRelease(readers[i]);
                        } catch (IOException e) {
                            log.warn("Unable to close IndexReader: " + e);
                        }
                    }
                }

                // inform multi index
                // if we cannot get the sync immediately we have to quit
                if (!indexReplacement.attempt(0)) {
                    log.debug("index merging canceled");
                    // if index not passed to multiIndex, then it will never be closed
                    index.close();
                    break;
                }
                try {
                    log.debug("replace indexes");
                    multiIndex.replaceIndexes(names, index, deletedDocuments);
                } finally {
                    indexReplacement.release();
                }

                success = true;

            } finally {
                if (!success) {
                    // delete index
                    log.debug("deleting index " + index.getName());
                    multiIndex.deleteIndex(index);
                }
            }
        } catch (Throwable e) //NOSONAR
        {
            log.error("Error while merging indexes: ", e);
        }
    }
    if (log.isDebugEnabled()) {
        log.debug("IndexMerger terminated");
    }
}

From source file:com.netflix.suro.input.thrift.MessageSetProcessor.java

public TMessageSet poll(long timeout, TimeUnit unit) {
    try {
        return queue.poll(timeout, unit);
    } catch (InterruptedException e) {
        Thread.interrupted();
        return new MessageSetBuilder(new ClientConfig()).build();
    }
}