Example usage for java.lang Thread interrupt

List of usage examples for java.lang Thread interrupt

Introduction

On this page you can find usage examples for java.lang.Thread.interrupt().

Prototype

public void interrupt() 

Document

Interrupts this thread.
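
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic contract: interrupt() wakes a thread blocked in Thread.sleep() with an InterruptedException, and the thread then exits cooperatively.

public class InterruptDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                // Simulate blocking work; sleep() responds to interrupt().
                Thread.sleep(60_000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag, which sleep() cleared on throw.
                Thread.currentThread().interrupt();
                System.out.println("worker interrupted, shutting down");
            }
        });
        worker.start();
        Thread.sleep(100);   // give the worker time to block
        worker.interrupt();  // wakes the sleep() above
        worker.join();
    }
}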

Usage

From source file:edu.harvard.i2b2.im.ws.IMService.java

private OMElement execute(RequestHandler handler, long waitTime) throws I2B2Exception {
    // Do the processing inside a thread so that the
    // service can send back a message with a timeout error.
    log.debug("In execute");

    OMElement returnElement = null;

    String unknownErrorMessage = "Error message delivered from the remote server \n"
            + "You may wish to retry your last action";

    ExecutorRunnable er = new ExecutorRunnable();

    er.setRequestHandler(handler);

    Thread t = new Thread(er);
    String workplaceDataResponse = null;

    synchronized (t) {
        t.start();

        //              try {
        //                 if (waitTime > 0) {
        //                    t.wait(waitTime);
        //                 } else {
        //                    t.wait();
        //                 }

        try {
            long startTime = System.currentTimeMillis();
            long deltaTime = -1;
            while ((er.isJobCompleteFlag() == false) && (deltaTime < waitTime)) {
                if (waitTime > 0) {
                    t.wait(waitTime - deltaTime);
                    deltaTime = System.currentTimeMillis() - startTime;
                } else {
                    t.wait();
                }
            }

            workplaceDataResponse = er.getOutputString();

            if (workplaceDataResponse == null) {
                if (er.getJobException() != null) {
                    log.error("er.jobException is " + er.getJobException().getMessage());

                    log.info("waitTime is " + waitTime);
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            unknownErrorMessage);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);

                } else if (er.isJobCompleteFlag() == false) {
                    //<result_waittime_ms>5000</result_waittime_ms>
                    String timeOuterror = "Remote server timed out \n" + "Result waittime = " + waitTime
                            + " ms elapsed,\nPlease try again";
                    log.error(timeOuterror);
                    log.debug(
                            "im waited " + deltaTime + "ms for " + er.getRequestHandler().getClass().getName());
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            timeOuterror);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);

                } else {
                    log.error("im  data response is null");
                    log.info("waitTime is " + waitTime);
                    log.debug(
                            "im waited " + deltaTime + "ms for " + er.getRequestHandler().getClass().getName());
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            unknownErrorMessage);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);
                }
            }
        } catch (InterruptedException e) {
            log.error(e.getMessage());
            throw new I2B2Exception("Thread error while running IM job ");
        } finally {
            t.interrupt();
            er = null;
            t = null;
        }
    }
    returnElement = MessageFactory.createResponseOMElementFromString(workplaceDataResponse);

    return returnElement;
}
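
Note that this example waits on the Thread object itself (synchronized (t) { ... t.wait(...) }). That works only because the JVM invokes notifyAll() on a Thread as it terminates, which is exactly why the Thread javadoc recommends against using wait/notify on Thread instances. A more conventional way to get the same timeout-then-interrupt behavior is join(long) followed by interrupt(); a minimal sketch (names here are illustrative, not from the i2b2 code):

/** Sketch only: run a task, wait up to waitTime ms, interrupt on overrun. */
static void runWithTimeout(Runnable task, long waitTime) throws InterruptedException {
    Thread t = new Thread(task);
    t.start();
    t.join(waitTime > 0 ? waitTime : 0); // join(0) waits without a timeout
    if (t.isAlive()) {
        t.interrupt(); // the job overran its budget; ask it to stop
    }
}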

From source file:org.opennms.netmgt.syslogd.SyslogdImplementationsIT.java

@Test
@Transactional
public void testDefaultSyslogd() throws Exception {
    Thread listener = new Thread(m_nio);
    listener.start();
    Thread.sleep(3000);

    final int eventCount = 100;

    List<Integer> foos = new ArrayList<Integer>();

    for (int i = 0; i < eventCount; i++) {
        int eventNum = Double.valueOf(Math.random() * 10000).intValue();
        foos.add(eventNum);
    }

    m_eventCounter.setAnticipated(eventCount);

    String testPduFormat = "2010-08-19 localhost foo%d: load test %d on tty1";
    SyslogClient sc = new SyslogClient(null, 10, SyslogClient.LOG_USER, addr("127.0.0.1"));

    /*
    // Test by directly invoking the SyslogConnection task
    System.err.println("Starting to send packets");
    final long start = System.currentTimeMillis();
    for (int i = 0; i < eventCount; i++) {
    int foo = foos.get(i);
    DatagramPacket pkt = sc.getPacket(SyslogClient.LOG_DEBUG, String.format(testPduFormat, foo, foo));
    WaterfallExecutor.waterfall(m_executorService, new SyslogConnection(pkt, m_config));
    }
            
    // Test by sending over a java.net socket
    final DatagramSocket socket = new DatagramSocket();
    System.err.println("Starting to send packets");
    final long start = System.currentTimeMillis();
    for (int i = 0; i < eventCount; i++) {
    int foo = foos.get(i);
    DatagramPacket pkt = sc.getPacket(SyslogClient.LOG_DEBUG, String.format(testPduFormat, foo, foo));
    socket.send(pkt);
    }
    socket.close();
    */

    // Test by sending over an NIO channel
    SocketAddress address = new InetSocketAddress(InetAddressUtils.getLocalHostAddress(), SyslogClient.PORT);
    final DatagramChannel channel = DatagramChannel.open();
    final ByteBuffer buffer = ByteBuffer.allocate(SyslogReceiverNioThreadPoolImpl.MAX_PACKET_SIZE);
    buffer.clear();
    System.err.println("Starting to send packets");
    final long start = System.currentTimeMillis();
    for (int i = 0; i < eventCount; i++) {
        int foo = foos.get(i);
        buffer.put(SyslogClient.getPacketPayload(SyslogClient.LOG_USER, null, SyslogClient.LOG_DEBUG,
                String.format(testPduFormat, foo, foo)));
        buffer.flip();
        channel.send(buffer, address);
        buffer.clear();
    }
    channel.close();

    long mid = System.currentTimeMillis();
    System.err.println(String.format("Sent %d packets in %d milliseconds", eventCount, mid - start));

    m_eventCounter.waitForFinish(120000);
    long end = System.currentTimeMillis();

    System.err.println(
            String.format("Events expected: %d, events received: %d", eventCount, m_eventCounter.getCount()));
    final long total = (end - start);
    final double eventsPerSecond = (eventCount * 1000.0 / total);
    System.err.println(String.format("total time: %d, wait time: %d, events per second: %8.4f", total,
            (end - mid), eventsPerSecond));

    listener.interrupt();
    listener.join();
}
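
The interrupt()/join() pair at the end of the test shuts the listener down cleanly only because the listener's run loop cooperates with interruption. A loop of that shape might look like this (an illustrative sketch, not the actual OpenNMS receiver):

Runnable listenerLoop = () -> {
    while (!Thread.currentThread().isInterrupted()) {
        try {
            Thread.sleep(50); // stand-in for a blocking receive
        } catch (InterruptedException e) {
            // Re-assert the flag so the loop condition sees it and exits.
            Thread.currentThread().interrupt();
        }
    }
};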

From source file:com.tencent.gaia.portal.util.Shell.java

/**
 * Run a command
 */
private void runCommand() throws IOException {
    ProcessBuilder builder = new ProcessBuilder(getExecString());
    Timer timeOutTimer = null;
    ShellTimeoutTimerTask timeoutTimerTask = null;
    timedOut = new AtomicBoolean(false);
    completed = new AtomicBoolean(false);

    if (environment != null) {
        builder.environment().putAll(this.environment);
    }
    if (dir != null) {
        builder.directory(this.dir);
    }

    builder.redirectErrorStream(redirectErrorStream);

    if (Shell.WINDOWS) {
        synchronized (WindowsProcessLaunchLock) {
            // To workaround the race condition issue with child processes
            // inheriting unintended handles during process launch that can
            // lead to hangs on reading output and error streams, we
            // serialize process creation. More info available at:
            // http://support.microsoft.com/kb/315939
            process = builder.start();
        }
    } else {
        process = builder.start();
    }

    if (timeOutInterval > 0) {
        timeOutTimer = new Timer("Shell command timeout");
        timeoutTimerTask = new ShellTimeoutTimerTask(this);
        //One time scheduling.
        timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
    }
    final BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream()));
    BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
    final StringBuffer errMsg = new StringBuffer();

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    errMsg.append(line);
                    errMsg.append(System.getProperty("line.separator"));
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    try {
        errThread.start();
    } catch (IllegalStateException ise) {
        // Ignored: the thread was already started.
    }
    try {
        parseExecResult(inReader); // parse the output
        // clear the input stream buffer
        String line = inReader.readLine();
        while (line != null) {
            line = inReader.readLine();
        }
        // wait for the process to finish and check the exit code
        exitCode = process.waitFor();
        try {
            // make sure that the error thread exits
            errThread.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while reading the error stream", ie);
        }
        completed.set(true);
        //the timeout thread handling
        //taken care in finally block
        if (exitCode != 0) {
            throw new ExitCodeException(exitCode, errMsg.toString());
        }
    } catch (InterruptedException ie) {
        throw new IOException(ie.toString());
    } finally {
        if (timeOutTimer != null) {
            timeOutTimer.cancel();
        }
        // close the input stream
        try {
            // JDK 7 tries to automatically drain the input streams for us
            // when the process exits, but since close is not synchronized,
            // it creates a race if we close the stream first and the same
            // fd is recycled.  the stream draining thread will attempt to
            // drain that fd!!  it may block, OOM, or cause bizarre behavior
            // see: https://bugs.openjdk.java.net/browse/JDK-8024521
            //      issue is fixed in build 7u60
            InputStream stdout = process.getInputStream();
            synchronized (stdout) {
                inReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the input stream", ioe);
        }
        try {
            if (!completed.get()) {
                errThread.interrupt();
                errThread.join();
            }
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while joining errThread");
        }
        try {
            InputStream stderr = process.getErrorStream();
            synchronized (stderr) {
                errReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the error stream", ioe);
        }
        process.destroy();
        lastTime = System.currentTimeMillis();
    }
}
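
One subtlety: errThread checks isInterrupted() only between readLine() calls, and a thread blocked inside a plain InputStream read is generally not woken by interrupt(). That is why the finally block also closes the readers; closing the stream is what actually unblocks the read. A minimal illustration of that assumption (for ordinary blocking streams, not interruptible NIO channels; assumes java.io imports):

/** Sketch: stopping a reader stuck on a blocking stream. */
static void stopReader(final InputStream in) throws IOException, InterruptedException {
    Thread reader = new Thread(() -> {
        try {
            int b;
            while ((b = in.read()) != -1) {
                // consume bytes
            }
        } catch (IOException e) {
            // Expected: read() fails once the stream is closed under it.
        }
    });
    reader.start();
    reader.interrupt(); // usually does NOT unblock a thread stuck in in.read()
    in.close();         // closing the stream does unblock it
    reader.join();
}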

From source file:org.apache.hadoop.hdfs.server.datanode.FSDataset.java

/**
 * Try to interrupt all of the given threads, and join on them.
 * If interrupted, returns false, indicating some threads may
 * still be running.
 */
private boolean interruptAndJoinThreads(List<Thread> threads) {
    // interrupt and wait for all ongoing create threads
    for (Thread t : threads) {
        t.interrupt();
    }
    for (Thread t : threads) {
        try {
            t.join();
        } catch (InterruptedException e) {
            DataNode.LOG.warn("interruptOngoingCreates: t=" + t, e);
            return false;
        }
    }
    return true;
}

From source file:com.gft.unity.android.AndroidIO.java

@Override
public IOResponse InvokeService(IORequest request, IOService service) {

    IOResponse response = new IOResponse();

    if (service != null) {
        IOServiceEndpoint endpoint = service.getEndpoint();
        // JUST FOR LOCAL TESTING, DO NOT UNCOMMENT FOR PLATFORM RELEASE
        // LOG.LogDebug(Module.PLATFORM, "Request content: " + request.getContent());

        if (endpoint == null) {
            LOG.LogDebug(Module.PLATFORM, "No endpoint configured for this service name: " + service.getName());
            return response;
        }

        String requestMethod = service.getRequestMethod().toString();
        if (request.getMethod() != null && request.getMethod().length() > 0)
            requestMethod = request.getMethod().toUpperCase();

        String requestUriString = formatRequestUriString(request, endpoint, requestMethod);
        Thread timeoutThread = null;

        try {

            // Security - VALIDATIONS
            if (!this.applySecurityValidations(requestUriString)) {
                return null;
            }

            // Adding HTTP Client Parameters
            this.addingHttpClientParms(request, endpoint);

            // Building Web Request to send
            HttpEntityEnclosingRequestBase httpRequest = this.buildWebRequest(request, service,
                    requestUriString, requestMethod);

            LOG.LogDebug(Module.PLATFORM, "Downloading service content");

            // Throw a new Thread to check absolute timeout
            timeoutThread = new Thread(new CheckTimeoutThread(httpRequest));
            timeoutThread.start();

            long start = System.currentTimeMillis();
            HttpResponse httpResponse = httpClient.execute(httpRequest);
            LOG.LogDebug(Module.PLATFORM,
                    "Content downloaded in " + (System.currentTimeMillis() - start) + "ms");

            // Read response
            response = this.readWebResponse(httpResponse, service);

        } catch (Exception ex) {
            LOG.Log(Module.PLATFORM, "Unnandled Exception requesting service.", ex);
            response.setContentType(contentTypes.get(ServiceType.REST_JSON).toString());
            response.setContent("Unhandled Exception Requesting Service. Message: " + ex.getMessage());
        } finally {
            // abort any previous timeout checking thread
            if (timeoutThread != null && timeoutThread.isAlive()) {
                timeoutThread.interrupt();
            }
        }
    }

    LOG.LogDebug(Module.PLATFORM, "invoke service finished");
    return response;
}
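
The CheckTimeoutThread used above is not shown in this snippet, so the following is an assumption about its shape: a watchdog that sleeps for the absolute budget and aborts the in-flight request unless it is interrupted first, which is what the finally block does on the success path. A hypothetical sketch against Apache HttpClient 4.x:

/** Hypothetical watchdog; the real CheckTimeoutThread may differ. */
class CheckTimeoutWatchdog implements Runnable {
    private final HttpUriRequest request;
    private final long timeoutMillis;

    CheckTimeoutWatchdog(HttpUriRequest request, long timeoutMillis) {
        this.request = request;
        this.timeoutMillis = timeoutMillis;
    }

    @Override
    public void run() {
        try {
            Thread.sleep(timeoutMillis); // wait out the absolute budget
            request.abort();             // budget exceeded: abort the request
        } catch (InterruptedException e) {
            // Normal path: the request finished and the owner interrupted us.
        }
    }
}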

From source file:org.apache.hadoop.hbase.wal.TestWALSplit.java

/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it.
 * Ensures we do not lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits to and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    final int numWriters = 3;
    Thread zombie = new ZombieLastLogWriterRegionServer(counter, stop, region, numWriters);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                StringBuilder ls = new StringBuilder("Contents of WALDIR (").append(WALDIR).append("):\n");
                for (FileStatus status : fs.listStatus(WALDIR)) {
                    ls.append("\t").append(status.toString()).append("\n");
                }
                LOG.debug(ls);
                LOG.info("Splitting WALs out from under zombie. Expecting " + numWriters + " files.");
                WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf2, wals);
                LOG.info("Finished splitting out from under zombie.");
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals("wrong number of split files for region", numWriters, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countWAL(logfile);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue(
                "The log file could have at most 1 extra log entry, but can't have less. "
                        + "Zombie could write " + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}
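
The cleanup combines two stop signals: an AtomicBoolean flag for code that polls, and interrupt() for code that blocks. A writer loop honoring both might look like this (a sketch, not the actual ZombieLastLogWriterRegionServer):

// Assumes java.util.concurrent.atomic.AtomicBoolean is imported.
final AtomicBoolean stop = new AtomicBoolean(false);
Thread zombie = new Thread(() -> {
    while (!stop.get() && !Thread.currentThread().isInterrupted()) {
        try {
            Thread.sleep(1); // stand-in for "append one edit"
        } catch (InterruptedException e) {
            break; // interrupt() lands here while the writer is sleeping
        }
    }
});
zombie.start();
// ... later, from the test's finally block:
stop.set(true);
zombie.interrupt();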

From source file:org.apache.hadoop.util.Shell.java

/** Run a command */
private void runCommand() throws IOException {
    ProcessBuilder builder = new ProcessBuilder(getExecString());
    Timer timeOutTimer = null;
    ShellTimeoutTimerTask timeoutTimerTask = null;
    timedOut = new AtomicBoolean(false);
    completed = new AtomicBoolean(false);

    if (environment != null) {
        builder.environment().putAll(this.environment);
    }
    if (dir != null) {
        builder.directory(this.dir);
    }

    builder.redirectErrorStream(redirectErrorStream);

    if (Shell.WINDOWS) {
        synchronized (WindowsProcessLaunchLock) {
            // To workaround the race condition issue with child processes
            // inheriting unintended handles during process launch that can
            // lead to hangs on reading output and error streams, we
            // serialize process creation. More info available at:
            // http://support.microsoft.com/kb/315939
            process = builder.start();
        }
    } else {
        process = builder.start();
    }

    if (timeOutInterval > 0) {
        timeOutTimer = new Timer("Shell command timeout");
        timeoutTimerTask = new ShellTimeoutTimerTask(this);
        //One time scheduling.
        timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
    }
    final BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream()));
    BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream()));
    final StringBuffer errMsg = new StringBuffer();

    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    errMsg.append(line);
                    errMsg.append(System.getProperty("line.separator"));
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    try {
        errThread.start();
    } catch (IllegalStateException ise) {
        // Ignored: the thread was already started.
    }
    try {
        parseExecResult(inReader); // parse the output
        // clear the input stream buffer
        String line = inReader.readLine();
        while (line != null) {
            line = inReader.readLine();
        }
        // wait for the process to finish and check the exit code
        exitCode = process.waitFor();
        try {
            // make sure that the error thread exits
            errThread.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while reading the error stream", ie);
        }
        completed.set(true);
        //the timeout thread handling
        //taken care in finally block
        if (exitCode != 0) {
            throw new ExitCodeException(exitCode, errMsg.toString());
        }
    } catch (InterruptedException ie) {
        throw new IOException(ie.toString());
    } finally {
        if (timeOutTimer != null) {
            timeOutTimer.cancel();
        }
        // close the input stream
        try {
            // JDK 7 tries to automatically drain the input streams for us
            // when the process exits, but since close is not synchronized,
            // it creates a race if we close the stream first and the same
            // fd is recycled.  the stream draining thread will attempt to
            // drain that fd!!  it may block, OOM, or cause bizarre behavior
            // see: https://bugs.openjdk.java.net/browse/JDK-8024521
            //      issue is fixed in build 7u60
            InputStream stdout = process.getInputStream();
            synchronized (stdout) {
                inReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the input stream", ioe);
        }
        try {
            if (!completed.get()) {
                errThread.interrupt();
                errThread.join();
            }
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while joining errThread");
        }
        try {
            InputStream stderr = process.getErrorStream();
            synchronized (stderr) {
                errReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the error stream", ioe);
        }
        process.destroy();
        lastTime = Time.now();
    }
}

From source file:org.apache.hadoop.hdfs.server.datanode.FSDataset.java

/**
 * Start writing to a block file.
 * If isRecovery is true and the block pre-exists, then we kill all
 * other threads that might be writing to this block, and then reopen the file.
 * If replicationRequest is true, then this operation is part of a block
 * replication request.
 */
public DatanodeBlockWriter writeToBlock(int namespaceId, Block b, Block newBlock, boolean isRecovery,
        boolean replicationRequest, int checksumType, int bytesPerChecksum) throws IOException {
    //
    // Make sure the block isn't a valid one - we're still creating it!
    //
    if (isValidBlock(namespaceId, b, false)) {
        if (!isRecovery) {
            throw new BlockAlreadyExistsException("Block " + b + " is valid, and cannot be written to.");
        }
        // If the block was successfully finalized because all packets
        // were successfully processed at the Datanode but the ack for
        // some of the packets were not received by the client. The client 
        // re-opens the connection and retries sending those packets.
        // The other reason is that an "append" is occurring to this block.
        detachBlock(namespaceId, b, 1);
    }
    long blockSize = b.getNumBytes();

    //
    // Serialize access to /tmp, and check if file already there.
    //
    File f = null;
    List<Thread> threads = null;
    long expectedFileSize = ActiveFile.UNKNOWN_SIZE;
    boolean inlineChecksum = datanode.useInlineChecksum;
    DatanodeBlockInfo binfo;
    FSVolume v = null;
    Block targetBlock = b;
    if (newBlock != null && newBlock != b) {
        targetBlock = newBlock;
    }

    lock.writeLock().lock();
    try {

        //
        // Is it already in the create process?
        //
        ActiveFile activeFile = volumeMap.getOngoingCreates(namespaceId, b);
        if (activeFile != null) {
            f = activeFile.getDataFile();
            threads = activeFile.threads;
            expectedFileSize = activeFile.getBytesWritten();
            inlineChecksum = activeFile.isInlineChecksum();

            if (!isRecovery) {
                throw new BlockAlreadyExistsException("Block " + b
                        + " has already been started (though not completed), and thus cannot be created.");
            } else {
                for (Thread thread : threads) {
                    thread.interrupt();
                }
            }
            volumeMap.removeOngoingCreates(namespaceId, b);
        }
        if (!isRecovery) {
            if (newBlock != null && b != newBlock) {
                throw new IOException("newBlock is not allowed except append case. ");
            }
            v = volumes.getNextVolume(blockSize);
            // create temporary file to hold block in the designated volume
            f = createTmpFile(namespaceId, v, b, replicationRequest, inlineChecksum, checksumType,
                    bytesPerChecksum);
        } else if (f != null) {
            DataNode.LOG.info("Reopen already-open Block for append " + b);
            if (newBlock != null && b != newBlock) {
                throw new IOException("newBlock is not allowed except append case. ");
            }
            // create or reuse temporary file to hold block in the designated volume
            DatanodeBlockInfo oldBinfo = volumeMap.get(namespaceId, b);
            inlineChecksum = oldBinfo.isInlineChecksum();
            v = oldBinfo.getBlockDataFile().getVolume();
            volumeMap.add(namespaceId, b, new DatanodeBlockInfo(v, f, DatanodeBlockInfo.UNFINALIZED, true,
                    inlineChecksum, checksumType, bytesPerChecksum, false, 0));
        } else {
            // reopening block for appending to it.
            DataNode.LOG.info("Reopen Block for append " + b);
            if (newBlock == null) {
                throw new IOException("newBlock is required for append af file to write. ");
            }
            DatanodeBlockInfo oldBinfo = volumeMap.get(namespaceId, b);
            inlineChecksum = oldBinfo.isInlineChecksum();
            v = oldBinfo.getBlockDataFile().getVolume();
            f = createTmpFile(namespaceId, v, newBlock, replicationRequest, inlineChecksum, checksumType,
                    bytesPerChecksum);
            File blkfile = getBlockFile(namespaceId, b);

            if (!inlineChecksum) {
                File oldmeta = BlockWithChecksumFileReader.getMetaFile(this, namespaceId, b);
                File newmeta = BlockWithChecksumFileWriter.getMetaFile(f, newBlock);

                // rename meta file to tmp directory
                DataNode.LOG.debug("Renaming " + oldmeta + " to " + newmeta);
                if (!oldmeta.renameTo(newmeta)) {
                    throw new IOException("Block " + b + " reopen failed. " + " Unable to move meta file  "
                            + oldmeta + " to tmp dir " + newmeta);
                }
            }

            // rename block file to tmp directory
            DataNode.LOG.debug("Renaming " + blkfile + " to " + f);
            if (!blkfile.renameTo(f)) {
                if (!f.delete()) {
                    throw new IOException("Block " + b + " reopen failed. " + " Unable to remove file " + f);
                }
                if (!blkfile.renameTo(f)) {
                    throw new IOException("Block " + b + " reopen failed. " + " Unable to move block file "
                            + blkfile + " to tmp dir " + f);
                }
            }
            // fsyncIfPossible parent directory to persist rename.
            if (datanode.syncOnClose) {
                NativeIO.fsyncIfPossible(blkfile.getParent());
            }
        }
        if (f == null) {
            DataNode.LOG.warn("Block " + b + " reopen failed " + " Unable to locate tmp file.");
            throw new IOException("Block " + b + " reopen failed " + " Unable to locate tmp file.");
        }
        // If this is a replication request, then this is not a permanent
        // block yet, it could get removed if the datanode restarts. If this
        // is a write or append request, then it is a valid block.
        if (replicationRequest) {
            binfo = new DatanodeBlockInfo(v, f, DatanodeBlockInfo.UNFINALIZED, false, inlineChecksum,
                    checksumType, bytesPerChecksum, false, 0);
        } else {
            binfo = new DatanodeBlockInfo(v, f, DatanodeBlockInfo.UNFINALIZED, true, inlineChecksum,
                    checksumType, bytesPerChecksum, false, 0);
        }
        if (newBlock != null && newBlock != b) {
            volumeMap.remove(namespaceId, b);
        }

        volumeMap.add(namespaceId, targetBlock, binfo);
        volumeMap.addOngoingCreates(namespaceId, targetBlock,
                new ActiveFile(binfo, threads, expectedFileSize, datanode.updateBlockCrcWhenWrite));

    } finally {
        lock.writeLock().unlock();
    }

    try {
        if (threads != null) {
            for (Thread thread : threads) {
                thread.join();
            }
        }
    } catch (InterruptedException e) {
        throw new IOException("Recovery waiting for thread interrupted.");
    }

    //
    // Finally, allow a writer to the block file
    // REMIND - mjc - make this a filter stream that enforces a max
    // block size, so clients can't go crazy
    //
    if (DataNode.LOG.isDebugEnabled()) {
        DataNode.LOG.debug("writeTo blockfile is " + f + " of size " + f.length());
    }
    if (inlineChecksum) {
        return new BlockInlineChecksumWriter(binfo.getBlockDataFile(), checksumType, bytesPerChecksum,
                datanode.writePacketSize);
    } else {
        File metafile = BlockWithChecksumFileWriter.getMetaFile(f, targetBlock);
        if (DataNode.LOG.isDebugEnabled()) {
            DataNode.LOG.debug("writeTo metafile is " + metafile + " of size " + metafile.length());
        }
        return new BlockWithChecksumFileWriter(binfo.getBlockDataFile(), metafile);
    }
}

From source file:edu.harvard.i2b2.workplace.ws.WorkplaceService.java

private OMElement execute(RequestHandler handler, long waitTime) throws I2B2Exception {
    // Do the workplace processing inside a thread so that the
    // service can send back a message with a timeout error.
    log.debug("In execute");

    OMElement returnElement = null;

    String unknownErrorMessage = "Error message delivered from the remote server \n"
            + "You may wish to retry your last action";

    ExecutorRunnable er = new ExecutorRunnable();

    er.setRequestHandler(handler);

    Thread t = new Thread(er);
    String workplaceDataResponse = null;

    synchronized (t) {
        t.start();

        //              try {
        //                 if (waitTime > 0) {
        //                    t.wait(waitTime);
        //                 } else {
        //                    t.wait();
        //                 }

        try {
            long startTime = System.currentTimeMillis();
            long deltaTime = -1;
            while ((er.isJobCompleteFlag() == false) && (deltaTime < waitTime)) {
                if (waitTime > 0) {
                    t.wait(waitTime - deltaTime);
                    deltaTime = System.currentTimeMillis() - startTime;
                } else {
                    t.wait();
                }
            }

            workplaceDataResponse = er.getOutputString();

            if (workplaceDataResponse == null) {
                if (er.getJobException() != null) {
                    log.error("er.jobException is " + er.getJobException().getMessage());

                    log.info("waitTime is " + waitTime);
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            unknownErrorMessage);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);

                } else if (er.isJobCompleteFlag() == false) {
                    //<result_waittime_ms>5000</result_waittime_ms>
                    String timeOuterror = "Remote server timed out \n" + "Result waittime = " + waitTime
                            + " ms elapsed,\nPlease try again";
                    log.error(timeOuterror);
                    log.debug("workplace waited " + deltaTime + "ms for "
                            + er.getRequestHandler().getClass().getName());
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            timeOuterror);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);

                } else {
                    log.error("workplace  data response is null");
                    log.info("waitTime is " + waitTime);
                    log.debug("workplace waited " + deltaTime + "ms for "
                            + er.getRequestHandler().getClass().getName());
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            unknownErrorMessage);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);
                }
            }
        } catch (InterruptedException e) {
            log.error(e.getMessage());
            throw new I2B2Exception("Thread error while running Workplace job ");
        } finally {
            t.interrupt();
            er = null;
            t = null;
        }
    }
    returnElement = MessageFactory.createResponseOMElementFromString(workplaceDataResponse);

    return returnElement;
}

From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java

public void prepareStopping() {
    super.prepareStopping();
    Thread[] threadsToStop = new Thread[] { querySubmitter, statusPoller, queryPurger, prepareQueryPurger };
    // Nudge the threads to stop
    for (Thread th : threadsToStop) {
        th.interrupt();
    }

    // Nudge executor pools to stop

    // Hard shutdown, since it doesn't matter whether waiting queries were selected, all will be
    // selected in the next restart
    waitingQueriesSelectionSvc.shutdownNow();
    // Soft shutdown, Wait for current estimate tasks
    estimatePool.shutdown();
    // shutdown launcher pool
    queryLauncherPool.shutdown();
    // Soft shutdown for result purger too. Purging shouldn't take much time.
    if (null != queryResultPurger) {
        queryResultPurger.shutdown();
    }
    // shutdown query expirer
    queryExpirer.shutdownNow();
    // Soft shutdown right now, will await termination in this method itself, since cancellation pool
    // should be terminated before query state gets persisted.
    queryCancellationPool.shutdown();

    // Join the threads.
    for (Thread th : threadsToStop) {
        try {
            log.debug("Waiting for {}", th.getName());
            th.join();
        } catch (InterruptedException e) {
            log.error("Error waiting for thread: {}", th.getName(), e);
        }
    }
    // Needs to be done before queries' states are persisted, hence doing here. Await of other
    // executor services can be done after persistence, hence they are done in #stop
    awaitTermination(queryLauncherPool);
    awaitTermination(queryCancellationPool);
}
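
The ordering matters here: the plain threads are interrupted first so that everything winds down in parallel, and only then joined. For the pools, the "soft" and "hard" shutdowns above map onto the two ExecutorService methods; a minimal sketch of the difference (pool names are illustrative):

// Assumes java.util.concurrent.{ExecutorService, Executors, TimeUnit} are imported.
static void shutdownPools(ExecutorService softPool, ExecutorService hardPool)
        throws InterruptedException {
    softPool.shutdown();    // soft: no new tasks, queued and running tasks finish
    hardPool.shutdownNow(); // hard: also interrupts workers and drains the queue

    // Rough equivalent of awaitTermination(pool) in the method above.
    softPool.awaitTermination(30, TimeUnit.SECONDS);
    hardPool.awaitTermination(30, TimeUnit.SECONDS);
}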