Example usage for java.lang InterruptedException getStackTrace

List of usage examples for java.lang InterruptedException getStackTrace

Introduction

On this page you can find example usages of java.lang.InterruptedException.getStackTrace().

Prototype

public StackTraceElement[] getStackTrace() 

Document

Provides programmatic access to the stack trace information printed by printStackTrace().
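
For orientation, here is a minimal, self-contained sketch (not drawn from the projects below) showing the common pattern: catch the InterruptedException, iterate over the StackTraceElement[] returned by getStackTrace(), and restore the interrupt status. The class name is a placeholder chosen for this example.

import java.util.Arrays;

public class GetStackTraceExample {
    public static void main(String[] args) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // getStackTrace() returns a StackTraceElement[]; print each frame,
            // or use Arrays.toString(...) for a single-line summary.
            for (StackTraceElement frame : e.getStackTrace()) {
                System.err.println("  at " + frame);
            }
            System.err.println(Arrays.toString(e.getStackTrace()));
            // Re-assert the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
        }
    }
}

Note that calling toString() directly on the returned array (as several of the examples below do) prints only the array's identity, so Arrays.toString(...) or a per-frame loop is usually preferable.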

Usage

From source file:com.github.kuben.realshopping.Shop.java

/**
 * Safely deletes a player's pager.
 * If a pager is not present it will do nothing.
 * @param player Player whose pager is going to be removed.
 */
public static void removePager(String player) {
    Pager pg = timers.get(player);
    if (pg == null) {
        return;
    }
    pg.setStop(true);
    try {
        pg.join(5000);
    } catch (InterruptedException ex) {
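        // Note: StackTraceElement[].toString() yields the array's identity string, not the trace contents.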
        RealShopping.logsevere(ex.getStackTrace().toString());
    }
    timers.remove(player);
}

From source file:io.cloudslang.samples.HelloScore.java

private void waitForExecutionToFinish() {
    try {
        synchronized (lock) {
            lock.wait(10000);
        }
    } catch (InterruptedException e) {
        logger.error(e.getStackTrace());
    }
}

From source file:com.bna.ezrxlookup.integration.util.IngestSplTest.java

@Test
@Ignore // The ingestSpl will ingest records on startup now
public void loadTheSplDataFromOpenFda() {
    LOG.debug("Invoking the ingestSplData Test");
    try {
        Thread.sleep(10);
        int i = ingestSpl.ingestSplData(300);
        LOG.debug("Ingested a total of :" + i);
        LOG.debug("Total documents in SPL: " + ingestSpl.getSplCount());

        //ingestSpl.shutdownElastic();
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        LOG.error(e.getStackTrace().toString());
    }

}

From source file:gda.scan.ConcurrentScan.java

@Override
public void doCollection() throws Exception {
    try {
        if (!this.isChild) {
            logger.info("Starting scan: '" + getName() + "' (" + getCommand() + ")");
        }
        reportDetectorsThatWillBeMovedConcurrentlyWithSomeOtherScannables();
        logger.info("Concurrency: " + reportDevicesByLevel());

        // if true, then make a note of current position of all scannables to use at the end
        if (!isChild && isReturnScannablesToOrginalPositions()) {
            recordOriginalPositions();
        }

        // *** First point in this scan ***
        setPointPositionInLine(PointPositionInLine.FIRST);
        if (getChild() == null) {
            callAtPointStartHooks();
            // move to initial movements
            currentPointCount++;
            acquirePoint(true, true); // start point, collect detectors
            checkThreadInterrupted();

            readDevicesAndPublishScanDataPoint();
            callAtPointEndHooks();
            sendScanEvent(ScanEvent.EventType.UPDATED);

            checkThreadInterrupted();
            waitIfPaused();
            if (isFinishEarlyRequested()) {
                return;
            }
        } else {
            // move the Scannable operated by this scan and then run the child scan
            ScanObject principleScanObject = this.allScanObjects.get(0);
            // There will only be one scannable moved in this parent scan, so no
            // need to sort by level!
            principleScanObject.scannable.atLevelStart();
            principleScanObject.scannable.atLevelMoveStart();
            stepId = principleScanObject.moveToStart();
            checkThreadInterrupted();
            checkAllMovesComplete();
            waitIfPaused();
            if (isFinishEarlyRequested()) {
                return;
            }
            principleScanObject.scannable.atLevelEnd();
            runChildScan();
            checkThreadInterrupted();
            // note that some scan hooks are not called (atPointStart, atLevelMoveStart, atPointEnd) as this scannable is not part of the child scan
        }

        // *** Subsequent points in this scan ***

        for (int step = 0; step < numberSteps; step++) {
            waitIfPaused();
            if (isFinishEarlyRequested()) {
                return;
            }

            setPointPositionInLine(
                    (step == (numberSteps - 1)) ? PointPositionInLine.LAST : PointPositionInLine.MIDDLE);

            if (getChild() == null) {
                callAtPointStartHooks();
                // make all these increments
                currentPointCount++;
                acquirePoint(false, true); // step point, collect detectors
                checkThreadInterrupted();
                readDevicesAndPublishScanDataPoint();
                checkThreadInterrupted();
                callAtPointEndHooks();
                sendScanEvent(ScanEvent.EventType.UPDATED);
            } else {
                ScanObject principleScanObject = this.allScanObjects.get(0);
                principleScanObject.scannable.atLevelStart();
                principleScanObject.scannable.atLevelMoveStart();
                stepId = principleScanObject.moveStep();
                checkAllMovesComplete();
                checkThreadInterrupted();
                principleScanObject.scannable.atLevelEnd();
                runChildScan();
                checkThreadInterrupted();
            }
        }
    } catch (InterruptedException e) {
        setStatus(ScanStatus.TIDYING_UP_AFTER_STOP);
        throw new ScanInterruptedException(e.getMessage(), e.getStackTrace());
    } catch (Exception e) {
        setStatus(ScanStatus.TIDYING_UP_AFTER_FAILURE);
        throw e;
    }
}

From source file:GUI.MainWindow.java

private void ImportScanScreenWindowActivated(java.awt.event.WindowEvent evt) {//GEN-FIRST:event_ImportScanScreenWindowActivated

    Object obj = ImportFile.getModel().getElementAt(0);
    if (obj != null && obj instanceof ImportFile) {
        ImportFile imFile = (ImportFile) obj;
        System.out.println("Importing File: " + imFile.getAbsolutePath());
        ProgressBar.setIndeterminate(true);

        ImportScanTask ist = new ImportScanTask(ProgressBar, imFile, ImportScanScreen);
        ist.addPropertyChangeListener(new PropertyChangeListener() {
            @Override
            public void propertyChange(PropertyChangeEvent e) {
                if ("progress".equals(e.getPropertyName())) {
                    ProgressBar.setIndeterminate(false);
                    ProgressBar.setValue((Integer) e.getNewValue());
                    System.out.println("**: " + e.getNewValue());
                }
            }
        });
        ist.execute();

        try {
            DefaultMutableTreeNode new_root = ist.get();
            System.out.println("Import Finished");
            DefaultMutableTreeNode existing_root = (DefaultMutableTreeNode) VulnTree.getModel().getRoot();
            if (existing_root.getChildCount() == 0) {
                // The tree was empty so simply set the imported one into the model
                VulnTree.setModel(new DefaultTreeModel(new_root));
            } else {
                // The tree had existing children so we need to merge them
                VulnTree.setModel(new DefaultTreeModel(new TreeUtils().mergeTrees(existing_root, new_root)));
            }

        } catch (InterruptedException ex) {
            //Logger.getLogger(MainWindow.class.getName()).log(Level.SEVERE, null, ex);
            System.out.println(ex.getStackTrace());
        } catch (ExecutionException ex) {
            //Logger.getLogger(MainWindow.class.getName()).log(Level.SEVERE, null, ex);
            System.out.println(ex.getStackTrace());
        }

    }
}

From source file:org.apache.hadoop.fs.DelegationTokenRenewer.java

/**
 * Remove the associated renew action from the queue
 *
 * @throws IOException
 */
public <T extends FileSystem & Renewable> void removeRenewAction(final T fs) throws IOException {
    RenewAction<T> action = new RenewAction<T>(fs);
    if (queue.remove(action)) {
        try {
            action.cancel();
        } catch (InterruptedException ie) {
            LOG.error("Interrupted while canceling token for " + fs.getUri() + "filesystem");
            if (LOG.isDebugEnabled()) {
                LOG.debug(ie.getStackTrace());
            }
        }
    }
}

From source file:org.apache.hadoop.ipc.MiniRPCBenchmark.java

void connectToServerAndGetDelegationToken(final Configuration conf, final InetSocketAddress addr)
        throws IOException {
    MiniProtocol client = null;
    try {
        UserGroupInformation current = UserGroupInformation.getCurrentUser();
        UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(MINI_USER, current,
                GROUP_NAMES);

        try {
            client = proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
                @Override
                public MiniProtocol run() throws IOException {
                    MiniProtocol p = RPC.getProxy(MiniProtocol.class, MiniProtocol.versionID, addr, conf);
                    Token<TestDelegationTokenIdentifier> token;
                    token = p.getDelegationToken(new Text(RENEWER));
                    currentUgi = UserGroupInformation.createUserForTesting(MINI_USER, GROUP_NAMES);
                    SecurityUtil.setTokenService(token, addr);
                    currentUgi.addToken(token);
                    return p;
                }
            });
        } catch (InterruptedException e) {
            Assert.fail(Arrays.toString(e.getStackTrace()));
        }
    } finally {
        RPC.stopProxy(client);
    }
}

From source file:org.wso2.esb.integration.common.utils.clients.Http2Client.java

public Http2Response doGet(String url, Map<String, String> headers) {
    initChannel();
    HttpScheme scheme = SSL ? HttpScheme.HTTPS : HttpScheme.HTTP;
    AsciiString hostName = new AsciiString(HOST + ':' + PORT);

    FullHttpRequest request = new DefaultFullHttpRequest(HTTP_1_1, GET, url);
    if (!headers.isEmpty()) {
        for (Map.Entry h : headers.entrySet()) {
            request.headers().add((CharSequence) h.getKey(), h.getValue());
        }
    }
    request.headers().add(HttpHeaderNames.HOST, hostName);
    request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), scheme.name());
    request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
    request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.DEFLATE);
    io.netty.channel.ChannelPromise p;
    int s = StreamId;
    if (responseHandler == null) {
        log.error("Response handler is null");
        return null;
    } else if (channel == null) {
        log.error("Channel is null");
        return null;
    } else {
        responseHandler.put(StreamId, channel.writeAndFlush(request), p = channel.newPromise());
        StreamId += 2;
        Http2Response response;
        try {
            while (!p.isSuccess()) {
                log.info("Waiting for response");
                Thread.sleep(20);
            }
            response = responseHandler.getResponse(s);
        } catch (InterruptedException e) {
            response = null;
            log.error(e.getStackTrace());
        }
        return response;
    }

}

From source file:org.wso2.esb.integration.common.utils.clients.Http2Client.java

public Http2Response doPost(String url, String data, Map<String, String> headers) {

    initChannel();
    HttpScheme scheme = SSL ? HttpScheme.HTTPS : HttpScheme.HTTP;
    AsciiString hostName = new AsciiString(HOST + ':' + PORT);

    FullHttpRequest request = new DefaultFullHttpRequest(HTTP_1_1, POST, url,
            Unpooled.copiedBuffer(data.getBytes()));

    if (!headers.isEmpty()) {
        for (Map.Entry h : headers.entrySet()) {
            request.headers().add((CharSequence) h.getKey(), h.getValue());
        }
    }
    request.headers().add(HttpHeaderNames.HOST, hostName);
    request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), scheme.name());
    request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
    request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.DEFLATE);
    io.netty.channel.ChannelPromise p;
    int s = StreamId;
    responseHandler.put(StreamId, channel.writeAndFlush(request), p = channel.newPromise());
    StreamId += 2;
    Http2Response response;
    try {
        while (!p.isSuccess()) {
            log.info("Waiting for response");
            Thread.sleep(20);
        }
        response = responseHandler.getResponse(s);
    } catch (InterruptedException e) {
        response = null;
        log.error(e.getStackTrace());
    }
    return response;
}

From source file:voldemort.store.readonly.fetcher.HdfsFetcher.java

public File fetch(String sourceFileUrl, String destinationFile, String hadoopConfigPath) throws IOException {
    if (this.globalThrottleLimit != null) {
        if (this.globalThrottleLimit.getSpeculativeRate() < this.minBytesPerSecond)
            throw new VoldemortException("Too many push jobs.");
        this.globalThrottleLimit.incrementNumJobs();
    }

    ObjectName jmxName = null;
    try {

        final Configuration config = new Configuration();
        FileSystem fs = null;
        config.setInt("io.socket.receive.buffer", bufferSize);
        config.set("hadoop.rpc.socket.factory.class.ClientProtocol", ConfigurableSocketFactory.class.getName());
        config.set("hadoop.security.group.mapping", "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

        final Path path = new Path(sourceFileUrl);

        boolean isHftpBasedFetch = sourceFileUrl.length() > 4 && sourceFileUrl.substring(0, 4).equals("hftp");
        logger.info("URL : " + sourceFileUrl + " and hftp protocol enabled = " + isHftpBasedFetch);
        logger.info("Hadoop path = " + hadoopConfigPath + " , keytab path = " + HdfsFetcher.keytabPath
                + " , kerberos principal = " + HdfsFetcher.kerberosPrincipal);

        if (hadoopConfigPath.length() > 0 && !isHftpBasedFetch) {

            config.addResource(new Path(hadoopConfigPath + "/core-site.xml"));
            config.addResource(new Path(hadoopConfigPath + "/hdfs-site.xml"));

            String security = config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);

            if (security == null || !security.equals("kerberos")) {
                logger.error("Security isn't turned on in the conf: "
                        + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + " = "
                        + config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
                logger.error("Please make sure that the Hadoop config directory path is valid.");
                throw new VoldemortException(
                        "Error in getting Hadoop filesystem. Invalid Hadoop config directory path.");
            } else {
                logger.info("Security is turned on in the conf. Trying to authenticate ...");

            }
        }

        if (HdfsFetcher.keytabPath.length() > 0 && !isHftpBasedFetch) {

            /*
             * We're seeing intermittent errors while trying to get the
             * Hadoop filesystem in a privileged doAs block. This happens
             * when we fetch the files over hdfs or webhdfs. This retry loop
             * is inserted here as a temporary measure.
             */
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                boolean isValidFilesystem = false;

                if (!new File(HdfsFetcher.keytabPath).exists()) {
                    logger.error("Invalid keytab file path. Please provide a valid keytab path");
                    throw new VoldemortException(
                            "Error in getting Hadoop filesystem. Invalid keytab file path.");
                }

                /*
                 * The Hadoop path for getting a Filesystem object in a
                 * privileged doAs block is not thread safe. This might be
                 * causing intermittent NPE exceptions. Adding a
                 * synchronized block.
                 */
                synchronized (this) {
                    /*
                     * First login using the specified principal and keytab
                     * file
                     */
                    UserGroupInformation.setConfiguration(config);
                    UserGroupInformation.loginUserFromKeytab(HdfsFetcher.kerberosPrincipal,
                            HdfsFetcher.keytabPath);

                    /*
                     * If login is successful, get the filesystem object.
                     * NOTE: Ideally we do not need a doAs block for this.
                     * Consider removing it in the future once the Hadoop
                     * jars have the corresponding patch (tracked in the
                     * Hadoop Apache project: HDFS-3367)
                     */
                    try {
                        logger.info("I've logged in and am now Doasing as "
                                + UserGroupInformation.getCurrentUser().getUserName());
                        fs = UserGroupInformation.getCurrentUser()
                                .doAs(new PrivilegedExceptionAction<FileSystem>() {

                                    @Override
                                    public FileSystem run() throws Exception {
                                        FileSystem fs = path.getFileSystem(config);
                                        return fs;
                                    }
                                });
                        isValidFilesystem = true;
                    } catch (InterruptedException e) {
                        logger.error(e.getMessage(), e);
                    } catch (Exception e) {
                        logger.error("Got an exception while getting the filesystem object: ");
                        logger.error("Exception class : " + e.getClass());
                        e.printStackTrace();
                        for (StackTraceElement et : e.getStackTrace()) {
                            logger.error(et.toString());
                        }
                    }
                }

                if (isValidFilesystem) {
                    break;
                } else if (attempt < maxAttempts - 1) {
                    logger.error(
                            "Attempt#" + attempt + " Could not get a valid Filesystem object. Trying again in "
                                    + retryDelayMs + " ms");
                    sleepForRetryDelayMs();
                }
            }
        } else {
            fs = path.getFileSystem(config);
        }

        CopyStats stats = new CopyStats(sourceFileUrl, sizeOfPath(fs, path));
        jmxName = JmxUtils.registerMbean("hdfs-copy-" + copyCount.getAndIncrement(), stats);
        File destination = new File(destinationFile);

        if (destination.exists()) {
            throw new VoldemortException(
                    "Version directory " + destination.getAbsolutePath() + " already exists");
        }

        logger.info("Starting fetch for : " + sourceFileUrl);
        boolean result = fetch(fs, path, destination, stats);
        logger.info("Completed fetch : " + sourceFileUrl);

        // Close the filesystem
        fs.close();

        if (result) {
            return destination;
        } else {
            return null;
        }
    } catch (Throwable te) {
        te.printStackTrace();
        logger.error("Error thrown while trying to get data from Hadoop filesystem", te);
        throw new VoldemortException("Error thrown while trying to get data from Hadoop filesystem : " + te);
    } finally {
        if (this.globalThrottleLimit != null) {
            this.globalThrottleLimit.decrementNumJobs();
        }
        if (jmxName != null)
            JmxUtils.unregisterMbean(jmxName);
    }
}