Example usage for java.lang.InterruptedException InterruptedException()

Introduction

On this page you can find example usage of the java.lang.InterruptedException no-argument constructor, InterruptedException().

Prototype

public InterruptedException() 

Document

Constructs an InterruptedException with no detail message.
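
The examples under Usage share a common pattern: detect an interrupt with Thread.interrupted() (which also clears the thread's interrupt flag) and surface it to the caller by constructing and throwing a new InterruptedException(). The following is a minimal, self-contained sketch of that pattern; the class and method names are illustrative only and do not come from any of the projects listed below.

import java.util.concurrent.atomic.AtomicBoolean;

public class InterruptibleWorker {

    /**
     * Polls until {@code done} becomes true, checking the thread's interrupt
     * status on every iteration and surfacing an interrupt to the caller by
     * constructing and throwing a new InterruptedException.
     */
    public static void waitUntilDone(AtomicBoolean done) throws InterruptedException {
        while (!done.get()) {
            // Thread.interrupted() clears the interrupt flag, so we re-signal
            // the condition to the caller with a fresh InterruptedException.
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            Thread.sleep(10);
        }
    }

    public static void main(String[] args) throws Exception {
        AtomicBoolean done = new AtomicBoolean(false);
        Thread worker = new Thread(() -> {
            try {
                waitUntilDone(done);
            } catch (InterruptedException e) {
                System.out.println("worker was interrupted");
            }
        });
        worker.start();
        worker.interrupt(); // causes waitUntilDone to throw InterruptedException
        worker.join();
    }
}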

Usage

From source file:org.pmedv.core.components.RelativeImageView.java

/**
 * Ensures that the image is fully loaded and not a broken reference.
 */
private void waitForImage() throws InterruptedException {

    int w = fImage.getWidth(this);
    int h = fImage.getHeight(this);

    while (true) {
        int flags = Toolkit.getDefaultToolkit().checkImage(fImage, w, h, this);
        if (((flags & ERROR) != 0) || ((flags & ABORT) != 0)) {
            throw new InterruptedException();
        } else if ((flags & (ALLBITS | FRAMEBITS)) != 0) {
            return;
        }
        Thread.sleep(10);
    }

}

From source file:com.yahoo.ads.pb.kafka.KafkaSimpleConsumer.java

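/**
 * Asks the partition leader for its earliest or latest available offset. If the request
 * fails because the thread was interrupted, the interrupt is rethrown as a plain
 * InterruptedException; other failures are logged and signalled by returning -1.
 */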
private long getOffset(boolean earliest) throws InterruptedException {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
            earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
    OffsetResponse response = null;
    try {
        response = consumer.getOffsetsBefore(request);
    } catch (Exception e) {
        // e could be an instance of ClosedByInterruptException as SimpleConsumer.getOffsetsBefore uses nio
        if (Thread.interrupted()) {
            logger.info("catch exception of {} with interrupted in getOffset({}) for {} - {}",
                    e.getClass().getName(), earliest, topic, partitionId);

            throw new InterruptedException();
        }

        logger.error("caught exception in getOffsetsBefore {} - {}", topic, partitionId, e);
        return -1;
    }
    if (response.hasError()) {
        logger.error("error fetching data Offset from the Broker {}. reason: {}", leaderBroker.host(),
                response.errorCode(topic, partitionId));
        return -1;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return earliest ? offsets[0] : offsets[offsets.length - 1];
}

From source file:sce.ProcessExecutor.java

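/**
 * Runs an external command through Apache Commons Exec with an optional watchdog timeout,
 * waits for it to finish, and reports a watchdog kill as a JobExecutionException wrapping
 * a new InterruptedException.
 */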
public String executeProcess(String[] processParameters) throws JobExecutionException {
    try {
        //Command to be executed
        CommandLine command = new CommandLine(processParameters[0]);

        String[] params = new String[processParameters.length - 1];
        for (int i = 0; i < processParameters.length - 1; i++) {
            params[i] = processParameters[i + 1];
        }

        //Adding its arguments
        command.addArguments(params);

        //set timeout in seconds
        ExecuteWatchdog watchDog = new ExecuteWatchdog(
                this.timeout == 0 ? ExecuteWatchdog.INFINITE_TIMEOUT : this.timeout * 1000);
        this.watchdog = watchDog;

        // Result handler for executing the process asynchronously
        DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
        //MyResultHandler resultHandler = new MyResultHandler();

        //Using Std out for the output/error stream
        //ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        //PumpStreamHandler streamHandler = new PumpStreamHandler(outputStream);
        //This is used to end the process when the JVM exits
        ShutdownHookProcessDestroyer processDestroyer = new ShutdownHookProcessDestroyer();

        //Our main command executor
        DefaultExecutor executor = new DefaultExecutor();

        //Setting the properties
        executor.setStreamHandler(new PumpStreamHandler(null, null));
        executor.setWatchdog(watchDog);
        //executor.setExitValue(1); // this has to be set if the java code contains System.exit(1) to avoid a FAILED status

        //Setting the working directory
        //Use of recursion along with the ls makes this a long running process
        //executor.setWorkingDirectory(new File("/home"));
        executor.setProcessDestroyer(processDestroyer);

        //if set, use the java environment variables when running the command
        if (!this.environment.equals("")) {
            Map<String, String> procEnv = EnvironmentUtils.getProcEnvironment();
            EnvironmentUtils.addVariableToEnvironment(procEnv, this.environment);
            //Executing the command
            executor.execute(command, procEnv, resultHandler);
        } else {
            //Executing the command
            executor.execute(command, resultHandler);
        }

        //The below section depends on your need
        //Anything after this will be executed only when the command completes the execution
        resultHandler.waitFor();

        /*int exitValue = resultHandler.getExitValue();
         System.out.println(exitValue);
         if (executor.isFailure(exitValue)) {
         System.out.println("Execution failed");
         } else {
         System.out.println("Execution Successful");
         }
         System.out.println(outputStream.toString());*/
        //return outputStream.toString();
        if (watchdog.killedProcess()) {
            throw new JobExecutionException("Job Interrupted", new InterruptedException());
        }
        if (executor.isFailure(resultHandler.getExitValue())) {
            ExecuteException ex = resultHandler.getException();
            throw new JobExecutionException(ex.getMessage(), ex);
        }
        return "1";
    } catch (ExecuteException ex) {
        throw new JobExecutionException(ex.getMessage(), ex);
    } catch (IOException | InterruptedException | JobExecutionException ex) {
        throw new JobExecutionException(ex.getMessage(), ex);
    }
}

From source file:eu.stratosphere.nephele.execution.RuntimeEnvironment.java

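/**
 * Runs the task's invokable and drives its execution state: a cancellation detected after
 * invoke() returns is turned into an InterruptedException so that the regular failure
 * handling path can map it to the CANCELED state.
 */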
@Override
public void run() {
    if (invokable == null) {
        LOG.fatal("ExecutionEnvironment has no Invokable set");
    }

    // Now the actual program starts to run
    changeExecutionState(ExecutionState.RUNNING, null);

    // If the task has been canceled in the mean time, do not even start it
    if (this.executionObserver.isCanceled()) {
        changeExecutionState(ExecutionState.CANCELED, null);
        return;
    }

    try {
        ClassLoader cl = LibraryCacheManager.getClassLoader(jobID);
        Thread.currentThread().setContextClassLoader(cl);
        this.invokable.invoke();

        // Make sure we enter the catch block when the task has been canceled
        if (this.executionObserver.isCanceled()) {
            throw new InterruptedException();
        }
    } catch (Throwable t) {
        if (!this.executionObserver.isCanceled()) {

            // Perform cleanup when the task failed and has not been canceled by the user
            try {
                this.invokable.cancel();
            } catch (Throwable t2) {
                LOG.error(StringUtils.stringifyException(t2));
            }
        }

        // Release all resources that may currently be allocated by the individual channels
        releaseAllChannelResources();

        if (this.executionObserver.isCanceled() || t instanceof CancelTaskException) {
            changeExecutionState(ExecutionState.CANCELED, null);
        } else {
            changeExecutionState(ExecutionState.FAILED, StringUtils.stringifyException(t));
        }

        return;
    }

    // Task finished running, but there may be unconsumed output data in some of the channels
    changeExecutionState(ExecutionState.FINISHING, null);

    try {
        // If there is any unclosed input gate, close it and propagate close operation to corresponding output gate
        closeInputGates();

        // First, close all output gates to indicate no records will be emitted anymore
        requestAllOutputGatesToClose();

        // Wait until all input channels are closed
        waitForInputChannelsToBeClosed();

        // Now we wait until all output channels have written out their data and are closed
        waitForOutputChannelsToBeClosed();
    } catch (Throwable t) {

        // Release all resources that may currently be allocated by the individual channels
        releaseAllChannelResources();

        if (this.executionObserver.isCanceled() || t instanceof CancelTaskException) {
            changeExecutionState(ExecutionState.CANCELED, null);
        } else {
            changeExecutionState(ExecutionState.FAILED, StringUtils.stringifyException(t));
        }

        return;
    }

    // Release all resources that may currently be allocated by the individual channels
    releaseAllChannelResources();

    // Finally, switch execution state to FINISHED and report to job manager
    changeExecutionState(ExecutionState.FINISHED, null);
}

From source file:com.feedzai.commons.sql.abstraction.engine.AbstractDatabaseEngine.java

/**
 * Checks if the connection is available and returns it. If the connection is not available, it tries to reconnect (the number of times defined in the
 * properties with the delay there specified).
 *
 * @return The connection.
 * @throws RetryLimitExceededException If the retry limit is exceeded.
 * @throws InterruptedException        If the thread is interrupted during reconnection.
 */
@Override
public synchronized Connection getConnection()
        throws RetryLimitExceededException, InterruptedException, RecoveryException {

    if (!properties.isReconnectOnLost()) {
        return conn;
    }

    int retries = 1;

    if (checkConnection(conn)) {
        return conn;
    }

    logger.debug("Connection is down.");

    // reconnect.
    while (true) {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }

        try {

            if (maximumNumberOfTries > 0) {
                if (retries == (maximumNumberOfTries / 2) || retries == (maximumNumberOfTries - 1)) {
                    logger.error("The connection to the database was lost. Remaining retries: {}",
                            (maximumNumberOfTries - retries));
                    notificationLogger.error("The connection to the database was lost. Remaining retries: {}",
                            (maximumNumberOfTries - retries));
                } else {
                    logger.debug("Retrying ({}/{}) in {} seconds...", new Object[] { retries,
                            maximumNumberOfTries, TimeUnit.MILLISECONDS.toSeconds(retryInterval) });
                }
            } else {
                logger.debug("Retry number {} in {} seconds...", retries,
                        TimeUnit.MILLISECONDS.toSeconds(retryInterval));
                if (retries % 10 == 0) {
                    notificationLogger.error("The connection to the database was lost. Retry number {} in {}",
                            retries, TimeUnit.MILLISECONDS.toSeconds(retryInterval));
                }
            }
            Thread.sleep(retryInterval);
            connect(); // this sets the new object.

            // recover state.

            try {
                recover();
            } catch (Exception e) {
                throw new RecoveryException("Error recovering from lost connection.", e);
            }

            // return it.
            return conn;
        } catch (SQLException ex) {

            logger.debug("Connection failed.");

            if (maximumNumberOfTries > 0 && retries > maximumNumberOfTries) {
                throw new RetryLimitExceededException("Maximum number of retries for a connection exceeded.",
                        ex);
            }

            retries++;
        } catch (Exception e) {
            logger.error("An unexpected error occurred.", e);
        }
    }
}

From source file:org.apache.giraph.hive.HiveGiraphRunner.java

/**
 * Process command-line arguments.
 *
 * @param args arguments to process
 * @return CommandLine instance
 * @throws org.apache.commons.cli.ParseException error parsing arguments
 * @throws InterruptedException if execution should stop (e.g. after printing help)
 */
private CommandLine handleCommandLine(String[] args) throws ParseException, InterruptedException {
    Options options = new Options();
    addOptions(options);
    addMoreOptions(options);

    CommandLineParser parser = new GnuParser();
    final CommandLine cmdln = parser.parse(options, args);
    if (args.length == 0 || cmdln.hasOption("help")) {
        new HelpFormatter().printHelp(getClass().getName(), options, true);
        throw new InterruptedException();
    }

    // Giraph classes
    String vertexClassStr = cmdln.getOptionValue("vertexClass");
    if (vertexClassStr != null) {
        vertexClass = findClass(vertexClassStr, Vertex.class);
    }
    if (vertexClass == null) {
        throw new IllegalArgumentException(
                "Need the Giraph " + Vertex.class.getSimpleName() + " class name (-vertexClass) to use");
    }

    String hiveToVertexClassStr = cmdln.getOptionValue("hiveToVertexClass");
    if (hiveToVertexClassStr != null) {
        if (hiveToVertexClassStr.equals("disable")) {
            hiveToVertexClass = null;
        } else {
            setHiveToVertexClass(findClass(hiveToVertexClassStr, HiveToVertex.class));
        }
    }

    String hiveToEdgeClassStr = cmdln.getOptionValue("hiveToEdgeClass");
    if (hiveToEdgeClassStr != null) {
        if (hiveToEdgeClassStr.equals("disable")) {
            hiveToEdgeClass = null;
        } else {
            setHiveToEdgeClass(findClass(hiveToEdgeClassStr, HiveToEdge.class));
        }
    }

    String vertexToHiveClassStr = cmdln.getOptionValue("vertexToHiveClass");
    if (vertexToHiveClassStr != null) {
        setVertexToHiveClass(findClass(vertexToHiveClassStr, VertexToHive.class));
    }

    if (cmdln.hasOption("skipOutput")) {
        skipOutput = true;
    }

    if (hiveToVertexClass == null && hiveToEdgeClass == null) {
        throw new IllegalArgumentException("Need at least one of Giraph " + HiveToVertex.class.getSimpleName()
                + " class name (-hiveToVertexClass) and " + HiveToEdge.class.getSimpleName()
                + " class name (-hiveToEdgeClass)");
    }
    if (vertexToHiveClass == null && !skipOutput) {
        throw new IllegalArgumentException("Need the Giraph " + VertexToHive.class.getSimpleName()
                + " class name (-vertexToHiveClass) to use");
    }
    String workersStr = cmdln.getOptionValue("workers");
    if (workersStr == null) {
        throw new IllegalArgumentException("Need to choose the number of workers (-w)");
    }

    String vertexInputTableStr = cmdln.getOptionValue("vertexInputTable");
    if (vertexInputTableStr == null && hiveToVertexClass != null) {
        throw new IllegalArgumentException("Need to set the vertex input table name (-vi)");
    }

    String edgeInputTableStr = cmdln.getOptionValue("edgeInputTable");
    if (edgeInputTableStr == null && hiveToEdgeClass != null) {
        throw new IllegalArgumentException("Need to set the edge input table name (-ei)");
    }

    String outputTableStr = cmdln.getOptionValue("outputTable");
    if (outputTableStr == null) {
        throw new IllegalArgumentException("Need to set the output table name (-o)");
    }

    String dbName = cmdln.getOptionValue("dbName", "default");
    hiveVertexInputDescription.setDbName(dbName);
    hiveEdgeInputDescription.setDbName(dbName);
    hiveOutputDescription.setDbName(dbName);

    hiveEdgeInputDescription.setPartitionFilter(cmdln.getOptionValue("edgeInputFilter"));
    hiveEdgeInputDescription.setTableName(edgeInputTableStr);

    hiveVertexInputDescription.setPartitionFilter(cmdln.getOptionValue("vertexInputFilter"));
    hiveVertexInputDescription.setTableName(vertexInputTableStr);

    hiveOutputDescription.setTableName(cmdln.getOptionValue("outputTable"));
    hiveOutputDescription.setPartitionValues(parsePartitionValues(cmdln.getOptionValue("outputPartition")));

    workers = Integer.parseInt(workersStr);

    isVerbose = cmdln.hasOption("verbose");

    // pick up -hiveconf arguments
    processHiveConfOptions(cmdln);

    processMoreArguments(cmdln);

    return cmdln;
}

From source file:org.roda_project.commons_ip.model.impl.eark.EARKUtils.java

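/**
 * Adds the given documentation files both to the ZIP being assembled and to the METS
 * descriptor, checking the thread's interrupt status before each file and aborting with
 * an InterruptedException if it is set.
 */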
protected static void addDocumentationToZipAndMETS(Map<String, ZipEntryInfo> zipEntries,
        MetsWrapper metsWrapper, List<IPFile> documentation, String representationId)
        throws IPException, InterruptedException {
    if (documentation != null && !documentation.isEmpty()) {
        for (IPFile doc : documentation) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }

            String documentationFilePath = IPConstants.DOCUMENTATION_FOLDER
                    + ModelUtils.getFoldersFromList(doc.getRelativeFolders()) + doc.getFileName();
            FileType fileType = EARKMETSUtils.addDocumentationFileToMETS(metsWrapper, documentationFilePath,
                    doc.getPath());

            if (representationId != null) {
                documentationFilePath = IPConstants.REPRESENTATIONS_FOLDER + representationId
                        + IPConstants.ZIP_PATH_SEPARATOR + documentationFilePath;
            }
            ZIPUtils.addFileTypeFileToZip(zipEntries, doc.getPath(), documentationFilePath, fileType);
        }
    }
}

From source file:com.cloudbees.jenkins.plugins.bitbucket.client.BitbucketCloudApiClient.java

/**
 * {@inheritDoc}
 */
@NonNull
@Override
public List<BitbucketPullRequestValue> getPullRequests() throws InterruptedException, IOException {
    List<BitbucketPullRequestValue> pullRequests = new ArrayList<>();

    UriTemplate template = UriTemplate.fromTemplate(REPO_URL_TEMPLATE + "/pullrequests{?page,pagelen}")
            .set("owner", owner).set("repo", repositoryName).set("pagelen", 50);

    BitbucketPullRequests page;
    int pageNumber = 1;
    do {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        String url = template //
                .set("page", pageNumber++) //
                .expand();
        String response = getRequest(url);
        try {
            page = JsonParser.toJava(response, BitbucketPullRequests.class);
        } catch (IOException e) {
            throw new IOException("I/O error when parsing response from URL: " + url, e);
        }
        pullRequests.addAll(page.getValues());
    } while (page.getNext() != null);

    for (BitbucketPullRequestValue pullRequest : pullRequests) {
        setupClosureForPRBranch(pullRequest);
    }

    return pullRequests;
}

From source file:com.splout.db.dnode.Fetcher.java

/**
 * If the copy is interrupted, the partially written file is not deleted.
 */
private void copyFile(File sourceFile, File destFile, Reporter reporter)
        throws IOException, InterruptedException {
    if (!destFile.exists()) {
        destFile.createNewFile();
    }
    FileChannel source = null;
    FileChannel destination = null;

    Throttler throttler = new Throttler((double) bytesPerSecThrottle);

    FileInputStream iS = null;
    FileOutputStream oS = null;

    try {
        iS = new FileInputStream(sourceFile);
        oS = new FileOutputStream(destFile);
        source = iS.getChannel();
        destination = oS.getChannel();
        long bytesSoFar = 0;
        long reportingBytesSoFar = 0;
        long size = source.size();

        int transferred = 0;

        while (bytesSoFar < size) {
            // Needed to being able to be interrupted at any moment.
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }

            // Casting to int here is safe since we will transfer at most "downloadBufferSize" bytes.
            // This is done on purpose for being able to implement Throttling.
            transferred = (int) destination.transferFrom(source, bytesSoFar, downloadBufferSize);
            bytesSoFar += transferred;
            reportingBytesSoFar += transferred;
            throttler.incrementAndThrottle(transferred);
            if (reportingBytesSoFar >= bytesToReportProgress) {
                reporter.progress(reportingBytesSoFar);
                reportingBytesSoFar = 0L;
            }
        }

        if (reporter != null) {
            reporter.progress(reportingBytesSoFar);
        }

    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        if (iS != null) {
            iS.close();
        }
        if (oS != null) {
            oS.close();
        }
        if (source != null) {
            source.close();
        }
        if (destination != null) {
            destination.close();
        }
    }
}

From source file:com.yahoo.ads.pb.kafka.KafkaSimpleConsumer.java

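/**
 * Fetches messages starting at the given offset, retrying with a fresh leader broker when
 * needed. An interrupt detected while fetching is rethrown as a plain InterruptedException;
 * an out-of-range offset is reset to the earliest available one.
 */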
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        FetchRequest request = new FetchRequestBuilder().clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000).maxWait(timeoutMs).minBytes(1).build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId)
                    : ErrorMapping.UnknownCode();
            logger.debug("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset,
                    errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or latest offset ?
                // seems no obvious correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("get earilset offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset)
            : (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}