Example usage for java.lang InterruptedException InterruptedException

List of usage examples for java.lang InterruptedException InterruptedException

Introduction

On this page you can find example usage for the java.lang InterruptedException InterruptedException() constructor.

Prototype

public InterruptedException() 

Document

Constructs an InterruptedException with no detail message.
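
The snippets below all follow the same basic idiom: poll the current thread's interrupt status and, when it is set, throw a fresh InterruptedException() so the caller can react. Here is a minimal, self-contained sketch of that pattern; the InterruptibleWorker class and doWork method are hypothetical names used only for illustration.

public final class InterruptibleWorker {

    // A long-running loop that cooperates with interruption.
    public void doWork() throws InterruptedException {
        for (int i = 0; i < 1_000_000; i++) {
            // Thread.interrupted() clears the interrupt flag, so throw
            // immediately to propagate the interruption to the caller.
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            // ... one unit of work per iteration ...
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                new InterruptibleWorker().doWork();
            } catch (InterruptedException e) {
                // Restore the interrupt status so code further up the
                // call stack can still observe the interruption.
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        worker.interrupt();
        worker.join();
    }
}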

Usage

From source file:gov.redhawk.efs.sca.internal.cache.FileCache.java

private static long copyLarge(InputStream input, OutputStream output)
        throws IOException, InterruptedException, ExecutionException, TimeoutException {
    byte[] buffer = new byte[FileCache.DEFAULT_BUFFER_SIZE];
    long count = 0;
    int n = 0;
    while (-1 != (n = FileCache.readProtected(input, buffer))) {
        output.write(buffer, 0, n);
        count += n;
        if (Thread.currentThread().isInterrupted()) {
            throw new InterruptedException();
        }
    }
    return count;
}

From source file:org.roda_project.commons_ip.model.impl.eark.EARKUtils.java

protected static void addRepresentationsToZipAndMETS(IPInterface ip, List<IPRepresentation> representations,
        Map<String, ZipEntryInfo> zipEntries, MetsWrapper mainMETSWrapper, Path buildDir)
        throws IPException, InterruptedException {
    // representations
    if (representations != null && !representations.isEmpty()) {
        if (ip instanceof SIP) {
            ((SIP) ip).notifySipBuildRepresentationsProcessingStarted(representations.size());
        }
        for (IPRepresentation representation : representations) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            String representationId = representation.getObjectID();
            // 20160407 hsilva: not being used by Common Specification v0.13
            String representationProfile = "";
            String representationContentType = representation.getContentType().asString();

            IPHeader header = new IPHeader(IPEnums.IPStatus.NEW).setAgents(representation.getAgents());
            MetsWrapper representationMETSWrapper = EARKMETSUtils.generateMETS(representationId,
                    representation.getDescription(),
                    IPConstants.METS_REPRESENTATION_TYPE_PART_1 + ":" + representationContentType,
                    representationProfile, false, Optional.empty(), null, header);
            representationMETSWrapper.getMainDiv().setTYPE(representation.getStatus().asString());

            // representation data
            addRepresentationDataFilesToZipAndMETS(ip, zipEntries, representationMETSWrapper, representation,
                    representationId);

            // representation descriptive metadata
            addDescriptiveMetadataToZipAndMETS(zipEntries, representationMETSWrapper,
                    representation.getDescriptiveMetadata(), representationId);

            // representation preservation metadata
            addPreservationMetadataToZipAndMETS(zipEntries, representationMETSWrapper,
                    representation.getPreservationMetadata(), representationId);

            // representation other metadata
            addOtherMetadataToZipAndMETS(zipEntries, representationMETSWrapper,
                    representation.getOtherMetadata(), representationId);

            // representation schemas
            addSchemasToZipAndMETS(zipEntries, representationMETSWrapper, representation.getSchemas(),
                    representationId);

            // representation documentation
            addDocumentationToZipAndMETS(zipEntries, representationMETSWrapper,
                    representation.getDocumentation(), representationId);

            // add representation METS to Zip file and to main METS file
            EARKMETSUtils.addRepresentationMETSToZipAndToMainMETS(zipEntries, mainMETSWrapper, representationId,
                    representationMETSWrapper, IPConstants.REPRESENTATIONS_FOLDER + representationId
                            + IPConstants.ZIP_PATH_SEPARATOR + IPConstants.METS_FILE,
                    buildDir);
        }
        if (ip instanceof SIP) {
            ((SIP) ip).notifySipBuildRepresentationsProcessingEnded();
        }
    }
}

From source file:org.roda_project.commons_ip.model.impl.eark.EARKAIP.java

private void writeToPath(final Map<String, ZipEntryInfo> zipEntryInfos, final Path path, final boolean onlyMets)
        throws IPException, InterruptedException {
    try {
        Files.createDirectories(path);
        for (ZipEntryInfo zipEntryInfo : zipEntryInfos.values()) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }

            zipEntryInfo.prepareEntryforZipping();
            LOGGER.debug("Writing file {}", zipEntryInfo.getFilePath());
            final Path outputPath = Paths.get(path.toString(), zipEntryInfo.getName());
            writeFileToPath(zipEntryInfo, outputPath, onlyMets);
        }
    } catch (final IOException | NoSuchAlgorithmException e) {
        LOGGER.debug("Error in write method", e);
        throw new IPException(e.getMessage(), e);
    }
}

From source file:com.cloudbees.jenkins.plugins.gogs.GogsSCMNavigator.java

private void add(TaskListener listener, SCMSourceObserver observer, GogsRepository repo)
        throws InterruptedException {
    String name = repo.getRepositoryName();
    if (!Pattern.compile(pattern).matcher(name).matches()) {
        listener.getLogger().format("Ignoring %s%n", name);
        return;
    }
    listener.getLogger().format("Proposing %s%n", name);
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }
    SCMSourceObserver.ProjectObserver projectObserver = observer.observe(name);
    GogsSCMSource scmSource = new GogsSCMSource(null, repoOwner, name);
    scmSource.setGogsConnector(getGogsConnector());
    scmSource.setCredentialsId(credentialsId);
    scmSource.setCheckoutCredentialsId(checkoutCredentialsId);
    scmSource.setAutoRegisterHook(isAutoRegisterHooks());
    scmSource.setGogsServerUrl(gogsServerUrl);
    scmSource.setSshPort(sshPort);
    projectObserver.addSource(scmSource);
    projectObserver.complete();
}

From source file:org.kairosdb.plugin.rabbitmq.core.RabbitmqConsumer.java

@Override
public void run() {

    try {

        // Configure consumer
        readConfigurations();

        // Read messages from the queue forever (or until the thread is killed)
        // Keep running even if exceptions are caught along the way
        // Run only if configured correctly
        while (isAlive) {

            QueueingConsumer.Delivery delivery = null;

            try {

                delivery = consumer.nextDelivery();
                BasicProperties props = delivery.getProperties();
                String routingKey = delivery.getEnvelope().getRoutingKey();

                // Consume message
                LOGGER.debug("[KRMQ] ");
                LOGGER.debug("[KRMQ] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>");
                LOGGER.debug("[KRMQ] Consuming new message.");

                consumeMessage(routingKey, delivery.getBody(), props);

                // Acknowledge to the RabbitMQ server that the message was
                // processed, whether or not it was consumed successfully
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);

            } catch (InterruptedException e) {
                throw new InterruptedException();
            } catch (IOException | ShutdownSignalException | ConsumerCancelledException e) {
                LOGGER.error("[KRMQ] An error occurred: ", e);
                throw new InterruptedException();
            }

            LOGGER.debug("[KRMQ] <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");

        }

    } catch (InterruptedException e) {

        isAlive = false;
        LOGGER.info("[KRMQ] Execution interrupted." + " Disconnecting from RabbitMQ broker.");

        if (connection != null) {
            try {
                connection.close();
            } catch (IOException ignore) {
                // Channel closed already
            }
        }

        Thread.currentThread().interrupt();

    }

}

From source file:org.cloudfoundry.tools.timeout.HotSwappingTimeoutProtectionStrategyTest.java

@Test
public void shouldHandleInterruptedWaitingForPollResponseConsumed() throws Exception {
    willThrow(new InterruptedException()).given(this.requestCoordinator)
            .awaitPollReponseConsumed(LONG_POLL_TIME);
    given(this.requestCoordinator.isPollResponseConsumed()).willReturn(false);
    this.strategy.handlePoll(this.request, this.response);
    verify(this.requestCoordinator).clearPollResponse();
    verify(this.response).setHeader(TimeoutProtectionHttpHeader.POLL, UUID);
    verify(this.response).setStatus(204);
}

From source file:io.s4.meter.common.EventGenerator.java

/**
 * @param eventID
 *            unique ID for this document
 * @throws InterruptedException
 * @throws JSONException
 */
private void send(long eventID) throws InterruptedException, JSONException {

    String avgRate;
    StringBuilder docId = new StringBuilder();
    docId.append(hostname).append("-").append(driver.hashCode()).append("-").append(eventID);
    JSONObject jsonDoc = getDocument(docId.toString());
    time = System.currentTimeMillis();
    long delta = (time - startTime) - (eventPeriod * eventCount);
    if (delta < 0) {

        /*
         * Wait if we are transmitting faster than the target rate.
         */
        Thread.sleep(-delta);
    }

    Message m = new Message(s4StreamName, s4EventClassName, jsonDoc.toString());

    if (isInterrupted) {
        close();
        throw new InterruptedException();
    }

    try {
        driver.send(m);
    } catch (IOException e) {

        logger.error("Unable not send a message using the S4 driver.", e);

        avgRate = String.format("%8.2f", ((float) (eventCount * 1000) / (float) (time - startTime)));
        logger.error("Event count: " + String.format("%10d", eventCount) + " time: "
                + String.format("%8d", (time - startTime) / 1000) + " avg rate: " + avgRate + "   "
                + jsonDoc.toString() + " " + s4StreamName + " " + s4EventClassName);
    }

    if (logger.isTraceEnabled() && (eventCount % modulus) == 0) {

        if (eventCount > 0)
            avgRate = String.format("%8.2f", ((float) (eventCount * 1000) / (float) (time - startTime)));
        else
            avgRate = "--------";

        logger.trace("count: " + String.format("%10d", eventCount) + " time: "
                + String.format("%8d", (time - startTime) / 1000) + " avg rate: " + avgRate + "   "
                + jsonDoc.toString() + " " + s4StreamName + " " + s4EventClassName);
    }
    eventCount++;
}

From source file:org.roda_project.commons_ip.model.impl.eark.EARKSIP.java

private void createZipFile(Map<String, ZipEntryInfo> zipEntries, Path zipPath)
        throws IPException, InterruptedException {
    try {
        notifySipBuildPackagingStarted(zipEntries.size());
        ZIPUtils.zip(zipEntries, Files.newOutputStream(zipPath), this, false, true);
    } catch (ClosedByInterruptException e) {
        throw new InterruptedException();
    } catch (IOException e) {
        throw new IPException("Error generating E-ARK SIP ZIP file. Reason: " + e.getMessage(), e);
    } finally {
        notifySipBuildPackagingEnded();
    }
}

From source file:com.github.rinde.opt.localsearch.Swaps.java

static <C, T> ImmutableList<ImmutableList<T>> opt2(ImmutableList<ImmutableList<T>> schedule,
        IntList startIndices, C context, RouteEvaluator<C, T> evaluator, boolean depthFirst,
        Optional<RandomGenerator> rng, Optional<? extends ProgressListener<T>> listener)
        throws InterruptedException {

    checkArgument(schedule.size() == startIndices.size());

    final Schedule<C, T> baseSchedule = Schedule.create(context, schedule, startIndices, evaluator);

    final Object2DoubleLinkedOpenHashMap<ImmutableList<T>> routeCostCache = new Object2DoubleLinkedOpenHashMap<>(
            CACHE_SIZE);

    for (int i = 0; i < baseSchedule.routes.size(); i++) {
        routeCostCache.put(baseSchedule.routes.get(i), baseSchedule.objectiveValues.getDouble(i));
    }

    Schedule<C, T> bestSchedule = baseSchedule;
    boolean isImproving = true;
    while (isImproving) {
        isImproving = false;

        final Schedule<C, T> curBest = bestSchedule;
        Iterator<Swap<T>> it = swapIterator(curBest);
        if (depthFirst) {
            // randomize ordering of swaps
            final List<Swap<T>> swaps = newArrayList(it);
            Collections.shuffle(swaps, new RandomAdaptor(rng.get()));
            it = swaps.iterator();
        }

        while (it.hasNext()) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            final Swap<T> swapOperation = it.next();
            final Optional<Schedule<C, T>> newSchedule = swap(curBest, swapOperation,
                    bestSchedule.objectiveValue - curBest.objectiveValue, routeCostCache);

            if (newSchedule.isPresent()) {
                isImproving = true;
                bestSchedule = newSchedule.get();

                if (listener.isPresent()) {
                    listener.get().notify(bestSchedule.routes, bestSchedule.objectiveValue);
                }
                if (depthFirst) {
                    // first improving swap is chosen as new starting point (depth
                    // first).
                    break;
                }
            }
        }
    }
    return bestSchedule.routes;
}

From source file:eu.stratosphere.nephele.io.RuntimeInputGate.java

@Override
public InputChannelResult readRecord(T target) throws IOException, InterruptedException {

    if (this.channelToReadFrom == -1) {
        if (this.isClosed()) {
            return InputChannelResult.END_OF_STREAM;
        }

        if (Thread.interrupted()) {
            throw new InterruptedException();
        }

        this.channelToReadFrom = waitForAnyChannelToBecomeAvailable();
    }

    InputChannelResult result = this.getInputChannel(this.channelToReadFrom).readRecord(target);
    switch (result) {
    case INTERMEDIATE_RECORD_FROM_BUFFER: // full record and we can stay on the same channel
        return InputChannelResult.INTERMEDIATE_RECORD_FROM_BUFFER;

    case LAST_RECORD_FROM_BUFFER: // full record, but we must switch the channel afterwards
        this.channelToReadFrom = -1;
        return InputChannelResult.LAST_RECORD_FROM_BUFFER;

    case END_OF_SUPERSTEP:
        this.channelToReadFrom = -1;
        return InputChannelResult.END_OF_SUPERSTEP;

    case TASK_EVENT: // task event
        this.currentEvent = this.getInputChannel(this.channelToReadFrom).getCurrentEvent();
        this.channelToReadFrom = -1; // event always marks a unit as consumed
        return InputChannelResult.TASK_EVENT;

    case NONE: // internal event or an incomplete record that needs further chunks
        // the current unit is exhausted
        this.channelToReadFrom = -1;
        return InputChannelResult.NONE;

    case END_OF_STREAM: // channel is done
        this.channelToReadFrom = -1;
        return isClosed() ? InputChannelResult.END_OF_STREAM : InputChannelResult.NONE;

    default: // silence the compiler
        throw new RuntimeException();
    }
}