Example usage for java.util.concurrent TimeoutException TimeoutException(String message)

Introduction

On this page you can find example usages of the java.util.concurrent TimeoutException(String message) constructor, collected from open-source projects.

Prototype

public TimeoutException(String message) 

Document

Constructs a TimeoutException with the specified detail message.
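The constructor simply stores the detail message, which getMessage() returns later. As a minimal sketch of the poll-until-deadline pattern that most of the usage examples below share (all names here are hypothetical, not taken from any of the projects), a caller computes a deadline, polls a condition, and throws a TimeoutException with a descriptive message once the deadline passes:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public class AwaitUntil {
    // Polls the condition until it holds or the deadline passes.
    static void await(BooleanSupplier condition, long timeout, TimeUnit unit)
            throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + unit.toMillis(timeout);
        while (System.currentTimeMillis() < deadline) {
            if (condition.getAsBoolean()) {
                return;
            }
            Thread.sleep(100); // retry interval; tune to the cost of the check
        }
        // The message passed to the constructor is what getMessage() reports.
        throw new TimeoutException("Condition not met within " + timeout + " " + unit);
    }
}

Including the waited-for identifiers and the timeout value in the message, as the examples below do, makes the resulting stack trace self-explanatory.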

Usage

From source file:org.ow2.proactive.scheduler.rest.SchedulerClient.java

@Override
public Map.Entry<String, JobResult> waitForAnyJob(List<String> jobIds, long timeout)
        throws NotConnectedException, UnknownJobException, PermissionException, TimeoutException {
    timeout += currentTimeMillis();
    while (currentTimeMillis() < timeout) {
        for (String jobId : jobIds) {
            if (isJobFinished(jobId)) {
                return toEntry(jobId, getJobResult(jobId));
            }
        }
        if (currentTimeMillis() + RETRY_INTERVAL < timeout) {
            sleep(RETRY_INTERVAL);
        } else {
            break;
        }
    }
    throw new TimeoutException(format("Timeout waiting for any job: jobIds=%s.", String.valueOf(jobIds)));
}

From source file:org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor.java

protected void handleIterator(final Context context, final Iterator itty, final Graph graph)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        onTraversalSuccess(graph, context);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while.  this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize.  Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWritable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval.  as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    onError(graph, context);
                    break;
                }

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (itty.hasNext())
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will roll back given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        onTraversalSuccess(graph, context);

                        // exit the result iteration loop as there are no more results left.  using this external control
                        // because of the above commit.  some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!itty.hasNext())
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred.  in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }

        stopWatch.unsplit();
    }

    stopWatch.stop();
}

From source file:org.ow2.proactive.scheduler.rest.SchedulerClient.java

@Override
public Entry<String, TaskResult> waitForAnyTask(String jobId, List<String> taskNames, long timeout)
        throws UnknownJobException, NotConnectedException, PermissionException, UnknownTaskException,
        TimeoutException {
    timeout += currentTimeMillis();
    while (currentTimeMillis() < timeout) {
        for (String taskName : taskNames) {
            if (isTaskFinished(jobId, taskName)) {
                return toEntry(taskName, getTaskResult(jobId, taskName));
            }
        }
        if (currentTimeMillis() + RETRY_INTERVAL < timeout) {
            sleep(RETRY_INTERVAL);
        } else {
            break;
        }
    }
    throw new TimeoutException(
            format("Timeout waiting for any task: job-id=%s, task-ids=%s.", jobId, String.valueOf(taskNames)));
}

From source file:org.rhq.modules.plugins.wildfly10.BaseComponent.java

protected <R> R readAttribute(Address address, String name, Class<R> resultType, int timeoutSec)
        throws Exception {
    Operation op = new ReadAttribute(address, name);
    Result res = getASConnection().execute(op, timeoutSec);

    if (!res.isSuccess()) {
        if (res.isTimedout()) {
            throw new TimeoutException("Read attribute operation timed out");
        }
        if (res.isRolledBack() && res.getFailureDescription().startsWith("JBAS013456")) {
            throw new UnauthorizedException("Failed to read attribute [" + name + "] of address ["
                    + getAddress().getPath() + "] - response: " + res);
        }
        if (res.isRolledBack() && !res.getFailureDescription().startsWith("JBAS015135")) {
            // this means we've connected, authenticated, but still failed
            throw new ResultFailedException("Failed to read attribute [" + name + "] of address ["
                    + getAddress().getPath() + "] - response: " + res);
        }
        if (res.isRolledBack() && res.getFailureDescription().startsWith("JBAS015135")) {
            // this means we've connected and authenticated, but the security realm is not ready yet
            throw new SecurityRealmNotReadyException("Failed to read attribute [" + name + "] of address ["
                    + getAddress().getPath() + "] - response: " + res);
        }

        throw new Exception("Failed to read attribute [" + name + "] of address [" + getAddress().getPath()
                + "] - response: " + res);
    }

    return resultType.cast(res.getResult());
}

From source file:org.cloudifysource.esc.driver.provisioning.privateEc2.PrivateEC2CloudifyDriver.java

private Instance waitRunningInstance(final Instance ec2instance, final long duration, final TimeUnit unit)
        throws CloudProvisioningException, TimeoutException {

    final long endTime = System.currentTimeMillis() + unit.toMillis(duration);

    while (System.currentTimeMillis() < endTime) {
        // Sleep before requesting the instance description
        // because we can get an AWS Error Code: InvalidInstanceID.NotFound if the request is too early.
        this.sleep();

        final DescribeInstancesRequest describeRequest = new DescribeInstancesRequest();
        describeRequest.setInstanceIds(Arrays.asList(ec2instance.getInstanceId()));
        final DescribeInstancesResult describeInstances = this.ec2.describeInstances(describeRequest);

        for (final Reservation resa : describeInstances.getReservations()) {
            for (final Instance instance : resa.getInstances()) {
                final InstanceStateType state = InstanceStateType.valueOf(instance.getState().getCode());
                if (logger.isLoggable(Level.FINER)) {
                    logger.finer("instance= " + instance.getInstanceId() + " state=" + state);
                }
                switch (state) {
                case PENDING:
                    break;
                case RUNNING:
                    logger.fine("running okay...");
                    return instance;
                case STOPPING:
                case SHUTTING_DOWN:
                case TERMINATED:
                case STOPPED:
                default:
                    throw new CloudProvisioningException("Failed to allocate server - Cloud reported node in "
                            + state.getName() + " state. Node details: " + ec2instance);

                }
            }
        }
    }

    throw new TimeoutException("Node failed to reach RUNNING mode in time");
}

From source file:org.openspaces.admin.internal.gsm.DefaultGridServiceManager.java

private void undeployProcessingUnitsAndWaitInternal(ProcessingUnit[] processingUnits, long timeout,
        TimeUnit timeUnit) throws TimeoutException, InterruptedException {
    long end = SystemTime.timeMillis() + timeUnit.toMillis(timeout);

    List<GridServiceContainer> containersPendingRemoval = new ArrayList<GridServiceContainer>();
    List<ProcessingUnitInstance> puInstancesPendingRemoval = new ArrayList<ProcessingUnitInstance>();
    List<SpaceInstance> spaceInstancesPendingRemoval = new ArrayList<SpaceInstance>();

    for (ProcessingUnit pu : processingUnits) {
        for (GridServiceContainer container : admin.getGridServiceContainers()) {
            ProcessingUnitInstance[] processingUnitInstances = container
                    .getProcessingUnitInstances(pu.getName());
            if (processingUnitInstances.length > 0) {
                puInstancesPendingRemoval.addAll(Arrays.asList(processingUnitInstances));
                for (ProcessingUnitInstance puInstance : processingUnitInstances) {
                    SpaceInstance spaceInstance = puInstance.getSpaceInstance();
                    if (spaceInstance != null) {
                        spaceInstancesPendingRemoval.add(spaceInstance);
                    }
                }
                if (isManagedByElasticServiceManager(pu)) {
                    // add all containers that are managed by the elastic pu
                    containersPendingRemoval.add(container);
                }
            }
        }
    }

    final Map<String, CountDownLatch> latches = new HashMap<String, CountDownLatch>();
    for (ProcessingUnit pu : processingUnits) {
        latches.put(pu.getName(), new CountDownLatch(1));
    }

    ProcessingUnitRemovedEventListener listener = new ProcessingUnitRemovedEventListener() {
        public void processingUnitRemoved(ProcessingUnit removedPu) {
            CountDownLatch latch = latches.get(removedPu.getName());
            if (latch != null) {
                latch.countDown();
            }
        }
    };
    admin.getProcessingUnits().getProcessingUnitRemoved().add(listener);
    try {
        for (final ProcessingUnit pu : processingUnits) {
            long gsmTimeout = end - SystemTime.timeMillis();
            if (gsmTimeout < 0) {
                throw new TimeoutException("Timeout expired before udeploying processing unit " + pu);
            }
            final InternalGridServiceManager managingGsm = (InternalGridServiceManager) pu
                    .waitForManaged(gsmTimeout, TimeUnit.MILLISECONDS);
            if (managingGsm == null) {
                throw new TimeoutException(
                        "Timeout expired while waiting for GSM that manages processing unit " + pu);
            }

            admin.scheduleAdminOperation(new Runnable() {
                @Override
                public void run() {
                    managingGsm.undeployProcessingUnit(pu.getName());
                }
            });
        }
        for (ProcessingUnit pu : processingUnits) {
            long puRemovedTimeout = end - SystemTime.timeMillis();
            if (puRemovedTimeout < 0) {
                throw new TimeoutException(
                        "Timeout expired before waiting for processing unit " + pu + " to undeploy");
            }
            if (!latches.get(pu.getName()).await(puRemovedTimeout, TimeUnit.MILLISECONDS)) {
                throw new TimeoutException(
                        "Timeout expired while waiting for processing unit " + pu + " to undeploy");
            }
        }
    } finally {
        admin.getProcessingUnits().getProcessingUnitRemoved().remove(listener);
    }

    // use polling to determine elastic pu completed undeploy cleanup of containers (and machines)
    // and that the admin has been updated with the relevant lookup service remove events.
    while (true) {
        try {
            verifyUndeployComplete(processingUnits);
            verifyNotDiscovered(puInstancesPendingRemoval);
            verifyNotDiscovered(spaceInstancesPendingRemoval);
            verifyNotDiscovered(containersPendingRemoval);
            verifyInstancesNotUndeploying(puInstancesPendingRemoval);
            verifyOrphanInstancesNotDeploying(processingUnits);
            verifyOrphanInstancesNotDeployed(processingUnits);
            break;
        } catch (TimeoutException e) {
            long sleepDuration = end - SystemTime.timeMillis();
            if (sleepDuration < 0) {
                throw e;
            }
            //suppress and retry
            Thread.sleep(Math.min(1000, sleepDuration));
        }
    }
}

From source file:org.apache.bookkeeper.replication.AuditorLedgerCheckerTest.java

private void waitForAuditToComplete() throws Exception {
    long endTime = System.currentTimeMillis() + 5_000;
    while (System.currentTimeMillis() < endTime) {
        Auditor auditor = getAuditorBookiesAuditor();
        if (auditor != null) {
            Future<?> task = auditor.submitAuditTask();
            task.get(5, TimeUnit.SECONDS);
            return;
        }
        Thread.sleep(100);
    }
    throw new TimeoutException("Could not find an audit within 5 seconds");
}

From source file:com.microsoft.azurebatch.jenkins.azurebatch.AzureBatchHelper.java

private void waitForAtLeastOneJobPreparationTaskCompleted(String poolId, int waitTimeoutInMin)
        throws InterruptedException, BatchErrorException, IOException, TimeoutException {
    long startTime = System.currentTimeMillis();
    long elapsedTime = 0;
    boolean completed = false;
    int lastPreparingVmCount = 0;
    int lastTotalVmCount = 0;

    // wait for at least one JobPreparationTask to complete
    while (elapsedTime < (long) waitTimeoutInMin * 60 * 1000) {
        List<JobPreparationAndReleaseTaskExecutionInformation> statusList = client.jobOperations()
                .listPreparationAndReleaseTaskStatus(jobId);
        if (statusList.size() > 0) {
            for (JobPreparationAndReleaseTaskExecutionInformation info : statusList) {
                JobPreparationTaskExecutionInformation taskInfo = info.jobPreparationTaskExecutionInfo();
                if (taskInfo != null && taskInfo.state() == JobPreparationTaskState.COMPLETED) {
                    if (taskInfo.exitCode() != 0) {
                        Logger.log(listener,
                                "Warning: JobPreparation task failed (ExitCode %d is non-zero) on VM %s.",
                                taskInfo.exitCode(), info.nodeId());
                        Logger.log(listener,
                                "Warning: One or more JobPreparation tasks failed on VM(s), no test tasks "
                                        + "will be scheduled to such VMs. Please check your VM setup script for that VM. "
                                        + "You may find more information from JobPreparation tasks' log files stdout.txt "
                                        + "and stderr.txt.");
                    }

                    completed = true;
                    break;
                }
            }
        }

        if (completed) {
            break;
        }

        int totalVmCount = 0;
        int preparingVmCount = 0;
        List<ComputeNode> nodes = client.computeNodeOperations().listComputeNodes(poolId,
                new DetailLevel.Builder().withSelectClause("state").withFilterClause("state eq 'idle'")
                        .build());
        if (nodes != null) {
            totalVmCount = nodes.size();
            for (ComputeNode node : nodes) {
                if (node.state() == ComputeNodeState.IDLE) {
                    preparingVmCount++;
                }
            }
        }

        if (lastTotalVmCount != totalVmCount || lastPreparingVmCount != preparingVmCount) {
            Logger.log(listener,
                    "Waiting for at least one Azure Batch JobPreparation task to complete, %d/%d VM(s) are preparing...",
                    preparingVmCount, totalVmCount);

            lastTotalVmCount = totalVmCount;
            lastPreparingVmCount = preparingVmCount;
        }

        long nextWaitTime = 15 * 1000 - (System.currentTimeMillis() - startTime - elapsedTime);
        if (nextWaitTime > 0) {
            Thread.sleep(nextWaitTime);
        }

        elapsedTime = System.currentTimeMillis() - startTime;
    }

    if (!completed) {
        throw new TimeoutException(String.format(
                "No JobPreparationTask of job %s is completed after %d minutes.", jobId, waitTimeoutInMin));
    } else {
        Logger.log(listener,
                "At least one JobPreparationTask of job %s is completed, and tasks will be running.", jobId);
    }
}

From source file:org.openspaces.admin.internal.gsm.DefaultGridServiceManager.java

private void verifyOrphanInstancesNotDeployed(ProcessingUnit[] processingUnits) throws TimeoutException {

    ProcessingUnitInstance[] orphanProcessingUnitInstances = ((InternalAdmin) admin)
            .getOrphanProcessingUnitInstances();

    final Set<String> puNames = new HashSet<String>();
    for (ProcessingUnit pu : processingUnits) {
        puNames.add(pu.getName());
    }

    for (ProcessingUnitInstance orphanInstance : orphanProcessingUnitInstances) {
        final String puName = orphanInstance.getName();
        if (puNames.contains(puName)) {
            throw new TimeoutException(
                    "Orphan instance still being deployed. " + orphanInstance.getProcessingUnitInstanceName());
        }
    }
}

From source file:org.openspaces.admin.internal.gsm.DefaultGridServiceManager.java

private void verifyOrphanInstancesNotDeploying(ProcessingUnit[] processingUnits) throws TimeoutException {
    try {
        for (ProcessingUnit pu : processingUnits) {
            if (gsm.isOrphanInstancesBeingProvisioned(pu.getName())) {
                throw new TimeoutException(pu.getName() + " has orphan instances still being deployed.");
            }
        }
    } catch (RemoteException e) {
        throw new AdminException("Failed to check with gsm if orphan instances are being provisioned or not",
                e);
    }
}