Example usage for java.util.concurrent TimeoutException TimeoutException

Introduction

On this page you can find example usage for the java.util.concurrent TimeoutException(String message) constructor.

Prototype

public TimeoutException(String message) 

Document

Constructs a TimeoutException with the specified detail message.
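The typical use is a polling loop that constructs the exception with a detail message recording what was being waited for and for how long. Below is a minimal, self-contained sketch of that pattern; the helper name, sleep interval, and message text are illustrative and do not come from any of the projects quoted on this page.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public class AwaitWithTimeout {

    // Polls the supplied condition until it holds or the deadline passes,
    // then throws TimeoutException with a descriptive detail message.
    public static void await(BooleanSupplier condition, long timeoutMillis)
            throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() >= deadline) {
                throw new TimeoutException("Condition not met within " + timeoutMillis + " ms");
            }
            Thread.sleep(100);
        }
    }
}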

Usage

From source file:org.jenkinsci.plugins.os_ci.model.Product.java

public boolean cleanOpenstackBeforeDeployment(NexusClient nexusClient, OpenstackParameters openstackParameters,
        DeployParmeters deployParmeters) throws Exception {
    final String targetFolder = Joiner.on(File.separator).join(build.getWorkspace().getRemote(), "archive");

    OpenStackClient openStackClient = new OpenStackClient(listener, openstackParameters.getOpenstackUser(),
            openstackParameters.getOpenstackPassword(), openstackParameters.getOpenstackTenant(),
            "/v2.0/tokens", openstackParameters.getOpenstackIP(), 5000);

    //        String[] fileTypes = {"pom"};
    List<String> fileTypes = new ArrayList<String>();
    fileTypes.add("pom");
    nexusClient.downloadProductArtifacts(getArtifact(),
            Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId()), fileTypes);

    // parse pom and get its product dependencies
    MavenPom mp = new MavenPom(
            new File(Joiner.on(File.separator).join(targetFolder, artifact.getArtifactId(), "pom.xml")));
    List<ArtifactParameters> dependentProducts = mp.getPomProductDependencies();

    List<ArtifactParameters> subsystems = new ArrayList<ArtifactParameters>();
    subsystems.addAll(dependentProducts);
    subsystems.add(artifact);

    LogUtils.logSection(listener, "Clean Openstack previous deployments:");
    // delete all stacks
    if (subsystems != null) {
        for (ArtifactParameters s : subsystems) {
            String productName = s.getArtifactId().toLowerCase().replace("-product", "");

            StackDetails stackDetails = openStackClient.getStackDetails(productName);
            if (stackDetails.getStackStatus() != StackStatus.UNDEFINED) {
                LogUtils.log(listener, "Deleting stack " + productName);
                openStackClient.deleteStack(stackDetails);
                LogUtils.log(listener, "Releasing IPs");
                openStackClient.releaseStackFloatingIPs(productName);
            }
        }
    }
    deployParmeters.setGlobalOutputs(new HashMap<String, String>());

    // verify all deletions completed - run every minute for 30 minutes.
    if (subsystems != null) {
        for (ArtifactParameters s : subsystems) {
            String productName = s.getArtifactId().toLowerCase().replace("-product", "");

            long startTime = System.currentTimeMillis();
            boolean deleteComplete = false;

            while (!deleteComplete && (System.currentTimeMillis() - startTime < DELETE_TIMEOUT)) {
                StackStatus stackStatus = openStackClient.getStackStatus(productName);
                LogUtils.log(listener, "Waiting for deletion of " + productName + ". Status:" + stackStatus);
                if (stackStatus == StackStatus.DELETE_COMPLETE || stackStatus == StackStatus.UNDEFINED) {
                    deleteComplete = true;
                } else
                    Thread.sleep(SLEEP_TIME);
            }
            if (!deleteComplete)
                throw new TimeoutException("Delete Stack -timeout exception");
        }
    }

    // Prepare parameters map which override the stack 'env'
    prepareStackOverrides(openStackClient, openstackParameters, deployParmeters);
    LogUtils.log(listener, "Get User parameters");

    deployParmeters.resetDeployCounter();
    return true;

}

From source file:com.amazonaws.services.simpleworkflow.flow.common.WorkflowExecutionUtils.java

/**
 * Waits up to specified timeout for workflow instance completion.
 * <strong>Never</strong> use in a production setting, as polling for workflow
 * instance status is an expensive operation.
 *
 * @param workflowExecution
 *            result of
 *            {@link AmazonSimpleWorkflow#startWorkflowInstance(com.amazonaws.services.simpleworkflow.model.StartWorkflowInstanceRequest)}
 * @param timeoutSeconds
 *            maximum time to wait for completion. 0 means wait forever.
 * @return instance close status
 * @throws TimeoutException if the workflow instance does not complete within the timeout
 */
public static String waitForWorkflowInstanceCompletion(AmazonSimpleWorkflow service, String domain,
        WorkflowExecution workflowExecution, long timeoutSeconds)
        throws InterruptedException, TimeoutException {
    long start = System.currentTimeMillis();
    WorkflowExecutionInfo executionInfo = null;
    do {
        if (timeoutSeconds > 0 && System.currentTimeMillis() - start >= timeoutSeconds * 1000) {
            String historyDump = WorkflowExecutionUtils.prettyPrintHistory(service, domain, workflowExecution);
            throw new TimeoutException(
                    "Workflow instance is not complete after " + timeoutSeconds + " seconds: \n" + historyDump);
        }
        if (executionInfo != null) {
            Thread.sleep(1000);
        }
        executionInfo = describeWorkflowInstance(service, domain, workflowExecution);
    } while (executionInfo.getExecutionStatus().equals(ExecutionStatus.OPEN.toString()));
    return executionInfo.getCloseStatus();
}

From source file:com.mgmtp.perfload.core.console.LtConsole.java

private void sendJars() throws InterruptedException, TimeoutException {
    List<TestJar> testJars = config.getTestJars();
    if (testJars.isEmpty()) {
        while (jarLatch.getCount() > 0) {
            jarLatch.countDown();
        }
    }

    for (Daemon daemon : daemons) {
        int daemonId = daemon.getId();
        LOG.info("Transferring jars to daemon {}", daemonId);
        Client client = clients.get(daemonId);

        // Send jars separately in order to save memory
        for (TestJar jar : testJars) {
            LOG.debug("Transferring jar file: {}", jar.getName());
            // Await sending of the message. Otherwise jars might pile up in memory causing an OOME.
            if (!client.sendMessage(new Payload(PayloadType.JAR, jar)).await(30L, TimeUnit.SECONDS)) {
                throw new TimeoutException("Timeout waiting for jar to be sent.");
            }
        }
    }
}
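In this example the timeout surfaces as a boolean from a timed await rather than as an exception, and the caller converts the false return into a TimeoutException. The same conversion works with any JDK timed wait that reports expiry by returning false, as in this hedged sketch built around a plain CountDownLatch (the latch, timeout, and message are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class AwaitLatch {

    // CountDownLatch#await reports timeout by returning false, so the caller
    // raises TimeoutException itself with a message naming the operation.
    public static void awaitTransfer(CountDownLatch transferDone)
            throws InterruptedException, TimeoutException {
        if (!transferDone.await(30L, TimeUnit.SECONDS)) {
            throw new TimeoutException("Timeout waiting for transfer to be acknowledged.");
        }
    }
}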

From source file:com.jivesoftware.os.amza.service.AmzaService.java

@Override
public void awaitOnline(PartitionName partitionName, long timeoutMillis) throws Exception {
    if (!ringStoreWriter.isMemberOfRing(partitionName.getRingName(), 0)) {
        throw new NotARingMemberException("Not a member of the ring for partition: " + partitionName);
    }

    boolean online = false;
    String errorMessage = null;
    long endAfterTimestamp = System.currentTimeMillis() + timeoutMillis;
    do {
        try {
            partitionStripeProvider.txPartition(partitionName,
                    (txPartitionStripe, highwaterStorage, versionedAquarium) -> {
                        versionedAquarium.wipeTheGlass();
                        VersionedPartitionName versionedPartitionName = versionedAquarium
                                .getVersionedPartitionName();

                        txPartitionStripe.tx((deltaIndex, stripeIndex, partitionStripe) -> {
                            partitionCreator.createStoreIfAbsent(versionedPartitionName, stripeIndex);
                            return null;
                        });

                        versionedAquarium
                                .awaitOnline(Math.max(endAfterTimestamp - System.currentTimeMillis(), 0));
                        return null;
                    });
            online = true;
            break;
        } catch (PartitionIsExpungedException e) {
            LOG.warn("Awaiting online for expunged partition {}, we will compost and retry", partitionName);
            errorMessage = e.getMessage();
            partitionComposter.compostPartitionIfNecessary(partitionName);
        } catch (PropertiesNotPresentException e) {
            errorMessage = e.getMessage();
            long timeRemaining = endAfterTimestamp - System.currentTimeMillis();
            if (timeRemaining > 0) {
                Thread.sleep(Math.min(100, Math.max(timeRemaining / 2, 10))); //TODO this is stupid
            }
        }
    } while (System.currentTimeMillis() < endAfterTimestamp);

    if (!online) {
        throw new TimeoutException(
                errorMessage != null ? errorMessage : "Timed out waiting for the partition to come online");
    }
}

From source file:org.openspaces.grid.gsm.rebalancing.RebalancingUtils.java

static FutureStatefulProcessingUnitInstance relocateProcessingUnitInstanceAsync(
        final GridServiceContainer targetContainer, final ProcessingUnitInstance puInstance, final Log logger,
        final long duration, final TimeUnit timeUnit) {

    final ProcessingUnit pu = puInstance.getProcessingUnit();
    final GridServiceContainer[] replicationSourceContainers = getReplicationSourceContainers(puInstance);
    final int instanceId = puInstance.getInstanceId();

    final AtomicReference<Throwable> relocateThrowable = new AtomicReference<Throwable>();

    final Admin admin = puInstance.getAdmin();
    final int runningNumber = puInstance.getClusterInfo().getRunningNumber();
    final String puName = puInstance.getName();

    final GridServiceContainer sourceContainer = puInstance.getGridServiceContainer();
    final Set<ProcessingUnitInstance> puInstancesFromSamePartition = getOtherInstancesFromSamePartition(
            puInstance);
    if (logger.isDebugEnabled()) {
        logger.debug(
                "Found instances from the same partition as " + RebalancingUtils.puInstanceToString(puInstance)
                        + " : " + RebalancingUtils.puInstancesToString(puInstancesFromSamePartition));
    }

    if (puInstancesFromSamePartition.size() != pu.getNumberOfBackups()) {
        // total number of instances per partition = numberOfBackups + 1
        throw new IllegalStateException("puInstancesFromSamePartition has "
                + puInstancesFromSamePartition.size() + " instances instead of " + pu.getNumberOfBackups());
    }

    final long start = System.currentTimeMillis();
    final long end = start + timeUnit.toMillis(duration);

    ((InternalAdmin) admin).scheduleAdminOperation(new Runnable() {
        public void run() {
            try {
                logger.debug("Relocation of " + RebalancingUtils.puInstanceToString(puInstance) + " to "
                        + ContainersSlaUtils.gscToString(targetContainer) + " has started.");
                puInstance.relocate(targetContainer);
            } catch (AdminException e) {
                logger.error("Admin exception " + e.getMessage(), e);
                relocateThrowable.set(e);
            } catch (Throwable e) {
                logger.error("Unexpected exception " + e.getMessage(), e);
                relocateThrowable.set(e);
            }
        }
    });

    return new FutureStatefulProcessingUnitInstance() {

        Throwable throwable;
        ProcessingUnitInstance newInstance;

        public boolean isTimedOut() {
            return System.currentTimeMillis() > end;
        }

        public boolean isDone() {

            endRelocation();

            return isTimedOut() || throwable != null || newInstance != null;
        }

        public ProcessingUnitInstance get() throws ExecutionException, IllegalStateException, TimeoutException {

            endRelocation();

            ExecutionException exception = getException();
            if (exception != null) {
                throw exception;
            }
            if (newInstance == null) {
                if (isTimedOut()) {
                    throw new TimeoutException("Relocation timeout");
                }
                throw new IllegalStateException("Async operation is not done yet.");
            }

            return newInstance;
        }

        public Date getTimestamp() {
            return new Date(start);
        }

        public ExecutionException getException() {

            endRelocation();
            if (throwable != null) {
                return new ExecutionException(throwable.getMessage(), throwable);
            }
            return null;
        }

        /**
         * populates this.exception or this.newInstance if relocation is complete
         */
        private void endRelocation() {
            boolean inProgress = true;

            tryStateChange(); // this makes relocation synchronous
            if (newInstance != null || throwable != null) {
                inProgress = false;
            }

            if (inProgress) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Relocation from " + ContainersSlaUtils.gscToString(getSourceContainer())
                            + " to " + ContainersSlaUtils.gscToString(getTargetContainer())
                            + " is in progress.");
                }
                // do nothing. relocate() method running on another thread has not returned yet.
            }
        }

        private void tryStateChange() {
            ProcessingUnitInstance relocatedInstance = getRelocatedProcessingUnitInstance();
            if (relocatedInstance != null) {

                if (relocatedInstance.getGridServiceContainer().equals(targetContainer)) {
                    if (relocatedInstance.getSpaceInstance() != null
                            && relocatedInstance.getSpaceInstance().getMode() != SpaceMode.NONE) {
                        if (logger.isDebugEnabled()) {
                            logger.debug(
                                    "Relocation from " + ContainersSlaUtils.gscToString(getSourceContainer())
                                            + " to " + ContainersSlaUtils.gscToString(getTargetContainer())
                                            + " had ended successfully.");
                        }
                        newInstance = relocatedInstance;
                    }
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Relocation from " + ContainersSlaUtils.gscToString(getSourceContainer())
                                + " to " + ContainersSlaUtils.gscToString(getTargetContainer())
                                + " has ended with an error.");
                    }
                    throwable = new WrongContainerProcessingUnitRelocationException(puInstance,
                            targetContainer);

                }
            }
        }

        private ProcessingUnitInstance getRelocatedProcessingUnitInstance() {
            for (GridServiceContainer container : admin.getGridServiceContainers()) {
                for (ProcessingUnitInstance instance : container.getProcessingUnitInstances(puName)) {
                    if (!instance.equals(puInstance)
                            && instance.getClusterInfo().getRunningNumber() == runningNumber
                            && !puInstancesFromSamePartition.contains(instance)) {
                        return instance;
                    }
                }
            }
            return null;
        }

        private boolean isAtLeastOneInstanceValid(Set<ProcessingUnitInstance> instances) {
            boolean isValidState = false;
            for (ProcessingUnitInstance instance : instances) {
                if (instance.isDiscovered() && instance.getGridServiceContainer().isDiscovered()) {
                    isValidState = true;
                    break;
                }
            }
            return isValidState;
        }

        public String getFailureMessage() {
            if (isTimedOut()) {
                return "relocation timeout of processing unit instance " + instanceId + " from "
                        + gscToString(sourceContainer) + " to " + gscToString(targetContainer);
            }

            if (getException() != null) {
                return getException().getMessage();
            }

            throw new IllegalStateException("Relocation has not encountered any failure.");

        }

        public GridServiceContainer getTargetContainer() {
            return targetContainer;
        }

        public ProcessingUnit getProcessingUnit() {
            return pu;
        }

        public int getInstanceId() {
            return instanceId;
        }

        public GridServiceContainer getSourceContainer() {
            return sourceContainer;
        }

        public GridServiceContainer[] getReplicaitonSourceContainers() {
            return replicationSourceContainers;
        }

    };
}
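This example hand-rolls a future whose get() follows the same convention as java.util.concurrent.Future#get(long, TimeUnit): once the deadline has passed with no result or error recorded, it throws a TimeoutException describing the operation. A reduced sketch of that shape, with illustrative names and none of the relocation logic:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;

final class PollingResult<T> {

    private final long deadlineMillis;
    private volatile T value;
    private volatile Throwable failure;

    PollingResult(long timeoutMillis) {
        this.deadlineMillis = System.currentTimeMillis() + timeoutMillis;
    }

    void complete(T v) { this.value = v; }

    void fail(Throwable t) { this.failure = t; }

    // Callers poll get(); once the deadline passes with no result,
    // a TimeoutException is thrown instead of blocking forever.
    T get() throws ExecutionException, TimeoutException {
        if (failure != null) {
            throw new ExecutionException(failure);
        }
        if (value == null) {
            if (System.currentTimeMillis() > deadlineMillis) {
                throw new TimeoutException("Operation timed out");
            }
            throw new IllegalStateException("Operation is not done yet.");
        }
        return value;
    }
}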

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutor.java

/**
 * Evaluate a script and allow for the submission of alteration to the entire evaluation execution lifecycle.
 *
 * @param script the script to evaluate
 * @param language the language to evaluate it in
 * @param boundVars the bindings to evaluate in the context of the script
 * @param lifeCycle a set of functions that can be applied at various stages of the evaluation process
 */
public CompletableFuture<Object> eval(final String script, final String language, final Bindings boundVars,
        final LifeCycle lifeCycle) {
    final String lang = Optional.ofNullable(language).orElse("gremlin-groovy");

    logger.debug("Preparing to evaluate script - {} - in thread [{}]", script,
            Thread.currentThread().getName());

    final Bindings bindings = new SimpleBindings();
    bindings.putAll(globalBindings);
    bindings.putAll(boundVars);

    final CompletableFuture<Object> evaluationFuture = new CompletableFuture<>();
    final FutureTask<Void> f = new FutureTask<>(() -> {
        try {
            lifeCycle.getBeforeEval().orElse(beforeEval).accept(bindings);

            logger.debug("Evaluating script - {} - in thread [{}]", script, Thread.currentThread().getName());

            final Object o = scriptEngines.eval(script, bindings, lang);

            // apply a transformation before sending back the result - useful when trying to force serialization
            // in the same thread that the eval took place given ThreadLocal nature of graphs as well as some
            // transactional constraints
            final Object result = lifeCycle.getTransformResult().isPresent()
                    ? lifeCycle.getTransformResult().get().apply(o)
                    : o;

            // a mechanism for taking the final result and doing something with it in the same thread, but
            // AFTER the eval and transform are done and that future completed.  this provides a final means
            // for working with the result in the same thread as it was eval'd
            if (lifeCycle.getWithResult().isPresent())
                lifeCycle.getWithResult().get().accept(result);

            lifeCycle.getAfterSuccess().orElse(afterSuccess).accept(bindings);

            // the evaluationFuture must be completed after all processing as an exception in lifecycle events
            // that must raise as an exception to the caller who has the returned evaluationFuture. in other words,
            // if it occurs before this point, then the handle() method won't be called again if there is an
            // exception that ends up below trying to completeExceptionally()
            evaluationFuture.complete(result);
        } catch (Throwable ex) {
            final Throwable root = null == ex.getCause() ? ex : ExceptionUtils.getRootCause(ex);

            // thread interruptions will typically come as the result of a timeout, so in those cases,
            // check for that situation and convert to TimeoutException
            if (root instanceof InterruptedException)
                evaluationFuture.completeExceptionally(new TimeoutException(String.format(
                        "Script evaluation exceeded the configured 'scriptEvaluationTimeout' threshold of %s ms for request [%s]: %s",
                        scriptEvaluationTimeout, script, root.getMessage())));
            else {
                lifeCycle.getAfterFailure().orElse(afterFailure).accept(bindings, root);
                evaluationFuture.completeExceptionally(root);
            }
        }

        return null;
    });

    executorService.execute(f);

    if (scriptEvaluationTimeout > 0) {
        // Schedule a timeout in the thread pool for future execution
        final ScheduledFuture<?> sf = scheduledExecutorService.schedule(() -> {
            logger.warn("Timing out script - {} - in thread [{}]", script, Thread.currentThread().getName());
            if (!f.isDone()) {
                lifeCycle.getAfterTimeout().orElse(afterTimeout).accept(bindings);
                f.cancel(true);
            }
        }, scriptEvaluationTimeout, TimeUnit.MILLISECONDS);

        // Cancel the scheduled timeout if the eval future is complete or the script evaluation failed
        // with exception
        evaluationFuture.handleAsync((v, t) -> {
            logger.debug(
                    "Killing scheduled timeout on script evaluation as the eval completed (possibly with exception).");
            return sf.cancel(true);
        });
    }

    return evaluationFuture;
}
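Here the TimeoutException is never thrown directly; it is constructed with a formatted message and used to complete a CompletableFuture exceptionally, so the caller observes it when the future is consumed. A simplified sketch of that hand-off, assuming an illustrative single-purpose scheduler rather than GremlinExecutor's configuration:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class FutureWithTimeout {

    // Fails the given future with a TimeoutException whose message records
    // the configured threshold, unless something completes it first.
    public static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future, long timeoutMillis) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.schedule(() -> {
            future.completeExceptionally(new TimeoutException(
                    "Evaluation exceeded the configured timeout of " + timeoutMillis + " ms"));
        }, timeoutMillis, TimeUnit.MILLISECONDS);
        // Stop the scheduler once the future settles, normally or exceptionally.
        future.whenComplete((v, t) -> scheduler.shutdown());
        return future;
    }
}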

From source file:io.cloudslang.content.services.WSManRemoteShellService.java

/**
 * Waits for a specific command that is running on a remote shell to finish its execution.
 *
 * @param csHttpClient
 * @param httpClientInputs
 * @param shellId
 * @param commandId
 * @param wsManRequestInputs
 * @return the command execution result and exit code.
 * @throws RuntimeException
 * @throws IOException
 * @throws URISyntaxException
 * @throws TransformerException
 * @throws TimeoutException
 * @throws XPathExpressionException
 * @throws SAXException
 * @throws ParserConfigurationException
 * @throws InterruptedException
 */
private Map<String, String> receiveCommandResult(CSHttpClient csHttpClient, HttpClientInputs httpClientInputs,
        String shellId, String commandId, WSManRequestInputs wsManRequestInputs)
        throws RuntimeException, IOException, URISyntaxException, TransformerException, TimeoutException,
        XPathExpressionException, SAXException, ParserConfigurationException, InterruptedException {
    String documentStr = ResourceLoader.loadAsString(RECEIVE_REQUEST_XML);
    documentStr = createReceiveRequestBody(documentStr, httpClientInputs.getUrl(), shellId, commandId,
            String.valueOf(wsManRequestInputs.getMaxEnvelopeSize()), wsManRequestInputs.getWinrmLocale(),
            String.valueOf(wsManRequestInputs.getOperationTimeout()));
    Map<String, String> receiveResult;
    while (true) {
        receiveResult = executeRequest(csHttpClient, httpClientInputs, documentStr);
        if (executionIsTimedOut(commandExecutionStartTime, wsManRequestInputs.getOperationTimeout())) {
            throw new TimeoutException(EXECUTION_TIMED_OUT);
        } else if (WSManUtils.isSpecificResponseAction(receiveResult.get(RETURN_RESULT),
                RECEIVE_RESPONSE_ACTION)
                && WSManUtils.commandExecutionIsDone(receiveResult.get(RETURN_RESULT))) {
            return processCommandExecutionResponse(receiveResult);
        } else if (WSManUtils.isFaultResponse(receiveResult.get(RETURN_RESULT))) {
            throw new RuntimeException(WSManUtils.getResponseFault(receiveResult.get(RETURN_RESULT)));
        }

        try {
            Thread.sleep(200);
        } catch (InterruptedException e) {
            throw e;
        }
    }

}

From source file:org.xflatdb.xflat.db.XFlatDatabase.java

private void doShutdown(int timeout) throws TimeoutException {
    if (!this.state.compareAndSet(DatabaseState.Running, DatabaseState.ShuttingDown)) {
        return;
    }

    if (log.isTraceEnabled())
        log.trace(String.format("Shutting down, timeout %dms", timeout));

    //by default, wait as long as it takes
    Long lTimeout = Long.MAX_VALUE;
    if (timeout > 0) {
        //wait only until the timeout
        lTimeout = System.currentTimeMillis() + timeout;
    }

    //spin them all down
    Set<EngineBase> engines = new HashSet<>();
    for (Map.Entry<String, TableMetadata> table : this.tables.entrySet()) {
        try {
            EngineBase e = table.getValue().spinDown(true, false);
            if (e != null) {
                if (e.getState() == EngineState.Running) {
                    //don't care, force spin down
                    e.spinDown(null);
                }
                engines.add(e);
            }
        } catch (Exception ex) {
            //eat
        }
    }

    //save all metadata
    for (Map.Entry<String, TableMetadata> table : this.tables.entrySet()) {
        try {
            this.metadataFactory.saveTableMetadata(table.getValue());
        } catch (IOException ex) {
            this.log.warn("Unable to save metadata for table " + table.getKey(), ex);
        }
    }

    this.tables.clear();

    //wait for the engines to finish spinning down
    do {
        Iterator<EngineBase> it = engines.iterator();
        while (it.hasNext()) {
            EngineBase e = it.next();
            EngineState state = e.getState();
            if (state == EngineState.Uninitialized || state == EngineState.SpunDown) {
                it.remove();
                continue;
            }
        }

        if (engines.isEmpty()) {
            //COOL! we're done
            return;
        }

    } while (System.currentTimeMillis() < lTimeout);

    //force any remaining tables to spin down now
    boolean anyLeft = false;
    for (EngineBase engine : engines) {
        anyLeft = true;
        try {
            if (engine != null)
                engine.forceSpinDown();
        } catch (Exception ex) {
            //eat
        }
    }

    if (anyLeft)
        throw new TimeoutException("Shutdown timed out");

    try {
        Runtime.getRuntime().removeShutdownHook(shutdownHook);
    } catch (Exception ex) {
        //that's ok
    }
}
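The JDK executors use the same convention for timed shutdown: awaitTermination reports expiry by returning false, and it is up to the caller whether to surface that as a TimeoutException. A small hedged sketch (the pool, timeout, and message text are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ShutdownWithTimeout {

    // Waits for the pool to drain; on expiry, forces shutdown and raises
    // TimeoutException so callers can treat this like other timed operations.
    public static void shutdownOrTimeout(ExecutorService pool, long timeoutMillis)
            throws InterruptedException, TimeoutException {
        pool.shutdown();
        if (!pool.awaitTermination(timeoutMillis, TimeUnit.MILLISECONDS)) {
            pool.shutdownNow();
            throw new TimeoutException("Shutdown timed out after " + timeoutMillis + " ms");
        }
    }
}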

From source file:org.cloudifysource.esc.driver.provisioning.byon.ByonProvisioningDriver.java

@Override
public MachineDetails[] startManagementMachines(final long duration, final TimeUnit unit)
        throws TimeoutException, CloudProvisioningException {
    if (duration < 0) {
        throw new TimeoutException("Starting a new machine timed out");
    }
    final long endTime = System.currentTimeMillis() + unit.toMillis(duration);

    logger.info("DefaultCloudProvisioning: startMachine - management == " + management);

    // first check if management already exists
    final MachineDetails[] mds = findManagementInAdmin();
    if (mds.length != 0) {
        return mds;
    }

    // launch the management machines
    publishEvent(EVENT_ATTEMPT_START_MGMT_VMS);
    final int numberOfManagementMachines = this.cloud.getProvider().getNumberOfManagementMachines();
    final MachineDetails[] createdMachines = doStartManagementMachines(endTime, numberOfManagementMachines);
    publishEvent(EVENT_MGMT_VMS_STARTED);
    return createdMachines;
}