Example usage for java.util.concurrent ExecutionException getCause

List of usage examples for java.util.concurrent ExecutionException getCause

Introduction

This page shows example usages of the java.util.concurrent.ExecutionException method getCause().

Prototype

public synchronized Throwable getCause() 

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
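
As a quick orientation before the project examples below, here is a minimal, self-contained sketch of the typical pattern (the class name, single-thread executor, and IllegalStateException are illustrative choices, not taken from any of the projects below): a task submitted to an ExecutorService fails, Future.get() wraps that failure in an ExecutionException, and getCause() recovers the original throwable so it can be logged, checked with instanceof, rethrown, or wrapped.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // Submit a task that fails; the executor stores the exception inside the Future.
        Future<String> future = pool.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                throw new IllegalStateException("task failed");
            }
        });
        try {
            future.get();
        } catch (ExecutionException e) {
            // getCause() returns the exception thrown inside call(), not the ExecutionException wrapper.
            Throwable cause = e.getCause();
            System.out.println("Task failed with: " + cause);
        } finally {
            pool.shutdown();
        }
    }
}

Most of the usage examples that follow apply this same pattern, differing mainly in how they handle the unwrapped cause.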

Usage

From source file:org.apache.hadoop.hbase.client.HTable.java

/**
 * {@inheritDoc}
 */
@Override
public <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey, byte[] endKey,
        final Batch.Call<T, R> callable, final Batch.Callback<R> callback) throws ServiceException, Throwable {

    // get regions covered by the row range
    List<byte[]> keys = getStartKeysInRange(startKey, endKey);

    Map<byte[], Future<R>> futures = new TreeMap<byte[], Future<R>>(Bytes.BYTES_COMPARATOR);
    for (final byte[] r : keys) {
        final RegionCoprocessorRpcChannel channel = new RegionCoprocessorRpcChannel(connection, tableName, r);
        Future<R> future = pool.submit(new Callable<R>() {
            public R call() throws Exception {
                T instance = ProtobufUtil.newServiceStub(service, channel);
                R result = callable.call(instance);
                byte[] region = channel.getLastRegion();
                if (callback != null) {
                    callback.update(region, r, result);
                }
                return result;
            }
        });
        futures.put(r, future);
    }
    for (Map.Entry<byte[], Future<R>> e : futures.entrySet()) {
        try {
            e.getValue().get();
        } catch (ExecutionException ee) {
            LOG.warn("Error calling coprocessor service " + service.getName() + " for row "
                    + Bytes.toStringBinary(e.getKey()), ee);
            throw ee.getCause();
        } catch (InterruptedException ie) {
            throw new InterruptedIOException("Interrupted calling coprocessor service " + service.getName()
                    + " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie);
        }
    }
}

From source file:org.openspaces.grid.gsm.machines.DefaultMachinesSlaEnforcementEndpoint.java

private void cleanMachinesGoingDown(AbstractMachinesSlaPolicy sla)
        throws FailedToStopMachineException, FailedToStopGridServiceAgentException {

    for (FutureStoppedMachine futureStoppedMachine : state.getMachinesGoingDown(getKey(sla))) {

        GridServiceAgent agent = futureStoppedMachine.getGridServiceAgent();
        Exception exception = null;
        try {
            if (futureStoppedMachine.isDone()) {
                futureStoppedMachine.get();

                if (agent.isDiscovered()) {
                    throw new IllegalStateException(
                            "Agent [" + agent.getUid() + "] should not be discovered at this point.");
                }
                removeFutureStoppedMachine(sla, futureStoppedMachine);
            }
        } catch (ExecutionException e) {
            // if runtime or error propagate exception "as-is"
            Throwable cause = e.getCause();
            if (cause instanceof TimeoutException || cause instanceof ElasticMachineProvisioningException
                    || cause instanceof ElasticGridServiceAgentProvisioningException
                    || cause instanceof InterruptedException) {
                // expected exception
                exception = e;
            } else {
                throw new IllegalStateException("Unexpected Exception from machine provisioning.", cause);
            }
        } catch (TimeoutException e) {
            // expected exception
            exception = e;
        }

        if (exception != null) {
            if (logger.isDebugEnabled()) {
                if (agent.isDiscovered()) {
                    logger.debug("Agent [" + agent.getUid()
                            + "] is still discovered. Another processing unit may use it if needed."
                            + "if not, another attempt to shut it down will be executed.");
                } else {
                    logger.debug("agent [" + agent.getUid()
                            + "] is not discovered. but an error happened while terminating the machine");
                }

            }
            removeFutureStoppedMachine(sla, futureStoppedMachine);
            if (exception.getCause() != null
                    && exception.getCause() instanceof ElasticGridServiceAgentProvisioningException) {
                throw new FailedToStopGridServiceAgentException(pu, agent, exception);
            }
            throw new FailedToStopMachineException(pu, agent, exception);
        }
    }
}

From source file:com.spotify.docker.client.DefaultDockerClientTest.java

@Test(expected = DockerTimeoutException.class)
public void testConnectionRequestTimeout() throws Exception {
    final int connectionPoolSize = 1;
    final int callableCount = connectionPoolSize * 100;

    final ExecutorService executor = Executors.newCachedThreadPool();
    final CompletionService completion = new ExecutorCompletionService(executor);

    // Spawn and wait on many more containers than the connection pool size.
    // This should cause a timeout once the connection pool is exhausted.

    try (final DockerClient dockerClient = DefaultDockerClient.fromEnv().connectionPoolSize(connectionPoolSize)
            .build()) {
        // Create container
        final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
                .cmd("sh", "-c", "while :; do sleep 1; done").build();
        final String name = randomName();
        final ContainerCreation creation = dockerClient.createContainer(config, name);
        final String id = creation.id();

        // Start the container
        dockerClient.startContainer(id);

        // Submit a bunch of waitContainer requests
        for (int i = 0; i < callableCount; i++) {
            //noinspection unchecked
            completion.submit(new Callable<ContainerExit>() {
                @Override
                public ContainerExit call() throws Exception {
                    return dockerClient.waitContainer(id);
                }
            });
        }

        // Wait for the requests to complete or throw expected exception
        for (int i = 0; i < callableCount; i++) {
            try {
                completion.take().get();
            } catch (ExecutionException e) {
                Throwables.propagateIfInstanceOf(e.getCause(), DockerTimeoutException.class);
                throw e;
            }
        }
    } finally {
        executor.shutdown();
    }
}

From source file:org.cloudifysource.rest.controllers.DeploymentsController.java

/********************************
 * Returns the last event for a specific operation.
 *
 * @param operationId
 *            the operation ID.
 * @return the last event received for this operation. May be an empty set.
 * @throws Throwable
 *             in case of an error while retrieving events.
 */
@RequestMapping(value = "{operationId}/events/last", method = RequestMethod.GET)
public DeploymentEvents getLastDeploymentEvent(@PathVariable final String operationId) throws Throwable {

    EventsCacheKey key = new EventsCacheKey(operationId);
    logger.fine(EventsUtils.getThreadId() + " Received request for last event of key : " + key);
    EventsCacheValue value;
    try {
        logger.fine(EventsUtils.getThreadId() + " Retrieving events from cache for key : " + key);
        value = eventsCache.get(key);
    } catch (final ExecutionException e) {
        throw e.getCause();
    }

    // we don't want another request to modify our object during this calculation.
    synchronized (value.getMutex()) {
        eventsCache.refresh(key);
        int lastEventId = value.getLastEventIndex();
        if (lastEventId > 0) {
            lastEventId = lastEventId - 1;
        }
        // return the events. this MAY or MAY NOT be the complete set of events requested.
        // request for specific events is treated as best effort. no guarantees all events are returned.
        return EventsUtils.extractDesiredEvents(value.getEvents(), lastEventId, lastEventId);
    }
}

From source file:org.cloudifysource.rest.controllers.DeploymentsController.java

/**
 * Retrieves events based on deployment id. The deployment id may be of service or application. In the case of an
 * application deployment id, all services events will be returned.
 *
 * @param deploymentId
 *            The deployment id given at install time.
 * @param from
 *            The starting index.
 * @param to
 *            The finish index.
 * @return {@link org.cloudifysource.dsl.rest.response.DeploymentEvents} - The deployment events.
 * @throws Throwable
 *             Thrown in case of any error.
 */
@RequestMapping(value = "{deploymentId}/events", method = RequestMethod.GET)
public DeploymentEvents getDeploymentEvents(@PathVariable final String deploymentId,
        @RequestParam(required = false, defaultValue = "0") final int from,
        @RequestParam(required = false, defaultValue = "-1") final int to) throws Throwable {

    // limit the default number of events returned to the client.
    int actualTo = to;
    if (to == -1) {
        actualTo = from + MAX_NUMBER_OF_EVENTS;
    }

    EventsCacheKey key = new EventsCacheKey(deploymentId);
    logger.fine(EventsUtils.getThreadId() + " Received request for events [" + from + "]-[" + to + "] . key : "
            + key);
    EventsCacheValue value;
    try {
        logger.fine(EventsUtils.getThreadId() + " Retrieving events from cache for key : " + key);
        value = eventsCache.get(key);
    } catch (final ExecutionException e) {
        throw e.getCause();
    }

    // we don't want another request to modify our object during this calculation.
    synchronized (value.getMutex()) {
        if (!EventsUtils.eventsPresent(value.getEvents(), from, actualTo)) {
            // enforce time restriction on refresh operations.
            long now = System.currentTimeMillis();
            if (now - value.getLastRefreshedTimestamp() > REFRESH_INTERVAL_MILLIS) {
                logger.fine(EventsUtils.getThreadId() + " Some events are missing from cache. Refreshing...");
                // refresh the cache for this deployment.
                eventsCache.refresh(key);
            }
        } else {
            logger.fine(EventsUtils.getThreadId() + " Found all relevant events in cache.");
        }

        // return the events. this MAY or MAY NOT be the complete set of events requested.
        // request for specific events is treated as best effort. no guarantees all events are returned.
        return EventsUtils.extractDesiredEvents(value.getEvents(), from, actualTo);
    }
}

From source file:org.openspaces.grid.gsm.machines.DefaultMachinesSlaEnforcementEndpoint.java

/**
 * @param sla
 * @param discoveredAgents 
 * @return true - if removed future agent from the state
 * @throws InconsistentMachineProvisioningException 
 * @throws UnexpectedShutdownOfNewGridServiceAgentException 
 * @throws MachinesSlaEnforcementInProgressException 
 * @throws FailedToStartNewGridServiceAgentException 
 * @throws FailedToStartNewMachineException 
 * @throws ExpectedMachineWithMoreMemoryException 
 * @throws FailedMachineProvisioningException
 * @throws FailedGridServiceAgentReconnectedException 
 */
private void validateHealthyAgent(AbstractMachinesSlaPolicy sla, Collection<GridServiceAgent> discoveredAgents,
        FutureGridServiceAgent futureAgent)
        throws UnexpectedShutdownOfNewGridServiceAgentException, InconsistentMachineProvisioningException,
        FailedToStartNewGridServiceAgentException, FailedToStartNewMachineException,
        ExpectedMachineWithMoreMemoryException, FailedGridServiceAgentReconnectedException {

    final NonBlockingElasticMachineProvisioning machineProvisioning = sla.getMachineProvisioning();
    final Collection<String> usedAgentUids = state.getAllUsedAgentUids();
    final Collection<String> usedAgentUidsForPu = state.getUsedAgentUids(getKey(sla));

    StartedGridServiceAgent newStartedAgent = null;
    {
        Exception exception = null;
        try {
            newStartedAgent = futureAgent.get();
        } catch (ExecutionException e) {
            // if runtime or error propagate exception "as-is"
            Throwable cause = e.getCause();
            if (cause instanceof TimeoutException || cause instanceof ElasticMachineProvisioningException
                    || cause instanceof ElasticGridServiceAgentProvisioningException
                    || cause instanceof InterruptedException) {
                // expected exception
                exception = e;
            } else {
                throw new IllegalStateException("Unexpected Exception from machine provisioning.", e);
            }
        } catch (TimeoutException e) {
            // expected exception
            exception = e;
        }

        if (exception != null) {

            if (exception.getCause() != null
                    && exception.getCause() instanceof ElasticGridServiceAgentProvisioningException) {
                throw new FailedToStartNewGridServiceAgentException(pu, exception);
            }
            throw new FailedToStartNewMachineException(pu, exception);
        }
    }
    if (newStartedAgent == null) {
        throw new IllegalStateException(
                "Machine provisioning future is done without exception, but returned null.");
    }

    final GridServiceAgent newAgent = newStartedAgent.getAgent();
    if (newAgent == null) {
        throw new IllegalStateException(
                "Machine provisioning future is done without exception, but returned a null agent.");
    }

    GSAReservationId actualReservationId = ((InternalGridServiceAgent) newAgent).getReservationId();
    if (actualReservationId == null) {
        throw new IllegalStateException(
                "Machine provisioning future is done without exception, but returned a null reservationId from the agent");
    }
    GSAReservationId expectedReservationId = futureAgent.getReservationId();
    if (!actualReservationId.equals(expectedReservationId)) {
        throw new IllegalStateException(
                "Machine provisioning future is done without exception, but returned an agent "
                        + agentToString(newAgent) + "with the wrong reservationId: expected="
                        + expectedReservationId + " actual=" + actualReservationId);
    }

    if (!newAgent.isDiscovered()) {
        UnexpectedShutdownOfNewGridServiceAgentException unexpectedShutdownException = new UnexpectedShutdownOfNewGridServiceAgentException(
                newAgent.getMachine(), pu);
        if (logger.isWarnEnabled()) {
            logger.warn("Failed to start agent on new machine.", unexpectedShutdownException);
        }
        throw unexpectedShutdownException;
    }

    if (usedAgentUidsForPu.contains(newAgent.getUid())) {
        //some nasty bug in machine provisioning implementation 
        throw new IllegalStateException("Machine provisioning for " + pu.getName() + " "
                + "has provided the agent " + agentToString(newAgent) + " which is already in use by this PU."
                + "The machine is ignored");
    }

    if (usedAgentUids.contains(newAgent.getUid())) {
        //some nasty bug in machine provisioning implementation        
        throw new IllegalStateException("Machine provisioning for " + pu.getName() + " "
                + "has provided the agent " + agentToString(newAgent)
                + " which is already in use by another PU." + "This machine is ignored");
    }

    if (!discoveredAgents.contains(newAgent)) {

        //Handle the case of a new machine that was started with {@link NonBlockingElasticMachineProvisioning#startMachinesAsync(CapacityRequirements, long, TimeUnit)}
        //but still not in the list returned by {@link NonBlockingElasticMachineProvisioning#getDiscoveredMachinesAsync(long, TimeUnit)}

        final NonBlockingElasticMachineProvisioning oldMachineProvisioning = futureAgent
                .getMachineProvisioning();
        if (
        // checking for a bug in the implementation of machineProvisioning
        oldMachineProvisioning != null && !MachinesSlaUtils.isAgentConformsToMachineProvisioningConfig(newAgent,
                oldMachineProvisioning.getConfig()) &&

        // ignore error if machine provisioning was modified
                oldMachineProvisioning == machineProvisioning) {

            throw new IllegalStateException(agentToString(newAgent)
                    + " has been started but with the wrong zone or management settings. " + "newagent.zones="
                    + newAgent.getExactZones() + " " + "oldMachineProvisioning.config.zones="
                    + oldMachineProvisioning.getConfig().getGridServiceAgentZones());
        }

        // providing a grace period for provisionedAgents to update.
        throw new InconsistentMachineProvisioningException(getProcessingUnit(), newAgent);
    }

    if (sla.isUndeploying()) {
        logger.info("Not performing memory validation on agent " + newAgent.getUid()
                + " since undeploy is in progress");
    } else { // no need to throw the exception since we don't need this machine now anyway.
        validateMemory(sla, newAgent);
    }

    // agent started successfully. check if failed machine reconnected to the network, 
    // if so - we do not need the new machine that was meant to replace it.
    final FailedGridServiceAgent failedAgent = futureAgent.getFailedGridServiceAgent();
    if (failedAgent != null) {
        final GridServiceAgent reconnectedFailedAgent = isFailedAgentDiscovered(discoveredAgents, failedAgent);
        if (reconnectedFailedAgent != null) {
            throw new FailedGridServiceAgentReconnectedException(pu, newAgent, failedAgent,
                    reconnectedFailedAgent);
        }
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.FSHLog.java

private Span blockOnSync(final SyncFuture syncFuture) throws IOException {
    // Now we have published the ringbuffer, halt the current thread until we get an answer back.
    try {
        syncFuture.get();
        return syncFuture.getSpan();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted", ie);
        throw convertInterruptedExceptionToIOException(ie);
    } catch (ExecutionException e) {
        throw ensureIOException(e.getCause());
    }
}

From source file:org.languagetool.server.TextChecker.java

void checkText(AnnotatedText aText, HttpExchange httpExchange, Map<String, String> parameters,
        ErrorRequestLimiter errorRequestLimiter, String remoteAddress) throws Exception {
    checkParams(parameters);
    long timeStart = System.currentTimeMillis();
    UserLimits limits = ServerTools.getUserLimits(parameters, config);

    // logging information
    String agent = parameters.get("useragent") != null ? parameters.get("useragent") : "-";
    Long agentId = null, userId = null;
    if (logger.isLogging()) {
        DatabaseAccess db = DatabaseAccess.getInstance();
        agentId = db.getOrCreateClientId(parameters.get("useragent"));
        userId = limits.getPremiumUid();
    }
    String referrer = httpExchange.getRequestHeaders().getFirst("Referer");
    String userAgent = httpExchange.getRequestHeaders().getFirst("User-Agent");

    if (aText.getPlainText().length() > limits.getMaxTextLength()) {
        String msg = "limit: " + limits.getMaxTextLength() + ", size: " + aText.getPlainText().length();
        logger.log(new DatabaseAccessLimitLogEntry("MaxCharacterSizeExceeded", logServerId, agentId, userId,
                msg, referrer, userAgent));
        ServerMetricsCollector.getInstance()
                .logRequestError(ServerMetricsCollector.RequestErrorType.MAX_TEXT_SIZE);
        throw new TextTooLongException(
                "Your text exceeds the limit of " + limits.getMaxTextLength() + " characters (it's "
                        + aText.getPlainText().length() + " characters). Please submit a shorter text.");
    }
    UserConfig userConfig = new UserConfig(
            limits.getPremiumUid() != null ? getUserDictWords(limits.getPremiumUid()) : Collections.emptyList(),
            new HashMap<>(), config.getMaxSpellingSuggestions());

    // NOTE: at the moment, feedback for A/B-Tests is only delivered from this client, so only run tests there
    if (agent != null && agent.equals("ltorg")) {
        userConfig.setAbTest(config.getAbTest());
    }

    //print("Check start: " + text.length() + " chars, " + langParam);
    boolean autoDetectLanguage = getLanguageAutoDetect(parameters);
    List<String> preferredVariants = getPreferredVariants(parameters);
    if (parameters.get("noopLanguages") != null && !autoDetectLanguage) {
        ServerMetricsCollector.getInstance()
                .logRequestError(ServerMetricsCollector.RequestErrorType.INVALID_REQUEST);
        throw new IllegalArgumentException(
                "You can specify 'noopLanguages' only when also using 'language=auto'");
    }
    List<String> noopLangs = parameters.get("noopLanguages") != null
            ? Arrays.asList(parameters.get("noopLanguages").split(","))
            : Collections.emptyList();
    List<String> preferredLangs = parameters.get("preferredLanguages") != null
            ? Arrays.asList(parameters.get("preferredLanguages").split(","))
            : Collections.emptyList();
    DetectedLanguage detLang = getLanguage(aText.getPlainText(), parameters, preferredVariants, noopLangs,
            preferredLangs);
    Language lang = detLang.getGivenLanguage();
    Integer count = languageCheckCounts.get(lang.getShortCodeWithCountryAndVariant());
    if (count == null) {
        count = 1;
    } else {
        count++;
    }
    //print("Starting check: " + aText.getPlainText().length() + " chars, #" + count);
    String motherTongueParam = parameters.get("motherTongue");
    Language motherTongue = motherTongueParam != null ? Languages.getLanguageForShortCode(motherTongueParam)
            : null;
    boolean useEnabledOnly = "yes".equals(parameters.get("enabledOnly"))
            || "true".equals(parameters.get("enabledOnly"));
    List<Language> altLanguages = new ArrayList<>();
    if (parameters.get("altLanguages") != null) {
        String[] altLangParams = parameters.get("altLanguages").split(",\\s*");
        for (String langCode : altLangParams) {
            Language altLang = Languages.getLanguageForShortCode(langCode);
            altLanguages.add(altLang);
            if (altLang.hasVariant() && !altLang.isVariant()) {
                ServerMetricsCollector.getInstance()
                        .logRequestError(ServerMetricsCollector.RequestErrorType.INVALID_REQUEST);
                throw new IllegalArgumentException("You specified altLanguage '" + langCode
                        + "', but for this language you need to specify a variant, e.g. 'en-GB' instead of just 'en'");
            }
        }
    }
    List<String> enabledRules = getEnabledRuleIds(parameters);

    List<String> disabledRules = getDisabledRuleIds(parameters);
    List<CategoryId> enabledCategories = getCategoryIds("enabledCategories", parameters);
    List<CategoryId> disabledCategories = getCategoryIds("disabledCategories", parameters);

    if ((disabledRules.size() > 0 || disabledCategories.size() > 0) && useEnabledOnly) {
        ServerMetricsCollector.getInstance()
                .logRequestError(ServerMetricsCollector.RequestErrorType.INVALID_REQUEST);
        throw new IllegalArgumentException(
                "You cannot specify disabled rules or categories using enabledOnly=true");
    }
    if (enabledRules.isEmpty() && enabledCategories.isEmpty() && useEnabledOnly) {
        ServerMetricsCollector.getInstance()
                .logRequestError(ServerMetricsCollector.RequestErrorType.INVALID_REQUEST);
        throw new IllegalArgumentException(
                "You must specify enabled rules or categories when using enabledOnly=true");
    }

    boolean useQuerySettings = enabledRules.size() > 0 || disabledRules.size() > 0
            || enabledCategories.size() > 0 || disabledCategories.size() > 0;
    boolean allowIncompleteResults = "true".equals(parameters.get("allowIncompleteResults"));
    boolean enableHiddenRules = "true".equals(parameters.get("enableHiddenRules"));
    JLanguageTool.Mode mode = ServerTools.getMode(parameters);
    String callback = parameters.get("callback");
    QueryParams params = new QueryParams(altLanguages, enabledRules, disabledRules, enabledCategories,
            disabledCategories, useEnabledOnly, useQuerySettings, allowIncompleteResults, enableHiddenRules,
            mode, callback);

    Long textSessionId = null;
    try {
        if (parameters.containsKey("textSessionId")) {
            String textSessionIdStr = parameters.get("textSessionId");
            if (textSessionIdStr.contains(":")) { // transitioning to new format used in chrome addon
                // format: "{random number in 0..99999}:{unix time}"
                long random, timestamp;
                int sepPos = textSessionIdStr.indexOf(':');
                random = Long.valueOf(textSessionIdStr.substring(0, sepPos));
                timestamp = Long.valueOf(textSessionIdStr.substring(sepPos + 1));
                // use random number to choose a slice in possible range of values
                // then choose position in slice by timestamp
                long maxRandom = 100000;
                long randomSegmentSize = (Long.MAX_VALUE - maxRandom) / maxRandom;
                long segmentOffset = random * randomSegmentSize;
                if (timestamp > randomSegmentSize) {
                    print(String.format("Could not transform textSessionId '%s'", textSessionIdStr));
                }
                textSessionId = segmentOffset + timestamp;
            } else {
                textSessionId = Long.valueOf(textSessionIdStr);
            }

            userConfig.setTextSessionId(textSessionId);
        }
    } catch (NumberFormatException ex) {
        print("Could not parse textSessionId '" + parameters.get("textSessionId") + "' as long: "
                + ex.getMessage());
    }
    int textSize = aText.getPlainText().length();

    List<RuleMatch> ruleMatchesSoFar = Collections.synchronizedList(new ArrayList<>());

    Future<List<RuleMatch>> future = executorService.submit(new Callable<List<RuleMatch>>() {
        @Override
        public List<RuleMatch> call() throws Exception {
            // use to fake OOM in thread for testing:
            /*if (Math.random() < 0.1) {
              throw new OutOfMemoryError();
            }*/
            return getRuleMatches(aText, lang, motherTongue, parameters, params, userConfig,
                    f -> ruleMatchesSoFar.add(f));
        }
    });
    String incompleteResultReason = null;
    List<RuleMatch> matches;
    try {
        if (limits.getMaxCheckTimeMillis() < 0) {
            matches = future.get();
        } else {
            matches = future.get(limits.getMaxCheckTimeMillis(), TimeUnit.MILLISECONDS);
        }
    } catch (ExecutionException e) {
        future.cancel(true);
        if (ExceptionUtils.getRootCause(e) instanceof ErrorRateTooHighException) {
            ServerMetricsCollector.getInstance()
                    .logRequestError(ServerMetricsCollector.RequestErrorType.TOO_MANY_ERRORS);
            logger.log(new DatabaseCheckErrorLogEntry("ErrorRateTooHigh", logServerId, agentId, userId, lang,
                    detLang.getDetectedLanguage(), textSize, "matches: " + ruleMatchesSoFar.size()));
        }
        if (params.allowIncompleteResults
                && ExceptionUtils.getRootCause(e) instanceof ErrorRateTooHighException) {
            print(e.getMessage() + " - returning " + ruleMatchesSoFar.size()
                    + " matches found so far. Detected language: " + detLang);
            matches = new ArrayList<>(ruleMatchesSoFar); // threads might still be running, so make a copy
            incompleteResultReason = "Results are incomplete: " + ExceptionUtils.getRootCause(e).getMessage();
        } else if (e.getCause() != null && e.getCause() instanceof OutOfMemoryError) {
            throw (OutOfMemoryError) e.getCause();
        } else {
            throw new RuntimeException(e.getMessage() + ", detected: " + detLang, e);
        }
    } catch (TimeoutException e) {
        boolean cancelled = future.cancel(true);
        Path loadFile = Paths.get("/proc/loadavg"); // works in Linux only(?)
        String loadInfo = loadFile.toFile().exists() ? Files.readAllLines(loadFile).toString() : "(unknown)";
        if (errorRequestLimiter != null) {
            errorRequestLimiter.logAccess(remoteAddress, httpExchange.getRequestHeaders(), parameters);
        }
        String message = "Text checking took longer than allowed maximum of " + limits.getMaxCheckTimeMillis()
                + " milliseconds (cancelled: " + cancelled + ", lang: "
                + lang.getShortCodeWithCountryAndVariant() + ", detected: " + detLang + ", #" + count + ", "
                + aText.getPlainText().length() + " characters of text" + ", mode: "
                + mode.toString().toLowerCase() + ", h: " + reqCounter.getHandleCount() + ", r: "
                + reqCounter.getRequestCount() + ", system load: " + loadInfo + ")";
        if (params.allowIncompleteResults) {
            print(message + " - returning " + ruleMatchesSoFar.size() + " matches found so far");
            matches = new ArrayList<>(ruleMatchesSoFar); // threads might still be running, so make a copy
            incompleteResultReason = "Results are incomplete: text checking took longer than allowed maximum of "
                    + String.format(Locale.ENGLISH, "%.2f", limits.getMaxCheckTimeMillis() / 1000.0)
                    + " seconds";
        } else {
            ServerMetricsCollector.getInstance()
                    .logRequestError(ServerMetricsCollector.RequestErrorType.MAX_CHECK_TIME);
            logger.log(new DatabaseCheckErrorLogEntry("MaxCheckTimeExceeded", logServerId, agentId,
                    limits.getPremiumUid(), lang, detLang.getDetectedLanguage(), textSize,
                    "load: " + loadInfo));
            throw new RuntimeException(message, e);
        }
    }

    setHeaders(httpExchange);

    List<RuleMatch> hiddenMatches = new ArrayList<>();
    if (config.getHiddenMatchesServer() != null && params.enableHiddenRules
            && config.getHiddenMatchesLanguages().contains(lang)) {
        if (config.getHiddenMatchesServerFailTimeout() > 0 && lastHiddenMatchesServerTimeout != -1
                && System.currentTimeMillis() - lastHiddenMatchesServerTimeout < config
                        .getHiddenMatchesServerFailTimeout()) {
            ServerMetricsCollector.getInstance().logHiddenServerStatus(false);
            print("Warn: Skipped querying hidden matches server at " + config.getHiddenMatchesServer()
                    + " because of recent error/timeout (timeout=" + config.getHiddenMatchesServerFailTimeout()
                    + "ms).");
        } else {
            ResultExtender resultExtender = new ResultExtender(config.getHiddenMatchesServer(),
                    config.getHiddenMatchesServerTimeout());
            try {
                long start = System.currentTimeMillis();
                List<RemoteRuleMatch> extensionMatches = resultExtender
                        .getExtensionMatches(aText.getPlainText(), parameters);
                hiddenMatches = resultExtender.getFilteredExtensionMatches(matches, extensionMatches);
                long end = System.currentTimeMillis();
                print("Hidden matches: " + extensionMatches.size() + " -> " + hiddenMatches.size() + " in "
                        + (end - start) + "ms for " + lang.getShortCodeWithCountryAndVariant());
                ServerMetricsCollector.getInstance().logHiddenServerStatus(true);
                lastHiddenMatchesServerTimeout = -1;
            } catch (Exception e) {
                ServerMetricsCollector.getInstance().logHiddenServerStatus(false);
                print("Warn: Failed to query hidden matches server at " + config.getHiddenMatchesServer() + ": "
                        + e.getClass() + ": " + e.getMessage());
                lastHiddenMatchesServerTimeout = System.currentTimeMillis();
            }
        }
    }
    int compactMode = Integer.parseInt(parameters.getOrDefault("c", "0"));
    String response = getResponse(aText, detLang, motherTongue, matches, hiddenMatches, incompleteResultReason,
            compactMode);
    if (params.callback != null) {
        // JSONP - still needed today for the special case of hosting your own on-premise LT without SSL
        // and using it from a local MS Word (not Online Word) - issue #89 in the add-in repo:
        response = params.callback + "(" + response + ");";
    }
    String messageSent = "sent";
    String languageMessage = lang.getShortCodeWithCountryAndVariant();
    try {
        httpExchange.sendResponseHeaders(HttpURLConnection.HTTP_OK, response.getBytes(ENCODING).length);
        httpExchange.getResponseBody().write(response.getBytes(ENCODING));
        ServerMetricsCollector.getInstance().logResponse(HttpURLConnection.HTTP_OK);
    } catch (IOException exception) {
        // the client is disconnected
        messageSent = "notSent: " + exception.getMessage();
    }
    if (motherTongue != null) {
        languageMessage += " (mother tongue: " + motherTongue.getShortCodeWithCountryAndVariant() + ")";
    }
    if (autoDetectLanguage) {
        languageMessage += "[auto]";
    }
    languageCheckCounts.put(lang.getShortCodeWithCountryAndVariant(), count);
    int computationTime = (int) (System.currentTimeMillis() - timeStart);
    String version = parameters.get("v") != null ? ", v:" + parameters.get("v") : "";
    print("Check done: " + aText.getPlainText().length() + " chars, " + languageMessage + ", #" + count + ", "
            + referrer + ", " + matches.size() + " matches, " + computationTime + "ms, agent:" + agent + version
            + ", " + messageSent + ", q:" + (workQueue != null ? workQueue.size() : "?") + ", h:"
            + reqCounter.getHandleCount() + ", dH:" + reqCounter.getDistinctIps() + ", m:"
            + mode.toString().toLowerCase());

    int matchCount = matches.size();
    Map<String, Integer> ruleMatchCount = new HashMap<>();
    for (RuleMatch match : matches) {
        String ruleId = match.getRule().getId();
        ruleMatchCount.put(ruleId, ruleMatchCount.getOrDefault(ruleId, 0) + 1);
    }

    ServerMetricsCollector.getInstance().logCheck(lang, computationTime, textSize, matchCount, mode, agent,
            ruleMatchCount);

    if (!config.isSkipLoggingChecks()) {
        DatabaseCheckLogEntry logEntry = new DatabaseCheckLogEntry(userId, agentId, logServerId, textSize,
                matchCount, lang, detLang.getDetectedLanguage(), computationTime, textSessionId,
                mode.toString());
        logEntry.setRuleMatches(new DatabaseRuleMatchLogEntry(
                config.isSkipLoggingRuleMatches() ? Collections.emptyMap() : ruleMatchCount));
        logger.log(logEntry);
    }
}

From source file:com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java

private int addRecordsToQueue(LocalDateTime commitTimestamp, String commitScn, String xid)
        throws InterruptedException {
    TransactionIdKey key = new TransactionIdKey(xid);
    int seq = 0;
    bufferedRecordsLock.lock();
    HashQueue<RecordSequence> records;
    try {
        records = bufferedRecords.getOrDefault(key, EMPTY_LINKED_HASHSET);
        records.completeInserts();
        bufferedRecords.remove(key);
    } finally {
        bufferedRecordsLock.unlock();
    }
    final List<FutureWrapper> parseFutures = new ArrayList<>();
    while (!records.isEmpty()) {
        RecordSequence r = records.remove();
        if (configBean.keepOriginalQuery) {
            r.headers.put(QUERY_KEY, r.sqlString);
        }
        final Future<Record> recordFuture = parsingExecutor
                .submit(() -> generateRecord(r.sqlString, r.headers, r.opCode));
        parseFutures.add(new FutureWrapper(recordFuture, r.sqlString, r.seq));
    }
    records.close();
    LinkedList<RecordOffset> recordOffsets = new LinkedList<>();
    for (FutureWrapper recordFuture : parseFutures) {
        try {
            Record record = recordFuture.future.get();
            if (record != null) {
                final RecordOffset recordOffset = new RecordOffset(record,
                        new Offset(VERSION_UNCOMMITTED, commitTimestamp, commitScn, recordFuture.seq, xid));

                // Is this a record generated by a rollback? If it is find the previous record that matches this row id and
                // remove it from the queue.
                if (recordOffset.record.getHeader().getAttribute(ROLLBACK).equals(ONE)) {
                    String rowId = recordOffset.record.getHeader().getAttribute(ROWID_KEY);
                    Iterator<RecordOffset> reverseIter = recordOffsets.descendingIterator();
                    while (reverseIter.hasNext()) {
                        if (reverseIter.next().record.getHeader().getAttribute(ROWID_KEY).equals(rowId)) {
                            reverseIter.remove();
                            break;
                        }
                    }
                } else {
                    recordOffsets.add(recordOffset);
                }
            }
        } catch (ExecutionException e) {
            LOG.error("{}:{}", JDBC_405.getMessage(), e.getMessage(), e);
            final Throwable cause = e.getCause();
            if (cause instanceof UnparseableSQLException) {
                unparseable.offer(recordFuture.sql);
            } else {
                otherErrors.offer(new ErrorAndCause(JDBC_405, cause));
            }
        }
    }

    for (RecordOffset ro : recordOffsets) {
        try {
            seq = ro.offset.sequence;
            while (!recordQueue.offer(ro, 1, TimeUnit.SECONDS)) {
                if (getContext().isStopped()) {
                    return seq;
                }
            }
            LOG.debug(GENERATED_RECORD, ro.record, ro.record.getHeader().getAttribute(XID));
        } catch (InterruptedException ex) {
            try {
                errorRecordHandler.onError(JDBC_405, ex);
            } catch (StageException stageException) {
                addToStageExceptionsQueue(stageException);
            }
        }
    }
    return seq;
}

From source file:org.epics.archiverappliance.config.DefaultConfigService.java

@Override
public PolicyConfig computePolicyForPV(String pvName, MetaInfo metaInfo,
        UserSpecifiedSamplingParams userSpecParams) throws IOException {
    try (InputStream is = this.getPolicyText()) {
        logger.debug("Computing policy for pvName");
        HashMap<String, Object> pvInfo = new HashMap<String, Object>();
        pvInfo.put("dbrtype", metaInfo.getArchDBRTypes().toString());
        pvInfo.put("elementCount", metaInfo.getCount());
        pvInfo.put("eventRate", metaInfo.getEventRate());
        pvInfo.put("storageRate", metaInfo.getStorageRate());
        pvInfo.put("aliasName", metaInfo.getAliasName());
        if (userSpecParams != null && userSpecParams.getPolicyName() != null) {
            logger.debug("Passing user override of policy " + userSpecParams.getPolicyName()
                    + " as the dict entry policyName");
            pvInfo.put("policyName", userSpecParams.getPolicyName());
        }
        if (userSpecParams != null && userSpecParams.getControllingPV() != null) {
            pvInfo.put("controlPV", userSpecParams.getControllingPV());
        }

        HashMap<String, String> otherMetaInfo = metaInfo.getOtherMetaInfo();
        for (String otherMetaInfoKey : this.getExtraFields()) {
            if (otherMetaInfo.containsKey(otherMetaInfoKey)) {
                if (otherMetaInfoKey.equals("ADEL") || otherMetaInfoKey.equals("MDEL")) {
                    try {
                        pvInfo.put(otherMetaInfoKey, Double.parseDouble(otherMetaInfo.get(otherMetaInfoKey)));
                    } catch (Exception ex) {
                        logger.error("Exception adding MDEL and ADEL to the info", ex);
                    }
                } else {
                    pvInfo.put(otherMetaInfoKey, otherMetaInfo.get(otherMetaInfoKey));
                }
            }
        }

        if (logger.isDebugEnabled()) {
            StringBuilder buf = new StringBuilder();
            buf.append("Before computing policy for");
            buf.append(pvName);
            buf.append(" pvInfo is \n");
            for (String key : pvInfo.keySet()) {
                buf.append(key);
                buf.append("=");
                buf.append(pvInfo.get(key));
                buf.append("\n");
            }
            logger.debug(buf.toString());
        }

        try {
            // We only have one policy in the cache...
            ExecutePolicy executePolicy = theExecutionPolicy.get("ThePolicy");
            PolicyConfig policyConfig = executePolicy.computePolicyForPV(pvName, pvInfo);
            return policyConfig;
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            logger.error("Exception executing policy for pv " + pvName, cause);
            if (cause instanceof IOException) {
                throw (IOException) cause;
            } else {
                throw new IOException(cause);
            }
        }
    }
}