Example usage for java.lang Exception addSuppressed

List of usage examples for java.lang Exception addSuppressed

Introduction

On this page you can find example usages of java.lang.Exception addSuppressed.

Prototype

public final synchronized void addSuppressed(Throwable exception) 

Document

Appends the specified exception to the exceptions that were suppressed in order to deliver this exception.
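
For orientation, here is a minimal, self-contained sketch of the common pattern; it is not taken from any of the projects listed below, and the class and method names are purely illustrative. A primary exception is kept as the one that propagates, and any exception thrown during cleanup is attached to it with addSuppressed instead of being lost (try-with-resources applies the same mechanism automatically; suppressed exceptions can later be read back with getSuppressed()).

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class AddSuppressedSketch {

    // Reads the first line of a file; if both readLine() and close() fail,
    // the close() failure is attached to the primary exception via addSuppressed.
    static String readFirstLine(String path) throws IOException {
        BufferedReader reader = new BufferedReader(new FileReader(path));
        IOException primary = null;
        try {
            return reader.readLine();
        } catch (IOException e) {
            primary = e;
            throw e;
        } finally {
            try {
                reader.close();
            } catch (IOException closeFailure) {
                if (primary != null) {
                    // Keep the original failure as the propagating exception;
                    // record the cleanup failure alongside it.
                    primary.addSuppressed(closeFailure);
                } else {
                    throw closeFailure;
                }
            }
        }
    }
}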

Usage

From source file:no.digipost.api.useragreements.client.response.ResponseUtils.java

public static Optional<Exception> close(AutoCloseable... closeables) {
    Exception exception = null;
    for (AutoCloseable closeable : closeables) {
        try (AutoCloseable autoClosed = closeable) {
            continue;
        } catch (Exception e) {
            if (exception == null) {
                exception = e;
            } else {
                exception.addSuppressed(e);
            }
        }
    }
    return Optional.ofNullable(exception);
}

From source file:com.spotify.heroic.HeroicShell.java

static void interactive(Parameters params, CoreInterface core) throws Exception {
    log.info("Setting up interactive shell...");

    Exception e = null;
    try {
        runInteractiveShell(core);
    } catch (final Exception inner) {
        e = inner;
    }

    log.info("Closing core bridge...");

    try {
        core.shutdown();
    } catch (final Exception inner) {
        if (e != null) {
            inner.addSuppressed(e);
        }
        e = inner;
    }
    if (e != null) {
        throw e;
    }
}

From source file:org.elasticsearch.client.RestClient.java

private static Exception addSuppressedException(Exception suppressedException, Exception currentException) {
    if (suppressedException != null) {
        currentException.addSuppressed(suppressedException);
    }
    return currentException;
}

From source file:edu.uci.ics.asterix.metadata.feeds.FeedUtil.java

private static boolean preProcessingRequired(FeedConnectionId connectionId) {
    MetadataTransactionContext ctx = null;
    Feed feed = null;
    boolean preProcessingRequired = false;
    try {
        MetadataManager.INSTANCE.acquireReadLatch();
        ctx = MetadataManager.INSTANCE.beginTransaction();
        feed = MetadataManager.INSTANCE.getFeed(ctx, connectionId.getFeedId().getDataverse(),
                connectionId.getFeedId().getFeedName());
        preProcessingRequired = feed.getAppliedFunction() != null;
        MetadataManager.INSTANCE.commitTransaction(ctx);
    } catch (Exception e) {
        if (ctx != null) {
            try {
                MetadataManager.INSTANCE.abortTransaction(ctx);
            } catch (Exception abortException) {
                e.addSuppressed(abortException);
                throw new IllegalStateException(e);
            }
        }
    } finally {
        MetadataManager.INSTANCE.releaseReadLatch();
    }
    return preProcessingRequired;
}

From source file:com.splicemachine.derby.stream.control.ControlDataSetWriter.java

@Override
public DataSet<LocatedRow> write() throws StandardException {
    SpliceOperation operation = operationContext.getOperation();
    Txn txn = null;
    try {
        txn = SIDriver.driver().lifecycleManager().beginChildTransaction(getTxn(),
                pipelineWriter.getDestinationTable());
        pipelineWriter.setTxn(txn);
        operation.fireBeforeStatementTriggers();
        pipelineWriter.open(operation.getTriggerHandler(), operation);
        pipelineWriter.write(dataSet.values().toLocalIterator());
        ValueRow valueRow = new ValueRow(1);
        valueRow.setColumn(1, new SQLLongint(operationContext.getRecordsWritten()));
        operationContext.getActivation().getLanguageConnectionContext()
                .setRecordsImported(operationContext.getRecordsWritten());
        if (operation instanceof InsertOperation) {
            InsertOperation insertOperation = (InsertOperation) operation;
            BadRecordsRecorder brr = operationContext.getBadRecordsRecorder();
            /*
             * In Control-side execution, we have different operation contexts for each operation,
             * and all operations are held in this JVM. This means that parse errors could be present
             * at the context for the lower operation (i.e. in an import), so we need to collect those errors
             * directly.
             */
            List<SpliceOperation> ops = insertOperation.getOperationStack();
            for (SpliceOperation op : ops) {
                if (op == null || op == insertOperation || op.getOperationContext() == null)
                    continue;
                if (brr != null) {
                    brr = brr.merge(op.getOperationContext().getBadRecordsRecorder());
                } else {
                    brr = op.getOperationContext().getBadRecordsRecorder();
                }
            }
            long badRecords = (brr != null ? brr.getNumberOfBadRecords() : 0);
            operationContext.getActivation().getLanguageConnectionContext().setFailedRecords(badRecords);
            if (badRecords > 0) {
                String fileName = operationContext.getBadRecordFileName();
                operationContext.getActivation().getLanguageConnectionContext().setBadFile(fileName);
                if (insertOperation.isAboveFailThreshold(badRecords)) {
                    throw ErrorState.LANG_IMPORT_TOO_MANY_BAD_RECORDS.newException(fileName);
                }
            }
        }
        txn.commit();
        return new ControlDataSet<>(new SingletonIterator(new LocatedRow(valueRow)));
    } catch (Exception e) {
        if (txn != null) {
            try {
                txn.rollback();
            } catch (IOException e1) {
                e.addSuppressed(e1);
            }
        }
        throw Exceptions.parseException(e);
    } finally {
        try {
            if (pipelineWriter != null)
                pipelineWriter.close();
            operation.fireAfterStatementTriggers();
        } catch (Exception e) {
            throw Exceptions.parseException(e);
        }

    }
}

From source file:client.DockerSshClient.java

private <R> R withSession(final FunctionWithException<Session, R> f) throws DockerDeploymentClientException {
    Session session = null;
    try {
        session = _sshClient.startSession();
        // CHECKSTYLE.OFF: IllegalCatch - we need to catch everything, we'll rethrow it later
    } catch (final Exception e) {
        // CHECKSTYLE.ON: IllegalCatch
        throw new DockerDeploymentClientException("SSH Error", e);
    }

    final R ret;
    try {
        ret = f.accept(session);
        // CHECKSTYLE.OFF: IllegalCatch - we need to catch everything, we'll rethrow it later
    } catch (final Exception e) {
        // CHECKSTYLE.ON: IllegalCatch
        try {
            session.close();
        } catch (final TransportException | ConnectionException ignored) {
            // Don't care
            e.addSuppressed(ignored);
        }
        throw new DockerDeploymentClientException("SSH Error", e);
    }
    return ret;
}

From source file:io.druid.storage.cassandra.CassandraDataSegmentPuller.java

public com.metamx.common.FileUtils.FileCopyResult getSegmentFiles(final String key, final File outDir)
        throws SegmentLoadingException {
    log.info("Pulling index from C* at path[%s] to outDir[%s]", key, outDir);
    if (!outDir.exists()) {
        outDir.mkdirs();
    }

    if (!outDir.isDirectory()) {
        throw new ISE("outDir[%s] must be a directory.", outDir);
    }

    long startTime = System.currentTimeMillis();
    final File tmpFile = new File(outDir, "index.zip");
    log.info("Pulling to temporary local cache [%s]", tmpFile.getAbsolutePath());

    final com.metamx.common.FileUtils.FileCopyResult localResult;
    try {
        localResult = RetryUtils.retry(new Callable<com.metamx.common.FileUtils.FileCopyResult>() {
            @Override
            public com.metamx.common.FileUtils.FileCopyResult call() throws Exception {
                try (OutputStream os = new FileOutputStream(tmpFile)) {
                    final ObjectMetadata meta = ChunkedStorage.newReader(indexStorage, key, os)
                            .withBatchSize(BATCH_SIZE).withConcurrencyLevel(CONCURRENCY).call();
                }
                return new com.metamx.common.FileUtils.FileCopyResult(tmpFile);
            }
        }, Predicates.<Throwable>alwaysTrue(), 10);
    } catch (Exception e) {
        throw new SegmentLoadingException(e, "Unable to copy key [%s] to file [%s]", key,
                tmpFile.getAbsolutePath());
    }
    try {
        final com.metamx.common.FileUtils.FileCopyResult result = CompressionUtils.unzip(tmpFile, outDir);
        log.info("Pull of file[%s] completed in %,d millis (%s bytes)", key,
                System.currentTimeMillis() - startTime, result.size());
        return result;
    } catch (Exception e) {
        try {
            FileUtils.deleteDirectory(outDir);
        } catch (IOException e1) {
            log.error(e1, "Error clearing segment directory [%s]", outDir.getAbsolutePath());
            e.addSuppressed(e1);
        }
        throw new SegmentLoadingException(e, e.getMessage());
    } finally {
        if (!tmpFile.delete()) {
            log.warn("Could not delete cache file at [%s]", tmpFile.getAbsolutePath());
        }
    }
}

From source file:com.appdynamics.analytics.processor.event.ElasticSearchEventService.java

public void searchEvents(int requestVersion, String accountName, String eventType, String searchRequest,
        OutputStream out) {
    try {
        eventType = this.indexNameResolver.resolveEventType(eventType);
        accountName = AccountConfiguration.normalizeAccountName(accountName);

        verifyEventTypeRepairingIndicesIfNecessary(requestVersion, accountName, eventType);

        searchIndex(accountName, eventType, searchRequest, out);
    } catch (Exception e) {
        String clusterName = "Unavailable";
        try {
            String tempClusterName = this.clientProvider.getSearchClient(accountName).settings()
                    .get("cluster.name");
            if (tempClusterName != null) {
                clusterName = tempClusterName;
            }
        } catch (Exception ignored) {
            e.addSuppressed(ignored);
        }

        throw ElasticSearchServiceHelper.propagateAsException(e,
                "Failed to search account [{}] on clusters [{}] with event type [{}] on search request [{}]",
                new Object[] { accountName, clusterName, eventType, searchRequest });
    }
}

From source file:edu.uci.ics.asterix.aql.translator.AqlTranslator.java

private void abort(Exception rootE, Exception parentE, MetadataTransactionContext mdTxnCtx) {
    try {
        if (IS_DEBUG_MODE) {
            rootE.printStackTrace();
        }
        MetadataManager.INSTANCE.abortTransaction(mdTxnCtx);
    } catch (Exception e2) {
        parentE.addSuppressed(e2);
        throw new IllegalStateException(rootE);
    }
}

From source file:edu.uci.ics.asterix.aql.translator.AqlTranslator.java

private void handleDatasetDropStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    DropStatement stmtDelete = (DropStatement) stmt;
    String dataverseName = getActiveDataverse(stmtDelete.getDataverseName());
    String datasetName = stmtDelete.getDatasetName().getValue();

    ProgressState progress = ProgressState.NO_PROGRESS;
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);

    MetadataLockManager.INSTANCE.dropDatasetBegin(dataverseName, dataverseName + "." + datasetName);
    List<JobSpecification> jobsToExecute = new ArrayList<JobSpecification>();
    try {

        Dataset ds = MetadataManager.INSTANCE.getDataset(mdTxnCtx, dataverseName, datasetName);
        if (ds == null) {
            if (stmtDelete.getIfExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("There is no dataset with this name " + datasetName
                        + " in dataverse " + dataverseName + ".");
            }
        }

        Map<FeedConnectionId, Pair<JobSpecification, Boolean>> disconnectJobList = new HashMap<FeedConnectionId, Pair<JobSpecification, Boolean>>();
        if (ds.getDatasetType() == DatasetType.INTERNAL) {
            // prepare job spec(s) that would disconnect any active feeds involving the dataset.
            List<FeedConnectionId> feedConnections = FeedLifecycleListener.INSTANCE
                    .getActiveFeedConnections(null);
            if (feedConnections != null && !feedConnections.isEmpty()) {
                for (FeedConnectionId connection : feedConnections) {
                    Pair<JobSpecification, Boolean> p = FeedOperations
                            .buildDisconnectFeedJobSpec(metadataProvider, connection);
                    disconnectJobList.put(connection, p);
                    if (LOGGER.isLoggable(Level.INFO)) {
                        LOGGER.info("Disconnecting feed " + connection.getFeedId().getFeedName()
                                + " from dataset " + datasetName + " as dataset is being dropped");
                    }
                }
            }

            //#. prepare jobs to drop the dataset and the indexes in NC
            List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName,
                    datasetName);
            for (int j = 0; j < indexes.size(); j++) {
                if (indexes.get(j).isSecondaryIndex()) {
                    CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                            indexes.get(j).getIndexName());
                    jobsToExecute
                            .add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
                }
            }
            CompiledDatasetDropStatement cds = new CompiledDatasetDropStatement(dataverseName, datasetName);
            jobsToExecute.add(DatasetOperations.createDropDatasetJobSpec(cds, metadataProvider));

            //#. mark the existing dataset as PendingDropOp
            MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
            MetadataManager.INSTANCE.addDataset(mdTxnCtx,
                    new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getDatasetDetails(),
                            ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                            IMetadataEntity.PENDING_DROP_OP));

            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;

            //# disconnect the feeds
            for (Pair<JobSpecification, Boolean> p : disconnectJobList.values()) {
                runJob(hcc, p.first, true);
            }

            //#. run the jobs
            for (JobSpecification jobSpec : jobsToExecute) {
                runJob(hcc, jobSpec, true);
            }

            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        } else {
            // External dataset
            ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
            //#. prepare jobs to drop the dataset and the indexes in NC
            List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName,
                    datasetName);
            for (int j = 0; j < indexes.size(); j++) {
                if (ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
                    CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                            indexes.get(j).getIndexName());
                    jobsToExecute
                            .add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
                } else {
                    CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                            indexes.get(j).getIndexName());
                    jobsToExecute.add(
                            ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds, metadataProvider, ds));
                }
            }

            //#. mark the existing dataset as PendingDropOp
            MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
            MetadataManager.INSTANCE.addDataset(mdTxnCtx,
                    new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getDatasetDetails(),
                            ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                            IMetadataEntity.PENDING_DROP_OP));

            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;

            //#. run the jobs
            for (JobSpecification jobSpec : jobsToExecute) {
                runJob(hcc, jobSpec, true);
            }
            if (indexes.size() > 0) {
                ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
            }
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        }

        //#. finally, delete the dataset.
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
        // Drop the associated nodegroup
        String nodegroup = ds.getDatasetDetails().getNodeGroupName();
        if (!nodegroup.equalsIgnoreCase(MetadataConstants.METADATA_DEFAULT_NODEGROUP_NAME)) {
            MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx, dataverseName + ":" + datasetName);
        }

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }

        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            //#. execute compensation operations
            //   remove all the indexes in NC
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                //do not throw an exception since the metadata still needs to be compensated.
                e.addSuppressed(e2);
            }

            //   remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is inconsistent state: pending dataset(" + dataverseName
                        + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }

        throw e;
    } finally {
        MetadataLockManager.INSTANCE.dropDatasetEnd(dataverseName, dataverseName + "." + datasetName);
    }
}