Example usage for java.lang Exception addSuppressed

List of usage examples for java.lang Exception addSuppressed

Introduction

On this page you can find usage examples for java.lang Exception addSuppressed.

Prototype

public final synchronized void addSuppressed(Throwable exception) 

Document

Appends the specified exception to the exceptions that were suppressed in order to deliver this exception.
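
Before the project examples below, here is a minimal, self-contained sketch of the common pattern (the class and method names are hypothetical, invented for illustration): when a cleanup or rollback step fails after the primary operation has already failed, the secondary exception is attached to the primary one with addSuppressed, so neither failure is lost. The suppressed exceptions can later be retrieved with getSuppressed(), or they appear automatically in the printed stack trace.

public class AddSuppressedSketch {

    public static void main(String[] args) {
        try {
            runWithRollback();
        } catch (Exception e) {
            // The primary failure arrives here; the rollback failure travels with it.
            System.err.println("caught: " + e.getMessage());
            for (Throwable s : e.getSuppressed()) {
                System.err.println("suppressed: " + s.getMessage());
            }
        }
    }

    // If both the work and the rollback fail, keep the work failure as the
    // thrown exception and record the rollback failure as suppressed.
    static void runWithRollback() throws Exception {
        try {
            doWork();
        } catch (Exception primary) {
            try {
                rollback();
            } catch (Exception rollbackFailure) {
                primary.addSuppressed(rollbackFailure);
            }
            throw primary;
        }
    }

    static void doWork() throws Exception {
        throw new Exception("work failed");
    }

    static void rollback() throws Exception {
        throw new Exception("rollback failed");
    }
}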

Usage

From source file:org.apache.asterix.app.translator.QueryTranslator.java
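
In this external dataset refresh handler, addSuppressed is used on the recovery path of the catch block: if the abort job or the metadata cleanup that restores the dataset state fails with a second exception, that exception is attached to the original failure before an IllegalStateException is rethrown.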

protected void handleExternalDatasetRefreshStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    RefreshExternalDatasetStatement stmtRefresh = (RefreshExternalDatasetStatement) stmt;
    String dataverseName = getActiveDataverse(stmtRefresh.getDataverseName());
    String datasetName = stmtRefresh.getDatasetName().getValue();
    ExternalDatasetTransactionState transactionState = ExternalDatasetTransactionState.COMMIT;
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    MetadataLockManager.INSTANCE.refreshDatasetBegin(dataverseName, dataverseName + "." + datasetName);
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    JobSpecification spec = null;
    Dataset ds = null;
    List<ExternalFile> metadataFiles = null;
    List<ExternalFile> deletedFiles = null;
    List<ExternalFile> addedFiles = null;
    List<ExternalFile> appendedFiles = null;
    List<Index> indexes = null;
    Dataset transactionDataset = null;
    boolean lockAquired = false;
    boolean success = false;
    try {
        ds = MetadataManager.INSTANCE.getDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                datasetName);

        // Dataset exists ?
        if (ds == null) {
            throw new AlgebricksException(
                    "There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
        }
        // Dataset external ?
        if (ds.getDatasetType() != DatasetType.EXTERNAL) {
            throw new AlgebricksException("dataset " + datasetName + " in dataverse " + dataverseName
                    + " is not an external dataset");
        }
        // Dataset has indexes ?
        indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
        if (indexes.isEmpty()) {
            throw new AlgebricksException("External dataset " + datasetName + " in dataverse " + dataverseName
                    + " doesn't have any index");
        }

        // Record transaction time
        Date txnTime = new Date();

        // refresh lock here
        ExternalDatasetsRegistry.INSTANCE.refreshBegin(ds);
        lockAquired = true;

        // Get internal files
        metadataFiles = MetadataManager.INSTANCE.getDatasetExternalFiles(mdTxnCtx, ds);
        deletedFiles = new ArrayList<>();
        addedFiles = new ArrayList<>();
        appendedFiles = new ArrayList<>();

        // Compute delta
        // Now we compare snapshot with external file system
        if (ExternalIndexingOperations.isDatasetUptodate(ds, metadataFiles, addedFiles, deletedFiles,
                appendedFiles)) {
            ((ExternalDatasetDetails) ds.getDatasetDetails()).setRefreshTimestamp(txnTime);
            MetadataManager.INSTANCE.updateDataset(mdTxnCtx, ds);
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            // latch will be released in the finally clause
            return;
        }

        // At this point, we know data has changed in the external file system, record
        // transaction in metadata and start
        transactionDataset = ExternalIndexingOperations.createTransactionDataset(ds);
        /*
         * Remove old dataset record and replace it with a new one
         */
        MetadataManager.INSTANCE.updateDataset(mdTxnCtx, transactionDataset);

        // Add delta files to the metadata
        for (ExternalFile file : addedFiles) {
            MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
        }
        for (ExternalFile file : appendedFiles) {
            MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
        }
        for (ExternalFile file : deletedFiles) {
            MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
        }

        // Create the files index update job
        spec = ExternalIndexingOperations.buildFilesIndexUpdateOp(ds, metadataFiles, deletedFiles, addedFiles,
                appendedFiles, metadataProvider);

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;
        transactionState = ExternalDatasetTransactionState.BEGIN;

        // run the files update job
        JobUtils.runJob(hcc, spec, true);

        for (Index index : indexes) {
            if (!ExternalIndexingOperations.isFileIndex(index)) {
                spec = ExternalIndexingOperations.buildIndexUpdateOp(ds, index, metadataFiles, deletedFiles,
                        addedFiles, appendedFiles, metadataProvider);
                // run the files update job
                JobUtils.runJob(hcc, spec, true);
            }
        }

        // all index updates have completed successfully, record transaction state
        spec = ExternalIndexingOperations.buildCommitJob(ds, indexes, metadataProvider);

        // Acquire write latch again -> start a transaction and record the decision to commit
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        bActiveTxn = true;
        ((ExternalDatasetDetails) transactionDataset.getDatasetDetails())
                .setState(ExternalDatasetTransactionState.READY_TO_COMMIT);
        ((ExternalDatasetDetails) transactionDataset.getDatasetDetails()).setRefreshTimestamp(txnTime);
        MetadataManager.INSTANCE.updateDataset(mdTxnCtx, transactionDataset);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;
        transactionState = ExternalDatasetTransactionState.READY_TO_COMMIT;
        // We don't release the latch since this job is expected to be quick
        JobUtils.runJob(hcc, spec, true);
        // Start a new metadata transaction to record the final state of the transaction
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        bActiveTxn = true;

        for (ExternalFile file : metadataFiles) {
            if (file.getPendingOp() == ExternalFilePendingOp.PENDING_DROP_OP) {
                MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
            } else if (file.getPendingOp() == ExternalFilePendingOp.PENDING_NO_OP) {
                Iterator<ExternalFile> iterator = appendedFiles.iterator();
                while (iterator.hasNext()) {
                    ExternalFile appendedFile = iterator.next();
                    if (file.getFileName().equals(appendedFile.getFileName())) {
                        // delete existing file
                        MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
                        // delete existing appended file
                        MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, appendedFile);
                        // add the original file with appended information
                        appendedFile.setFileNumber(file.getFileNumber());
                        appendedFile.setPendingOp(ExternalFilePendingOp.PENDING_NO_OP);
                        MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, appendedFile);
                        iterator.remove();
                    }
                }
            }
        }

        // remove the deleted files delta
        for (ExternalFile file : deletedFiles) {
            MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
        }

        // insert new files
        for (ExternalFile file : addedFiles) {
            MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
            file.setPendingOp(ExternalFilePendingOp.PENDING_NO_OP);
            MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
        }

        // mark the transaction as complete
        ((ExternalDatasetDetails) transactionDataset.getDatasetDetails())
                .setState(ExternalDatasetTransactionState.COMMIT);
        MetadataManager.INSTANCE.updateDataset(mdTxnCtx, transactionDataset);

        // commit metadata transaction
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        success = true;
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        if (transactionState == ExternalDatasetTransactionState.READY_TO_COMMIT) {
            throw new IllegalStateException("System is inconsistent state: commit of (" + dataverseName + "."
                    + datasetName + ") refresh couldn't carry out the commit phase", e);
        }
        if (transactionState == ExternalDatasetTransactionState.COMMIT) {
            // Nothing to do, everything should be clean
            throw e;
        }
        if (transactionState == ExternalDatasetTransactionState.BEGIN) {
            // transaction failed, need to do the following
            // clean NCs removing transaction components
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            spec = ExternalIndexingOperations.buildAbortOp(ds, indexes, metadataProvider);
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            try {
                JobUtils.runJob(hcc, spec, true);
            } catch (Exception e2) {
                // This should never happen -- fix throw illegal
                e.addSuppressed(e2);
                throw new IllegalStateException("System is in inconsistent state. Failed to abort refresh", e);
            }
            // remove the delta of files
            // return the state of the dataset to committed
            try {
                mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
                for (ExternalFile file : deletedFiles) {
                    MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
                }
                for (ExternalFile file : addedFiles) {
                    MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
                }
                for (ExternalFile file : appendedFiles) {
                    MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
                }
                MetadataManager.INSTANCE.updateDataset(mdTxnCtx, ds);
                // commit metadata transaction
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                abort(e, e2, mdTxnCtx);
                e.addSuppressed(e2);
                throw new IllegalStateException("System is in inconsistent state. Failed to drop delta files",
                        e);
            }
        }
    } finally {
        if (lockAquired) {
            ExternalDatasetsRegistry.INSTANCE.refreshEnd(ds, success);
        }
        MetadataLockManager.INSTANCE.refreshDatasetEnd(dataverseName, dataverseName + "." + datasetName);
    }
}

From source file:org.apache.asterix.app.translator.QueryTranslator.java
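
A helper that logs the root cause and aborts the current metadata transaction; if the abort itself fails, that failure is added as suppressed to the parent exception and an IllegalStateException wrapping the root cause is thrown.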

/**
 * Abort the ongoing metadata transaction logging the error cause
 *
 * @param rootE
 * @param parentE
 * @param mdTxnCtx
 */
public static void abort(Exception rootE, Exception parentE, MetadataTransactionContext mdTxnCtx) {
    try {
        if (IS_DEBUG_MODE) {
            LOGGER.log(Level.SEVERE, rootE.getMessage(), rootE);
        }
        if (mdTxnCtx != null) {
            MetadataManager.INSTANCE.abortTransaction(mdTxnCtx);
        }
    } catch (Exception e2) {
        parentE.addSuppressed(e2);
        throw new IllegalStateException(rootE);
    }
}

From source file:org.apache.asterix.aql.translator.AqlTranslator.java
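
When dataset creation fails after the pending-op record has been added to the metadata, compensation steps run inside nested try/catch blocks, and any exception raised while compensating is attached to the original exception with addSuppressed before it is rethrown.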

private void handleCreateDatasetStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws AsterixException, Exception {

    ProgressState progress = ProgressState.NO_PROGRESS;
    DatasetDecl dd = (DatasetDecl) stmt;
    String dataverseName = getActiveDataverse(dd.getDataverse());
    String datasetName = dd.getName().getValue();
    DatasetType dsType = dd.getDatasetType();
    String itemTypeName = dd.getItemTypeName().getValue();
    Identifier ngNameId = dd.getNodegroupName();
    String nodegroupName = getNodeGroupName(ngNameId, dd, dataverseName);
    String compactionPolicy = dd.getCompactionPolicy();
    Map<String, String> compactionPolicyProperties = dd.getCompactionPolicyProperties();
    boolean defaultCompactionPolicy = (compactionPolicy == null);
    boolean temp = dd.getDatasetDetailsDecl().isTemp();

    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);

    MetadataLockManager.INSTANCE.createDatasetBegin(dataverseName, dataverseName + "." + itemTypeName,
            nodegroupName, compactionPolicy, dataverseName + "." + datasetName, defaultCompactionPolicy);
    Dataset dataset = null;
    try {

        IDatasetDetails datasetDetails = null;
        Dataset ds = MetadataManager.INSTANCE.getDataset(metadataProvider.getMetadataTxnContext(),
                dataverseName, datasetName);
        if (ds != null) {
            if (dd.getIfNotExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("A dataset with this name " + datasetName + " already exists.");
            }
        }
        Datatype dt = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(),
                dataverseName, itemTypeName);
        if (dt == null) {
            throw new AlgebricksException(": type " + itemTypeName + " could not be found.");
        }
        String ngName = ngNameId != null ? ngNameId.getValue()
                : configureNodegroupForDataset(dd, dataverseName, mdTxnCtx);

        if (compactionPolicy == null) {
            compactionPolicy = GlobalConfig.DEFAULT_COMPACTION_POLICY_NAME;
            compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
        } else {
            validateCompactionPolicy(compactionPolicy, compactionPolicyProperties, mdTxnCtx, false);
        }
        switch (dd.getDatasetType()) {
        case INTERNAL: {
            IAType itemType = dt.getDatatype();
            if (itemType.getTypeTag() != ATypeTag.RECORD) {
                throw new AlgebricksException("Can only partition ARecord's.");
            }
            List<List<String>> partitioningExprs = ((InternalDetailsDecl) dd.getDatasetDetailsDecl())
                    .getPartitioningExprs();
            boolean autogenerated = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).isAutogenerated();
            ARecordType aRecordType = (ARecordType) itemType;
            List<IAType> partitioningTypes = aRecordType.validatePartitioningExpressions(partitioningExprs,
                    autogenerated);

            List<String> filterField = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).getFilterField();
            if (filterField != null) {
                aRecordType.validateFilterField(filterField);
            }
            if (compactionPolicy == null) {
                if (filterField != null) {
                    // If the dataset has a filter and the user didn't specify a merge policy, then we will pick the
                    // correlated-prefix as the default merge policy.
                    compactionPolicy = GlobalConfig.DEFAULT_FILTERED_DATASET_COMPACTION_POLICY_NAME;
                    compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
                }
            }
            datasetDetails = new InternalDatasetDetails(InternalDatasetDetails.FileStructure.BTREE,
                    InternalDatasetDetails.PartitioningStrategy.HASH, partitioningExprs, partitioningExprs,
                    partitioningTypes, autogenerated, filterField, temp);
            break;
        }
        case EXTERNAL: {
            String adapter = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getAdapter();
            Map<String, String> properties = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getProperties();

            datasetDetails = new ExternalDatasetDetails(adapter, properties, new Date(),
                    ExternalDatasetTransactionState.COMMIT);
            break;
        }

        }

        //#. initialize DatasetIdFactory if it is not initialized.
        if (!DatasetIdFactory.isInitialized()) {
            DatasetIdFactory.initialize(MetadataManager.INSTANCE.getMostRecentDatasetId());
        }

        //#. add a new dataset with PendingAddOp
        dataset = new Dataset(dataverseName, datasetName, itemTypeName, ngName, compactionPolicy,
                compactionPolicyProperties, datasetDetails, dd.getHints(), dsType,
                DatasetIdFactory.generateDatasetId(), IMetadataEntity.PENDING_ADD_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);

        if (dd.getDatasetType() == DatasetType.INTERNAL) {
            Dataverse dataverse = MetadataManager.INSTANCE
                    .getDataverse(metadataProvider.getMetadataTxnContext(), dataverseName);
            JobSpecification jobSpec = DatasetOperations.createDatasetJobSpec(dataverse, datasetName,
                    metadataProvider);

            //#. make metadataTxn commit before calling runJob.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;

            //#. runJob
            runJob(hcc, jobSpec, true);

            //#. begin new metadataTxn
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        }

        //#. add a new dataset with PendingNoOp after deleting the dataset with PendingAddOp
        MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                datasetName);
        dataset.setPendingOp(IMetadataEntity.PENDING_NO_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }

        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {

            //#. execute compensation operations
            //   remove the index in NC
            //   [Notice]
            //   As long as we updated(and committed) metadata, we should remove any effect of the job
            //   because an exception occurs during runJob.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            CompiledDatasetDropStatement cds = new CompiledDatasetDropStatement(dataverseName, datasetName);
            try {
                JobSpecification jobSpec = DatasetOperations.createDropDatasetJobSpec(cds, metadataProvider);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;

                runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }

            //   remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is inconsistent state: pending dataset(" + dataverseName
                        + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }

        throw e;
    } finally {
        MetadataLockManager.INSTANCE.createDatasetEnd(dataverseName, dataverseName + "." + itemTypeName,
                nodegroupName, compactionPolicy, dataverseName + "." + datasetName, defaultCompactionPolicy);
    }
}

From source file:org.apache.asterix.aql.translator.AqlTranslator.java
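
The index creation handler follows the same pattern: each compensation step in the catch block (dropping the replicated files index, the pending secondary index in the NCs, and the external file records from the metadata) adds its own failure to the primary exception via addSuppressed before rethrowing.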

@SuppressWarnings("unchecked")
private void handleCreateIndexStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    ProgressState progress = ProgressState.NO_PROGRESS;
    CreateIndexStatement stmtCreateIndex = (CreateIndexStatement) stmt;
    String dataverseName = getActiveDataverse(stmtCreateIndex.getDataverseName());
    String datasetName = stmtCreateIndex.getDatasetName().getValue();

    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);

    MetadataLockManager.INSTANCE.createIndexBegin(dataverseName, dataverseName + "." + datasetName);

    String indexName = null;
    JobSpecification spec = null;
    Dataset ds = null;
    // For external datasets
    ArrayList<ExternalFile> externalFilesSnapshot = null;
    boolean firstExternalDatasetIndex = false;
    boolean filesIndexReplicated = false;
    Index filesIndex = null;
    boolean datasetLocked = false;
    try {
        ds = MetadataManager.INSTANCE.getDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                datasetName);
        if (ds == null) {
            throw new AlgebricksException(
                    "There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
        }

        indexName = stmtCreateIndex.getIndexName().getValue();
        Index idx = MetadataManager.INSTANCE.getIndex(metadataProvider.getMetadataTxnContext(), dataverseName,
                datasetName, indexName);

        String itemTypeName = ds.getItemTypeName();
        Datatype dt = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(),
                dataverseName, itemTypeName);
        IAType itemType = dt.getDatatype();
        ARecordType aRecordType = (ARecordType) itemType;

        List<List<String>> indexFields = new ArrayList<List<String>>();
        List<IAType> indexFieldTypes = new ArrayList<IAType>();
        for (Pair<List<String>, TypeExpression> fieldExpr : stmtCreateIndex.getFieldExprs()) {
            IAType fieldType = null;
            boolean isOpen = aRecordType.isOpen();
            ARecordType subType = aRecordType;
            int i = 0;
            if (fieldExpr.first.size() > 1 && !isOpen) {
                for (; i < fieldExpr.first.size() - 1;) {
                    subType = (ARecordType) subType.getFieldType(fieldExpr.first.get(i));
                    i++;
                    if (subType.isOpen()) {
                        isOpen = true;
                        break;
                    }
                }
            }
            if (fieldExpr.second == null) {
                fieldType = subType.getSubFieldType(fieldExpr.first.subList(i, fieldExpr.first.size()));
            } else {
                if (!stmtCreateIndex.isEnforced())
                    throw new AlgebricksException("Cannot create typed index on \"" + fieldExpr.first
                            + "\" field without enforcing it's type");
                if (!isOpen)
                    throw new AlgebricksException("Typed index on \"" + fieldExpr.first
                            + "\" field could be created only for open datatype");
                Map<TypeSignature, IAType> typeMap = TypeTranslator.computeTypes(mdTxnCtx, fieldExpr.second,
                        indexName, dataverseName);
                TypeSignature typeSignature = new TypeSignature(dataverseName, indexName);
                fieldType = typeMap.get(typeSignature);
            }
            if (fieldType == null)
                throw new AlgebricksException("Unknown type " + fieldExpr.second);

            indexFields.add(fieldExpr.first);
            indexFieldTypes.add(fieldType);
        }

        aRecordType.validateKeyFields(indexFields, indexFieldTypes, stmtCreateIndex.getIndexType());

        if (idx != null) {
            if (stmtCreateIndex.getIfNotExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("An index with this name " + indexName + " already exists.");
            }
        }

        // Checks whether a user is trying to create an inverted secondary index on a dataset with a variable-length primary key.
        // Currently, we do not support this. Therefore, as a temporary solution, we print an error message and stop.
        if (stmtCreateIndex.getIndexType() == IndexType.SINGLE_PARTITION_WORD_INVIX
                || stmtCreateIndex.getIndexType() == IndexType.SINGLE_PARTITION_NGRAM_INVIX
                || stmtCreateIndex.getIndexType() == IndexType.LENGTH_PARTITIONED_WORD_INVIX
                || stmtCreateIndex.getIndexType() == IndexType.LENGTH_PARTITIONED_NGRAM_INVIX) {
            List<List<String>> partitioningKeys = DatasetUtils.getPartitioningKeys(ds);
            for (List<String> partitioningKey : partitioningKeys) {
                IAType keyType = aRecordType.getSubFieldType(partitioningKey);
                ITypeTraits typeTrait = AqlTypeTraitProvider.INSTANCE.getTypeTrait(keyType);

                // If it is not a fixed length
                if (typeTrait.getFixedLength() < 0) {
                    throw new AlgebricksException("The keyword or ngram index -" + indexName
                            + " cannot be created on the dataset -" + datasetName
                            + " due to its variable-length primary key field - " + partitioningKey);
                }

            }
        }

        if (ds.getDatasetType() == DatasetType.INTERNAL) {
            validateIfResourceIsActiveInFeed(dataverseName, datasetName);
        } else {
            // External dataset
            // Check if the dataset is indexible
            if (!ExternalIndexingOperations.isIndexible((ExternalDatasetDetails) ds.getDatasetDetails())) {
                throw new AlgebricksException(
                        "dataset using " + ((ExternalDatasetDetails) ds.getDatasetDetails()).getAdapter()
                                + " Adapter can't be indexed");
            }
            // check if the name of the index is valid
            if (!ExternalIndexingOperations.isValidIndexName(datasetName, indexName)) {
                throw new AlgebricksException("external dataset index name is invalid");
            }

            // Check if the files index exist
            filesIndex = MetadataManager.INSTANCE.getIndex(metadataProvider.getMetadataTxnContext(),
                    dataverseName, datasetName, ExternalIndexingOperations.getFilesIndexName(datasetName));
            firstExternalDatasetIndex = (filesIndex == null);
            // lock external dataset
            ExternalDatasetsRegistry.INSTANCE.buildIndexBegin(ds, firstExternalDatasetIndex);
            datasetLocked = true;
            if (firstExternalDatasetIndex) {
                // verify that no one has created an index before we acquire the lock
                filesIndex = MetadataManager.INSTANCE.getIndex(metadataProvider.getMetadataTxnContext(),
                        dataverseName, datasetName, ExternalIndexingOperations.getFilesIndexName(datasetName));
                if (filesIndex != null) {
                    ExternalDatasetsRegistry.INSTANCE.buildIndexEnd(ds, firstExternalDatasetIndex);
                    firstExternalDatasetIndex = false;
                    ExternalDatasetsRegistry.INSTANCE.buildIndexBegin(ds, firstExternalDatasetIndex);
                }
            }
            if (firstExternalDatasetIndex) {
                // Get snapshot from External File System
                externalFilesSnapshot = ExternalIndexingOperations.getSnapshotFromExternalFileSystem(ds);
                // Add an entry for the files index
                filesIndex = new Index(dataverseName, datasetName,
                        ExternalIndexingOperations.getFilesIndexName(datasetName), IndexType.BTREE,
                        ExternalIndexingOperations.FILE_INDEX_FIELD_NAMES,
                        ExternalIndexingOperations.FILE_INDEX_FIELD_TYPES, false, false,
                        IMetadataEntity.PENDING_ADD_OP);
                MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), filesIndex);
                // Add files to the external files index
                for (ExternalFile file : externalFilesSnapshot) {
                    MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
                }
                // This is the first index for the external dataset, replicate the files index
                spec = ExternalIndexingOperations.buildFilesIndexReplicationJobSpec(ds, externalFilesSnapshot,
                        metadataProvider, true);
                if (spec == null) {
                    throw new AsterixException(
                            "Failed to create job spec for replicating Files Index For external dataset");
                }
                filesIndexReplicated = true;
                runJob(hcc, spec, true);
            }
        }

        //check whether there exists another enforced index on the same field
        if (stmtCreateIndex.isEnforced()) {
            List<Index> indexes = MetadataManager.INSTANCE
                    .getDatasetIndexes(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName);
            for (Index index : indexes) {
                if (index.getKeyFieldNames().equals(indexFields)
                        && !index.getKeyFieldTypes().equals(indexFieldTypes) && index.isEnforcingKeyFileds())
                    throw new AsterixException(
                            "Cannot create index " + indexName + " , enforced index " + index.getIndexName()
                                    + " on field \"" + StringUtils.join(indexFields, ',') + "\" already exist");
            }
        }

        //#. add a new index with PendingAddOp
        Index index = new Index(dataverseName, datasetName, indexName, stmtCreateIndex.getIndexType(),
                indexFields, indexFieldTypes, stmtCreateIndex.getGramLength(), stmtCreateIndex.isEnforced(),
                false, IMetadataEntity.PENDING_ADD_OP);
        MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), index);

        ARecordType enforcedType = null;
        if (stmtCreateIndex.isEnforced()) {
            enforcedType = IntroduceSecondaryIndexInsertDeleteRule.createEnforcedType(aRecordType,
                    Lists.newArrayList(index));
        }

        //#. prepare to create the index artifact in NC.
        CompiledCreateIndexStatement cis = new CompiledCreateIndexStatement(index.getIndexName(), dataverseName,
                index.getDatasetName(), index.getKeyFieldNames(), index.getKeyFieldTypes(),
                index.isEnforcingKeyFileds(), index.getGramLength(), index.getIndexType());
        spec = IndexOperations.buildSecondaryIndexCreationJobSpec(cis, aRecordType, enforcedType,
                metadataProvider);
        if (spec == null) {
            throw new AsterixException("Failed to create job spec for creating index '"
                    + stmtCreateIndex.getDatasetName() + "." + stmtCreateIndex.getIndexName() + "'");
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;

        progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;

        //#. create the index artifact in NC.
        runJob(hcc, spec, true);

        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        bActiveTxn = true;
        metadataProvider.setMetadataTxnContext(mdTxnCtx);

        //#. load data into the index in NC.
        cis = new CompiledCreateIndexStatement(index.getIndexName(), dataverseName, index.getDatasetName(),
                index.getKeyFieldNames(), index.getKeyFieldTypes(), index.isEnforcingKeyFileds(),
                index.getGramLength(), index.getIndexType());
        spec = IndexOperations.buildSecondaryIndexLoadingJobSpec(cis, aRecordType, enforcedType,
                metadataProvider);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;

        runJob(hcc, spec, true);

        //#. begin new metadataTxn
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        bActiveTxn = true;
        metadataProvider.setMetadataTxnContext(mdTxnCtx);

        //#. add another new index with PendingNoOp after deleting the index with PendingAddOp
        MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName,
                indexName);
        index.setPendingOp(IMetadataEntity.PENDING_NO_OP);
        MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), index);
        // add another new files index with PendingNoOp after deleting the index with PendingAddOp
        if (firstExternalDatasetIndex) {
            MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName,
                    datasetName, filesIndex.getIndexName());
            filesIndex.setPendingOp(IMetadataEntity.PENDING_NO_OP);
            MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), filesIndex);
            // update transaction timestamp
            ((ExternalDatasetDetails) ds.getDatasetDetails()).setRefreshTimestamp(new Date());
            MetadataManager.INSTANCE.updateDataset(mdTxnCtx, ds);
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);

    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        // If files index was replicated for external dataset, it should be cleaned up on NC side
        if (filesIndexReplicated) {
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                    ExternalIndexingOperations.getFilesIndexName(datasetName));
            try {
                JobSpecification jobSpec = ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds,
                        metadataProvider, ds);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;
                runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }
        }

        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            //#. execute compensation operations
            //   remove the index in NC
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                    indexName);
            try {
                JobSpecification jobSpec = IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider,
                        ds);

                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;
                runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }

            if (firstExternalDatasetIndex) {
                mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
                metadataProvider.setMetadataTxnContext(mdTxnCtx);
                try {
                    // Drop External Files from metadata
                    MetadataManager.INSTANCE.dropDatasetExternalFiles(mdTxnCtx, ds);
                    MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                } catch (Exception e2) {
                    e.addSuppressed(e2);
                    abort(e, e2, mdTxnCtx);
                    throw new IllegalStateException("System is inconsistent state: pending files for("
                            + dataverseName + "." + datasetName + ") couldn't be removed from the metadata", e);
                }
                mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
                metadataProvider.setMetadataTxnContext(mdTxnCtx);
                try {
                    // Drop the files index from metadata
                    MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName,
                            datasetName, ExternalIndexingOperations.getFilesIndexName(datasetName));
                    MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                } catch (Exception e2) {
                    e.addSuppressed(e2);
                    abort(e, e2, mdTxnCtx);
                    throw new IllegalStateException(
                            "System is inconsistent state: pending index(" + dataverseName + "." + datasetName
                                    + "." + ExternalIndexingOperations.getFilesIndexName(datasetName)
                                    + ") couldn't be removed from the metadata",
                            e);
                }
            }
            // remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName, indexName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException(
                        "System is in inconsistent state: pending index(" + dataverseName + "." + datasetName
                                + "." + indexName + ") couldn't be removed from the metadata",
                        e);
            }
        }
        throw e;
    } finally {
        MetadataLockManager.INSTANCE.createIndexEnd(dataverseName, dataverseName + "." + datasetName);
        if (datasetLocked) {
            ExternalDatasetsRegistry.INSTANCE.buildIndexEnd(ds, firstExternalDatasetIndex);
        }
    }
}

From source file:org.apache.asterix.aql.translator.AqlTranslator.java
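
Here addSuppressed collects failures from the compensation jobs and the metadata cleanup that run when dropping a dataset fails midway, so the original exception still carries the secondary errors when it is rethrown.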

private void handleDatasetDropStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    DropStatement stmtDelete = (DropStatement) stmt;
    String dataverseName = getActiveDataverse(stmtDelete.getDataverseName());
    String datasetName = stmtDelete.getDatasetName().getValue();

    ProgressState progress = ProgressState.NO_PROGRESS;
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);

    MetadataLockManager.INSTANCE.dropDatasetBegin(dataverseName, dataverseName + "." + datasetName);
    List<JobSpecification> jobsToExecute = new ArrayList<JobSpecification>();
    try {

        Dataset ds = MetadataManager.INSTANCE.getDataset(mdTxnCtx, dataverseName, datasetName);
        if (ds == null) {
            if (stmtDelete.getIfExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("There is no dataset with this name " + datasetName
                        + " in dataverse " + dataverseName + ".");
            }
        }

        Map<FeedConnectionId, Pair<JobSpecification, Boolean>> disconnectJobList = new HashMap<FeedConnectionId, Pair<JobSpecification, Boolean>>();
        if (ds.getDatasetType() == DatasetType.INTERNAL) {
            // prepare job spec(s) that would disconnect any active feeds involving the dataset.
            List<FeedConnectionId> feedConnections = FeedLifecycleListener.INSTANCE
                    .getActiveFeedConnections(null);
            if (feedConnections != null && !feedConnections.isEmpty()) {
                for (FeedConnectionId connection : feedConnections) {
                    Pair<JobSpecification, Boolean> p = FeedOperations
                            .buildDisconnectFeedJobSpec(metadataProvider, connection);
                    disconnectJobList.put(connection, p);
                    if (LOGGER.isLoggable(Level.INFO)) {
                        LOGGER.info("Disconnecting feed " + connection.getFeedId().getFeedName()
                                + " from dataset " + datasetName + " as dataset is being dropped");
                    }
                }
            }

            //#. prepare jobs to drop the dataset and the indexes in NC
            List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName,
                    datasetName);
            for (int j = 0; j < indexes.size(); j++) {
                if (indexes.get(j).isSecondaryIndex()) {
                    CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                            indexes.get(j).getIndexName());
                    jobsToExecute
                            .add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
                }
            }
            CompiledDatasetDropStatement cds = new CompiledDatasetDropStatement(dataverseName, datasetName);
            jobsToExecute.add(DatasetOperations.createDropDatasetJobSpec(cds, metadataProvider));

            //#. mark the existing dataset as PendingDropOp
            MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
            MetadataManager.INSTANCE.addDataset(mdTxnCtx,
                    new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getNodeGroupName(),
                            ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(),
                            ds.getDatasetDetails(), ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                            IMetadataEntity.PENDING_DROP_OP));

            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;

            //# disconnect the feeds
            for (Pair<JobSpecification, Boolean> p : disconnectJobList.values()) {
                runJob(hcc, p.first, true);
            }

            //#. run the jobs
            for (JobSpecification jobSpec : jobsToExecute) {
                runJob(hcc, jobSpec, true);
            }

            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        } else {
            // External dataset
            ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
            //#. prepare jobs to drop the dataset and the indexes in NC
            List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName,
                    datasetName);
            for (int j = 0; j < indexes.size(); j++) {
                if (ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
                    CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                            indexes.get(j).getIndexName());
                    jobsToExecute
                            .add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
                } else {
                    CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                            indexes.get(j).getIndexName());
                    jobsToExecute.add(
                            ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds, metadataProvider, ds));
                }
            }

            //#. mark the existing dataset as PendingDropOp
            MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
            MetadataManager.INSTANCE.addDataset(mdTxnCtx,
                    new Dataset(dataverseName, datasetName, ds.getItemTypeName(), ds.getNodeGroupName(),
                            ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(),
                            ds.getDatasetDetails(), ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                            IMetadataEntity.PENDING_DROP_OP));

            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;

            //#. run the jobs
            for (JobSpecification jobSpec : jobsToExecute) {
                runJob(hcc, jobSpec, true);
            }
            if (indexes.size() > 0) {
                ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
            }
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        }

        //#. finally, delete the dataset.
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx, dataverseName, datasetName);
        // Drop the associated nodegroup
        String nodegroup = ds.getNodeGroupName();
        if (!nodegroup.equalsIgnoreCase(MetadataConstants.METADATA_DEFAULT_NODEGROUP_NAME)) {
            MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx, dataverseName + ":" + datasetName);
        }

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }

        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            //#. execute compensation operations
            //   remove the all indexes in NC
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                //do no throw exception since still the metadata needs to be compensated.
                e.addSuppressed(e2);
            }

            //   remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is inconsistent state: pending dataset(" + dataverseName
                        + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }

        throw e;
    } finally {
        MetadataLockManager.INSTANCE.dropDatasetEnd(dataverseName, dataverseName + "." + datasetName);
    }
}

From source file:org.apache.asterix.metadata.feeds.FeedMetadataUtil.java
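
If the metadata transaction cannot be aborted after a failure, the abort exception is recorded as suppressed on the original exception before an IllegalStateException is thrown.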

private static boolean preProcessingRequired(FeedConnectionId connectionId) {
    MetadataTransactionContext ctx = null;
    Feed feed = null;
    boolean preProcessingRequired = false;
    try {
        MetadataManager.INSTANCE.acquireReadLatch();
        ctx = MetadataManager.INSTANCE.beginTransaction();
        feed = MetadataManager.INSTANCE.getFeed(ctx, connectionId.getFeedId().getDataverse(),
                connectionId.getFeedId().getEntityName());
        preProcessingRequired = feed.getAppliedFunction() != null;
        MetadataManager.INSTANCE.commitTransaction(ctx);
    } catch (Exception e) {
        if (ctx != null) {
            try {
                MetadataManager.INSTANCE.abortTransaction(ctx);
            } catch (Exception abortException) {
                e.addSuppressed(abortException);
                throw new IllegalStateException(e);
            }
        }
    } finally {
        MetadataManager.INSTANCE.releaseReadLatch();
    }
    return preProcessingRequired;
}

From source file:org.apache.flink.streaming.api.operators.AbstractStreamOperator.java
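
If taking the snapshot fails, the in-progress snapshot is cancelled; a failure during cancellation is added as suppressed to the snapshot exception, which is then wrapped and rethrown.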

@Override
public final OperatorSnapshotResult snapshotState(long checkpointId, long timestamp,
        CheckpointOptions checkpointOptions) throws Exception {

    KeyGroupRange keyGroupRange = null != keyedStateBackend ? keyedStateBackend.getKeyGroupRange()
            : KeyGroupRange.EMPTY_KEY_GROUP_RANGE;

    OperatorSnapshotResult snapshotInProgress = new OperatorSnapshotResult();

    CheckpointStreamFactory factory = getCheckpointStreamFactory(checkpointOptions);

    try (StateSnapshotContextSynchronousImpl snapshotContext = new StateSnapshotContextSynchronousImpl(
            checkpointId, timestamp, factory, keyGroupRange, getContainingTask().getCancelables())) {

        snapshotState(snapshotContext);

        snapshotInProgress.setKeyedStateRawFuture(snapshotContext.getKeyedStateStreamFuture());
        snapshotInProgress.setOperatorStateRawFuture(snapshotContext.getOperatorStateStreamFuture());

        if (null != operatorStateBackend) {
            snapshotInProgress.setOperatorStateManagedFuture(
                    operatorStateBackend.snapshot(checkpointId, timestamp, factory, checkpointOptions));
        }

        if (null != keyedStateBackend) {
            snapshotInProgress.setKeyedStateManagedFuture(
                    keyedStateBackend.snapshot(checkpointId, timestamp, factory, checkpointOptions));
        }
    } catch (Exception snapshotException) {
        try {
            snapshotInProgress.cancel();
        } catch (Exception e) {
            snapshotException.addSuppressed(e);
        }

        throw new Exception(
                "Could not complete snapshot " + checkpointId + " for operator " + getOperatorName() + '.',
                snapshotException);
    }

    return snapshotInProgress;
}

From source file:org.elasticsearch.ExceptionsHelperTests.java
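
A test that builds exception chains containing suppressed exceptions (including errors such as OutOfMemoryError) to verify that maybeError finds a fatal Error anywhere in the cause or suppressed hierarchy.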

public void testMaybeError() {
    final Error outOfMemoryError = new OutOfMemoryError();
    assertError(outOfMemoryError, outOfMemoryError);

    final DecoderException decoderException = new DecoderException(outOfMemoryError);
    assertError(decoderException, outOfMemoryError);

    final Exception e = new Exception();
    e.addSuppressed(decoderException);
    assertError(e, outOfMemoryError);

    final int depth = randomIntBetween(1, 16);
    Throwable cause = new Exception();
    boolean fatal = false;
    Error error = null;
    for (int i = 0; i < depth; i++) {
        final int length = randomIntBetween(1, 4);
        for (int j = 0; j < length; j++) {
            if (!fatal && rarely()) {
                error = new Error();
                cause.addSuppressed(error);
                fatal = true;
            } else {
                cause.addSuppressed(new Exception());
            }
        }
        if (!fatal && rarely()) {
            cause = error = new Error(cause);
            fatal = true;
        } else {
            cause = new Exception(cause);
        }
    }
    if (fatal) {
        assertError(cause, error);
    } else {
        assertFalse(maybeError(cause, logger).isPresent());
    }

    assertFalse(maybeError(new Exception(new DecoderException()), logger).isPresent());

    Throwable chain = outOfMemoryError;
    for (int i = 0; i < MAX_ITERATIONS; i++) {
        chain = new Exception(chain);
    }
    assertFalse(maybeError(chain, logger).isPresent());
}

From source file:org.nuxeo.ecm.core.work.AbstractWork.java
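
A retry loop that keeps the first failure and attaches each subsequent retry failure to it with addSuppressed; once all retries are exhausted, the accumulated exception is rethrown (wrapped in a RuntimeException if necessary).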

@Override
public void run() {
    if (isSuspending()) {
        // don't run anything if we're being started while a suspend
        // has been requested
        suspended();
        return;
    }
    Exception suppressed = null;
    int retryCount = getRetryCount(); // may be 0
    for (int i = 0; i <= retryCount; i++) {
        if (i > 0) {
            log.debug("Retrying work due to concurrent update (" + i + "): " + this);
            log.trace("Concurrent update", suppressed);
        }
        Exception e = runWorkWithTransactionAndCheckExceptions();
        if (e == null) {
            // no exception, work is done
            return;
        }
        if (suppressed == null) {
            suppressed = e;
        } else {
            suppressed.addSuppressed(e);
        }
    }
    // all retries have been done, throw the exception
    if (suppressed != null) {
        if (suppressed instanceof RuntimeException) {
            throw (RuntimeException) suppressed;
        } else {
            throw new RuntimeException(suppressed);
        }
    }
}