Example usage for com.google.common.collect Sets newLinkedHashSetWithExpectedSize

Introduction

This page collects real-world usage examples of com.google.common.collect.Sets#newLinkedHashSetWithExpectedSize, drawn from open-source projects.

Prototype

public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize) 

Document

Creates a LinkedHashSet instance, with a high enough "initial capacity" that it should hold expectedSize elements without growth.
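
A minimal sketch of calling the factory directly (the set contents here are illustrative):

import com.google.common.collect.Sets;
import java.util.LinkedHashSet;

// Pre-sized to hold three elements without an internal resize,
// while still iterating in insertion order.
LinkedHashSet<String> names = Sets.newLinkedHashSetWithExpectedSize(3);
names.add("b");
names.add("a");
names.add("b"); // duplicate is ignored; iteration order stays b, a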

Usage

From source file: org.apache.phoenix.index.IndexMaintainer.java

@Override
public void readFields(DataInput input) throws IOException {
    int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input);
    isMultiTenant = encodedIndexSaltBucketsAndMultiTenant < 0;
    nIndexSaltBuckets = Math.abs(encodedIndexSaltBucketsAndMultiTenant) - 1;
    int encodedIndexedColumnsAndViewId = WritableUtils.readVInt(input);
    boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0;
    if (hasViewIndexId) {
        // Fixed length
        viewIndexId = new byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()];
        input.readFully(viewIndexId);
    }
    int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1;
    indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        byte[] cf = Bytes.readByteArray(input);
        byte[] cq = Bytes.readByteArray(input);
        indexedColumns.add(new ColumnReference(cf, cq));
    }
    indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        PDataType type = PDataType.values()[WritableUtils.readVInt(input)];
        indexedColumnTypes.add(type);
    }
    int encodedCoveredColumnsAndLocalIndex = WritableUtils.readVInt(input);
    isLocalIndex = encodedCoveredColumnsAndLocalIndex < 0;
    int nCoveredColumns = Math.abs(encodedCoveredColumnsAndLocalIndex) - 1;
    coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nCoveredColumns);
    for (int i = 0; i < nCoveredColumns; i++) {
        byte[] cf = Bytes.readByteArray(input);
        byte[] cq = Bytes.readByteArray(input);
        coveredColumns.add(new ColumnReference(cf, cq));
    }
    // Hack: rowKeyOrderOptimizable is encoded in the sign of the index table name length
    int len = WritableUtils.readVInt(input);
    if (len < 0) {
        rowKeyOrderOptimizable = false;
        len *= -1;
    } else {
        rowKeyOrderOptimizable = true;
    }
    indexTableName = new byte[len];
    input.readFully(indexTableName, 0, len);
    dataEmptyKeyValueCF = Bytes.readByteArray(input);
    len = WritableUtils.readVInt(input);
    //TODO remove this in the next major release
    boolean isNewClient = false;
    if (len < 0) {
        isNewClient = true;
        len = Math.abs(len);
    }
    byte[] emptyKeyValueCF = new byte[len];
    input.readFully(emptyKeyValueCF, 0, len);
    emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF);

    if (isNewClient) {
        int numIndexedExpressions = WritableUtils.readVInt(input);
        indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions);
        for (int i = 0; i < numIndexedExpressions; i++) {
            Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
            expression.readFields(input);
            indexedExpressions.add(expression);
        }
    } else {
        indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size());
        Iterator<ColumnReference> colReferenceIter = indexedColumns.iterator();
        Iterator<PDataType> dataTypeIter = indexedColumnTypes.iterator();
        while (colReferenceIter.hasNext()) {
            ColumnReference colRef = colReferenceIter.next();
            final PDataType dataType = dataTypeIter.next();
            indexedExpressions.add(new KeyValueColumnExpression(new PDatum() {

                @Override
                public boolean isNullable() {
                    return true;
                }

                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }

                @Override
                public Integer getScale() {
                    return null;
                }

                @Override
                public Integer getMaxLength() {
                    return null;
                }

                @Override
                public PDataType getDataType() {
                    return dataType;
                }
            }, colRef.getFamily(), colRef.getQualifier()));
        }
    }

    rowKeyMetaData = newRowKeyMetaData();
    rowKeyMetaData.readFields(input);
    int nDataCFs = WritableUtils.readVInt(input);
    // Encode indexWALDisabled in nDataCFs
    indexWALDisabled = nDataCFs < 0;
    this.nDataCFs = Math.abs(nDataCFs) - 1;
    int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input);
    this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0;
    this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows);
    initCachedState();
}
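
The snippet above repeatedly packs a boolean flag into the sign of a serialized vint (for example, isMultiTenant rides on nIndexSaltBuckets, and isLocalIndex on the covered-column count). A minimal sketch of the matching write side, assuming Hadoop's WritableUtils (the method name is illustrative):

import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

// Encode a non-negative count plus a boolean in one vint: shift the count
// up by one so zero stays representable, and carry the flag in the sign.
static void writeFlaggedCount(DataOutput out, int count, boolean flag) throws IOException {
    WritableUtils.writeVInt(out, (flag ? -1 : 1) * (count + 1));
}

// The read side mirrors readFields above:
//   int encoded = WritableUtils.readVInt(input);
//   boolean flag = encoded < 0;
//   int count = Math.abs(encoded) - 1;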

From source file: org.apache.phoenix.index.IndexMaintainer.java

/**
 * Initialize calculated state when reading or creating.
 */
private void initCachedState() {
    dataEmptyKeyValueRef = new ColumnReference(emptyKeyValueCFPtr.copyBytesIfNecessary(),
            QueryConstants.EMPTY_COLUMN_BYTES);

    indexQualifiers = Lists.newArrayListWithExpectedSize(this.coveredColumns.size());
    for (ColumnReference ref : coveredColumns) {
        indexQualifiers
                .add(new ImmutableBytesPtr(IndexUtil.getIndexColumnName(ref.getFamily(), ref.getQualifier())));
    }

    this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumns.size());
    // columns that are required to evaluate all expressions in indexedExpressions (not including columns in data row key)
    this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size());
    for (Expression expression : indexedExpressions) {
        KeyValueExpressionVisitor visitor = new KeyValueExpressionVisitor() {
            @Override
            public Void visit(KeyValueColumnExpression expression) {
                indexedColumns
                        .add(new ColumnReference(expression.getColumnFamily(), expression.getColumnName()));
                indexedColumnTypes.add(expression.getDataType());
                return null;
            }
        };
        expression.accept(visitor);
    }
    allColumns.addAll(indexedColumns);
    allColumns.addAll(coveredColumns);

    int dataPkOffset = (isDataTableSalted ? 1 : 0) + (isMultiTenant ? 1 : 0);
    int nIndexPkColumns = getIndexPkColumnCount();
    dataPkPosition = new int[nIndexPkColumns];
    Arrays.fill(dataPkPosition, EXPRESSION_NOT_PRESENT);
    int numViewConstantColumns = 0;
    BitSet viewConstantColumnBitSet = rowKeyMetaData.getViewConstantColumnBitSet();
    for (int i = dataPkOffset; i < dataRowKeySchema.getFieldCount(); i++) {
        if (!viewConstantColumnBitSet.get(i)) {
            int indexPkPosition = rowKeyMetaData.getIndexPkPosition(i - dataPkOffset);
            this.dataPkPosition[indexPkPosition] = i;
        } else {
            numViewConstantColumns++;
        }
    }

    // Calculate the max number of trailing nulls that we should get rid of after building the index row key.
    // We only get rid of nulls for variable length types, so we have to be careful to consider the type of the
    // index table, not the data type of the data table
    int expressionsPos = indexedExpressions.size();
    int indexPkPos = nIndexPkColumns - numViewConstantColumns - 1;
    while (indexPkPos >= 0) {
        int dataPkPos = dataPkPosition[indexPkPos];
        boolean isDataNullable;
        PDataType dataType;
        if (dataPkPos == EXPRESSION_NOT_PRESENT) {
            isDataNullable = true;
            dataType = indexedExpressions.get(--expressionsPos).getDataType();
        } else {
            Field dataField = dataRowKeySchema.getField(dataPkPos);
            dataType = dataField.getDataType();
            isDataNullable = dataField.isNullable();
        }
        PDataType indexDataType = IndexUtil.getIndexColumnDataType(isDataNullable, dataType);
        if (indexDataType.isFixedWidth()) {
            break;
        }
        indexPkPos--;
    }
    maxTrailingNulls = nIndexPkColumns - indexPkPos - 1;
}
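
Note how initCachedState sizes allColumns up front for the combined contents of both contributing collections, so the union is built without intermediate rehashing and the indexed columns stay ahead of the covered columns in iteration order. The same idiom in isolation (the element values are illustrative):

import com.google.common.collect.Sets;
import java.util.List;
import java.util.Set;

List<String> indexed = List.of("a", "b");
List<String> covered = List.of("b", "c");
Set<String> all = Sets.newLinkedHashSetWithExpectedSize(indexed.size() + covered.size());
all.addAll(indexed); // elements keep their relative order at the front
all.addAll(covered); // the duplicate "b" does not move; iteration is a, b, c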

From source file: org.apache.phoenix.schema.MetaDataClient.java

/**
 * Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling
 * MetaDataClient.createTable. In doing so, we perform the following translations:
 * 1) Change the type of any columns being indexed to types that support null if the column is nullable.
 *    For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type supports null
 *    when it's in the row key while a BIGINT does not.
 * 2) Append any row key column from the data table that is not in the indexed column list. Our indexes
 *    rely on having a 1:1 correspondence between the index and data rows.
 * 3) Change the name of the columns to include the column family. For example, if you have a column
 *    named "B" in a column family named "A", the indexed column name will be "A:B". This makes it easy
 *    to translate the column references in a query to the correct column references in an index table
 *    regardless of whether the column reference is prefixed with the column family name or not. It also
 *    has the side benefit of allowing the same named column in different column families to both be
 *    listed as an index column.
 * @param statement
 * @param splits
 * @return the MutationState from populating the index table from the data table
 * @throws SQLException
 */
public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException {
    IndexKeyConstraint ik = statement.getIndexConstraint();
    TableName indexTableName = statement.getIndexTableName();

    List<Pair<ParseNode, SortOrder>> indexParseNodeAndSortOrderList = ik.getParseNodeAndSortOrderList();
    List<ColumnName> includedColumns = statement.getIncludeColumns();
    TableRef tableRef = null;
    PTable table = null;
    boolean retry = true;
    Short indexId = null;
    boolean allocateIndexId = false;
    boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL;
    int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
    if (isLocalIndex) {
        if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB,
                QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES)
                    .setTableName(indexTableName.getTableName()).build().buildException();
        }
        if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES)
                    .setTableName(indexTableName.getTableName()).build().buildException();
        }
    }
    while (true) {
        try {
            ColumnResolver resolver = FromCompiler.getResolver(statement, connection,
                    statement.getUdfParseNodes());
            tableRef = resolver.getTables().get(0);
            PTable dataTable = tableRef.getTable();
            boolean isTenantConnection = connection.getTenantId() != null;
            if (isTenantConnection) {
                if (dataTable.getType() != PTableType.VIEW) {
                    throw new SQLFeatureNotSupportedException(
                            "An index may only be created for a VIEW through a tenant-specific connection");
                }
            }
            if (!dataTable.isImmutableRows()) {
                if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES)
                            .setTableName(indexTableName.getTableName()).build().buildException();
                }
                if (connection.getQueryServices().hasInvalidIndexConfiguration()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG)
                            .setTableName(indexTableName.getTableName()).build().buildException();
                }
            }
            int posOffset = 0;
            List<PColumn> pkColumns = dataTable.getPKColumns();
            Set<RowKeyColumnExpression> unusedPkColumns;
            if (dataTable.getBucketNum() != null) { // Ignore SALT column
                unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size() - 1);
                posOffset++;
            } else {
                unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
            }
            for (int i = posOffset; i < pkColumns.size(); i++) {
                PColumn column = pkColumns.get(i);
                unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i),
                        "\"" + column.getName().getString() + "\""));
            }
            List<ColumnDefInPkConstraint> allPkColumns = Lists
                    .newArrayListWithExpectedSize(unusedPkColumns.size());
            List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(
                    includedColumns.size() + indexParseNodeAndSortOrderList.size());

            if (dataTable.isMultiTenant()) {
                // Add tenant ID column as first column in index
                PColumn col = dataTable.getPKColumns().get(posOffset);
                RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col,
                        new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString());
                unusedPkColumns.remove(columnExpression);
                PDataType dataType = IndexUtil.getIndexColumnDataType(col);
                ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(),
                        col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(),
                        col.getName().getString(), col.isRowTimestamp()));
            }
            /*
             * Allocate an index ID in two circumstances:
             * 1) for a local index, as all local indexes will reside in the same HBase table
             * 2) for a view on an index.
             */
            if (isLocalIndex
                    || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) {
                allocateIndexId = true;
                // Next add index ID column
                PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
                ColumnName colName = ColumnName
                        .caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName());
                allPkColumns.add(new ColumnDefInPkConstraint(colName, SortOrder.getDefault(), false));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false,
                        SortOrder.getDefault(), null, false));
            }

            PhoenixStatement phoenixStatement = new PhoenixStatement(connection);
            StatementContext context = new StatementContext(phoenixStatement, resolver);
            IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
            Set<ColumnName> indexedColumnNames = Sets
                    .newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size());
            for (Pair<ParseNode, SortOrder> pair : indexParseNodeAndSortOrderList) {
                ParseNode parseNode = pair.getFirst();
                // normalize the parse node
                parseNode = StatementNormalizer.normalize(parseNode, resolver);
                // compile the parseNode to get an expression
                expressionIndexCompiler.reset();
                Expression expression = parseNode.accept(expressionIndexCompiler);
                if (expressionIndexCompiler.isAggregate()) {
                    throw new SQLExceptionInfo.Builder(
                            SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build()
                                    .buildException();
                }
                if (expression.getDeterminism() != Determinism.ALWAYS) {
                    throw new SQLExceptionInfo.Builder(
                            SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build()
                                    .buildException();
                }
                if (expression.isStateless()) {
                    throw new SQLExceptionInfo.Builder(
                            SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build()
                                    .buildException();
                }
                unusedPkColumns.remove(expression);

                // Go through parse node to get string as otherwise we
                // can lose information during compilation
                StringBuilder buf = new StringBuilder();
                parseNode.toSQL(resolver, buf);
                // need to escape backslash as this expression will be re-parsed later
                String expressionStr = StringUtil.escapeBackslash(buf.toString());

                ColumnName colName = null;
                ColumnRef colRef = expressionIndexCompiler.getColumnRef();
                boolean isRowTimestamp = false;
                if (colRef != null) {
                    // if this is a regular column
                    PColumn column = colRef.getColumn();
                    String columnFamilyName = column.getFamilyName() != null
                            ? column.getFamilyName().getString()
                            : null;
                    colName = ColumnName.caseSensitiveColumnName(
                            IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
                    isRowTimestamp = column.isRowTimestamp();
                } else {
                    // if this is an expression
                    // TODO: column names cannot have double quotes; remove this once PHOENIX-1621 is fixed
                    String name = expressionStr.replaceAll("\"", "'");
                    colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
                }
                indexedColumnNames.add(colName);
                PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(),
                        expression.getDataType());
                allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(),
                        expression.getMaxLength(), expression.getScale(), false, pair.getSecond(),
                        expressionStr, isRowTimestamp));
            }

            // Next all the PK columns from the data table that aren't indexed
            if (!unusedPkColumns.isEmpty()) {
                for (RowKeyColumnExpression colExpression : unusedPkColumns) {
                    PColumn col = dataTable.getPKColumns().get(colExpression.getPosition());
                    // Don't add columns with constant values from updatable views, as
                    // we don't need these in the index
                    if (col.getViewConstant() == null) {
                        ColumnName colName = ColumnName
                                .caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                        allPkColumns.add(new ColumnDefInPkConstraint(colName, colExpression.getSortOrder(),
                                col.isRowTimestamp()));
                        PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(),
                                colExpression.getDataType());
                        columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(),
                                colExpression.isNullable(), colExpression.getMaxLength(),
                                colExpression.getScale(), false, colExpression.getSortOrder(),
                                colExpression.toString(), col.isRowTimestamp()));
                    }
                }
            }

            // Last all the included columns (minus any PK columns)
            for (ColumnName colName : includedColumns) {
                PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName())
                        .getColumn();
                colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                // Check for duplicates between indexed and included columns
                if (indexedColumnNames.contains(colName)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build()
                            .buildException();
                }
                if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
                    // Need to re-create ColumnName, since the above one won't have the column family name
                    colName = ColumnName.caseSensitiveColumnName(col.getFamilyName().getString(),
                            IndexUtil.getIndexColumnName(col));
                    columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(),
                            col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(),
                            null, col.isRowTimestamp()));
                }
            }

            // Don't re-allocate indexId on ConcurrentTableMutationException,
            // as there's no need to burn another sequence value.
            if (allocateIndexId && indexId == null) {
                Long scn = connection.getSCN();
                long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
                PName tenantId = connection.getTenantId();
                String tenantIdStr = tenantId == null ? null : connection.getTenantId().getString();
                PName physicalName = dataTable.getPhysicalName();
                int nSequenceSaltBuckets = connection.getQueryServices().getSequenceSaltBuckets();
                SequenceKey key = MetaDataUtil.getViewIndexSequenceKey(tenantIdStr, physicalName,
                        nSequenceSaltBuckets);
                // Create at parent timestamp as we know that will be earlier than now
                // and earlier than any SCN if one is set.
                createSequence(key.getTenantId(), key.getSchemaName(), key.getSequenceName(), true,
                        Short.MIN_VALUE, 1, 1, false, Long.MIN_VALUE, Long.MAX_VALUE, dataTable.getTimeStamp());
                long[] seqValues = new long[1];
                SQLException[] sqlExceptions = new SQLException[1];
                connection.getQueryServices().incrementSequences(
                        Collections.singletonList(new SequenceAllocation(key, 1)),
                        Math.max(timestamp, dataTable.getTimeStamp()), seqValues, sqlExceptions);
                if (sqlExceptions[0] != null) {
                    throw sqlExceptions[0];
                }
                long seqValue = seqValues[0];
                if (seqValue > Short.MAX_VALUE) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.TOO_MANY_INDEXES)
                            .setSchemaName(SchemaUtil.getSchemaNameFromFullName(physicalName.getString()))
                            .setTableName(SchemaUtil.getTableNameFromFullName(physicalName.getString())).build()
                            .buildException();
                }
                indexId = (short) seqValue;
            }
            // Set DEFAULT_COLUMN_FAMILY_NAME of index to match data table
            // We need this in the props so that the correct column family is created
            if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW
                    && indexId == null) {
                statement.getProps().put("", new Pair<String, Object>(DEFAULT_COLUMN_FAMILY_NAME,
                        dataTable.getDefaultFamilyName().getString()));
            }
            PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
            CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(),
                    columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null,
                    null, statement.getBindCount());
            table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, indexId,
                    statement.getIndexType());
            break;
        } catch (ConcurrentTableMutationException e) { // Can happen if parent data table changes while above is in progress
            if (retry) {
                retry = false;
                continue;
            }
            throw e;
        }
    }
    if (table == null) {
        return new MutationState(0, connection);
    }

    // In the async case, we return immediately, as the MR job needs to be triggered separately.
    if (statement.isAsync()) {
        return new MutationState(0, connection);
    }

    // If our connection is at a fixed point-in-time, we need to open a new
    // connection so that our new index table is visible.
    if (connection.getSCN() != null) {
        return buildIndexAtTimeStamp(table, statement.getTable());
    }
    return buildIndex(table, tableRef);
}
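
The unusedPkColumns set above is why a LinkedHashSet (rather than a plain HashSet) matters in createIndex: PK columns enter in declaration order, matched expressions are removed as the indexed columns are compiled, and whatever remains is appended to the index row key still in data-table order. A stripped-down sketch of that pattern, with hypothetical String elements standing in for the expression types:

import com.google.common.collect.Sets;
import java.util.List;
import java.util.Set;

// Start with every PK column in declaration order, knock out the ones the
// index already covers, and append the remainder in the original order.
List<String> pkColumns = List.of("TENANT_ID", "ENTITY_ID", "CREATED");
Set<String> indexedColumns = Set.of("ENTITY_ID");
Set<String> unusedPk = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
unusedPk.addAll(pkColumns);
unusedPk.removeAll(indexedColumns);
for (String remaining : unusedPk) {
    // appended to the index PK in data-table order: TENANT_ID, CREATED
}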

From source file: com.cinchapi.concourse.server.ConcourseServer.java

@Override
@AutoRetry
@Atomic
@ThrowsThriftExceptions
public long findOrAddKeyValue(String key, TObject value, AccessToken creds, TransactionToken transaction,
        String environment) throws TException {
    checkAccess(creds, transaction);
    AtomicSupport store = getStore(transaction, environment);
    AtomicOperation atomic = null;
    Set<Long> records = Sets.newLinkedHashSetWithExpectedSize(1);
    while (atomic == null || !atomic.commit()) {
        atomic = store.startAtomicOperation();
        try {
            records.addAll(atomic.find(key, Operator.EQUALS, value));
            if (records.isEmpty()) {
                long record = Time.now();
                addIfEmptyAtomic(key, value, record, atomic);
                records.add(record);
            }
        } catch (AtomicStateException e) {
            records.clear();
            atomic = null;
        }
    }
    if (records.size() == 1) {
        return Iterables.getOnlyElement(records);
    } else {
        throw new DuplicateEntryException(com.cinchapi.concourse.util.Strings.joinWithSpace("Found",
                records.size(), "records that match", key, "=", value));
    }
}
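
The expected size of 1 here reflects the common case: the find either matches a single existing record or, when empty, exactly one new record is added, so the set rarely grows past its initial capacity while the duplicate check (records.size() == 1) still catches the exceptional case.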