Example usage for com.google.common.primitives.Longs.compare

Introduction

This page lists example usages of com.google.common.primitives.Longs.compare collected from open-source projects.

Prototype

public static int compare(long a, long b) 

Documentation

Compares the two specified long values. The sign of the value returned is the same as that of ((Long) a).compareTo(b): negative if a is less than b, zero if they are equal, and positive if a is greater than b.
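
Longs.compare is most often used to implement Comparable.compareTo or a Comparator over long fields, as the examples below show, since it avoids the overflow pitfall of returning the difference (int) (a - b). A minimal, self-contained sketch (the Event class and its timestamp field are illustrative, not taken from any of the projects below):

import com.google.common.primitives.Longs;

public class Event implements Comparable<Event> {

    private final long timestamp;

    public Event(long timestamp) {
        this.timestamp = timestamp;
    }

    @Override
    public int compareTo(Event other) {
        // Longs.compare returns the sign of the comparison directly, avoiding
        // the wrap-around that (int) (this.timestamp - other.timestamp) can
        // produce when the two values are far apart.
        return Longs.compare(this.timestamp, other.timestamp);
    }
}

Note that since Java 7 the JDK's java.lang.Long.compare(long, long) behaves identically, so Longs.compare is mainly of interest to code bases that already depend on Guava's primitive utilities.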

Usage

From source file:org.bitcoinj_extra.core.Coin.java
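
A minimal Comparable implementation: two Coin instances are ordered by their underlying long value.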

@Override
public int compareTo(final Coin other) {
    return Longs.compare(this.value, other.value);
}

From source file:org.obm.push.mail.EmailChanges.java
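
This snippet flattens additions, changes, and deletions into a single list of entries and sorts it by email UID in descending order (note the swapped arguments o2, o1 in the comparator).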

private Iterable<EmailPartitionEntry> toEntries() {
    return FluentIterable
            .from(Iterables.concat(
                    FluentIterable.from(additions).transform(new EntryProducer(EmailPartitionEntry.Type.ADD)),
                    FluentIterable.from(changes).transform(new EntryProducer(EmailPartitionEntry.Type.CHANGE)),
                    FluentIterable.from(deletions)
                            .transform(new EntryProducer(EmailPartitionEntry.Type.DELETION))))
            .toSortedList(new Comparator<EmailPartitionEntry>() {

                @Override
                public int compare(EmailPartitionEntry o1, EmailPartitionEntry o2) {
                    return Longs.compare(o2.getEmail().getUid(), o1.getEmail().getUid());
                }
            });
}

From source file:org.apache.cassandra.io.compress.CompressionMetadata.java
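
Cassandra keeps the chunks in a TreeSet whose comparator orders them by offset with Longs.compare; the set simultaneously sorts the chunks and eliminates duplicates.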

/**
 * @param sections Collection of sections in uncompressed file
 * @return Array of chunks which corresponds to given sections of uncompressed file, sorted by chunk offset
 */
public Chunk[] getChunksForSections(Collection<Pair<Long, Long>> sections) {
    // use SortedSet to eliminate duplicates and sort by chunk offset
    SortedSet<Chunk> offsets = new TreeSet<Chunk>(new Comparator<Chunk>() {
        public int compare(Chunk o1, Chunk o2) {
            return Longs.compare(o1.offset, o2.offset);
        }
    });
    for (Pair<Long, Long> section : sections) {
        int startIndex = (int) (section.left / parameters.chunkLength());
        int endIndex = (int) (section.right / parameters.chunkLength());
        endIndex = section.right % parameters.chunkLength() == 0 ? endIndex - 1 : endIndex;
        for (int i = startIndex; i <= endIndex; i++) {
            long offset = i * 8L;
            long chunkOffset = chunkOffsets.getLong(offset);
            long nextChunkOffset = offset + 8 == chunkOffsetsSize ? compressedFileLength
                    : chunkOffsets.getLong(offset + 8);
            offsets.add(new Chunk(chunkOffset, (int) (nextChunkOffset - chunkOffset - 4))); // "4" bytes reserved for checksum
        }
    }
    return offsets.toArray(new Chunk[offsets.size()]);
}

From source file:org.apache.phoenix.schema.types.PDataType.java
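
In Phoenix's generic byte-level type comparison, Longs.compare handles the case where both operands are coercible to PLong; see the call near the end of the method.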

public final int compareTo(byte[] lhs, int lhsOffset, int lhsLength, SortOrder lhsSortOrder, byte[] rhs,
        int rhsOffset, int rhsLength, SortOrder rhsSortOrder, PDataType rhsType) {
    Preconditions.checkNotNull(lhsSortOrder);
    Preconditions.checkNotNull(rhsSortOrder);
    if (this.isBytesComparableWith(rhsType)) { // directly compare the bytes
        // Special case as we may be comparing two arrays that have different separator characters due to PHOENIX-2067
        if (!this.isArrayType() || !rhsType.isArrayType()
                || PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset,
                        lhsLength) == PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs,
                                rhsOffset, rhsLength)) {
            return compareTo(lhs, lhsOffset, lhsLength, lhsSortOrder, rhs, rhsOffset, rhsLength, rhsSortOrder);
        }
    }
    PDataCodec lhsCodec = this.getCodec();
    if (lhsCodec == null) {
        byte[] rhsConverted;
        Object o = this.toObject(rhs, rhsOffset, rhsLength, rhsType, rhsSortOrder);

        // No lhs native type representation, so convert rhsType to bytes representation of lhs type
        // Due to PHOENIX-2067, favor the array that is already in the new format so we don't have to convert both.
        if (this.isArrayType() && PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset,
                lhsLength) == PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset,
                        rhsLength)) {
            rhsConverted = ((PArrayDataType) this).toBytes(o, PArrayDataType.arrayBaseType(this), lhsSortOrder,
                    PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs, lhsOffset, lhsLength));
        } else {
            rhsConverted = this.toBytes(o);
            if (rhsSortOrder == SortOrder.DESC) {
                rhsSortOrder = SortOrder.ASC;
            }
            if (lhsSortOrder == SortOrder.DESC) {
                lhs = SortOrder.invert(lhs, lhsOffset, new byte[lhsLength], 0, lhsLength);
            }
        }
        return Bytes.compareTo(lhs, lhsOffset, lhsLength, rhsConverted, 0, rhsConverted.length);
    }
    PDataCodec rhsCodec = rhsType.getCodec();
    if (rhsCodec == null) {
        byte[] lhsConverted;
        Object o = rhsType.toObject(lhs, lhsOffset, lhsLength, this, lhsSortOrder);

        // No rhs native type representation, so convert lhsType to bytes representation of rhs type
        // Due to PHOENIX-2067, favor the array that is already in the new format so we don't have to convert both.
        if (rhsType.isArrayType() && PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs,
                rhsOffset, rhsLength) == PArrayDataType.isRowKeyOrderOptimized(this, lhsSortOrder, lhs,
                        lhsOffset, lhsLength)) {
            lhsConverted = ((PArrayDataType) rhsType).toBytes(o, PArrayDataType.arrayBaseType(rhsType),
                    rhsSortOrder,
                    PArrayDataType.isRowKeyOrderOptimized(rhsType, rhsSortOrder, rhs, rhsOffset, rhsLength));
        } else {
            lhsConverted = rhsType.toBytes(o);
            if (lhsSortOrder == SortOrder.DESC) {
                lhsSortOrder = SortOrder.ASC;
            }
            if (rhsSortOrder == SortOrder.DESC) {
                rhs = SortOrder.invert(rhs, rhsOffset, new byte[rhsLength], 0, rhsLength);
            }
        }
        return Bytes.compareTo(lhsConverted, 0, lhsConverted.length, rhs, rhsOffset, rhsLength);
    }
    // convert to native and compare
    if (this.isCoercibleTo(PLong.INSTANCE) && rhsType.isCoercibleTo(PLong.INSTANCE)) { // native long to long comparison
        return Longs.compare(this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder),
                rhsType.getCodec().decodeLong(rhs, rhsOffset, rhsSortOrder));
    } else if (isDoubleOrFloat(this) && isDoubleOrFloat(rhsType)) { // native double to double comparison
        return Doubles.compare(this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder),
                rhsType.getCodec().decodeDouble(rhs, rhsOffset, rhsSortOrder));
    } else { // native float/double to long comparison
        float fvalue = 0.0F;
        double dvalue = 0.0;
        long lvalue = 0;
        boolean isFloat = false;
        int invert = 1;

        if (this.isCoercibleTo(PLong.INSTANCE)) {
            lvalue = this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder);
        } else if (this.getClass() == PFloat.class) {
            isFloat = true;
            fvalue = this.getCodec().decodeFloat(lhs, lhsOffset, lhsSortOrder);
        } else if (this.isCoercibleTo(PDouble.INSTANCE)) {
            dvalue = this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder);
        }
        if (rhsType.isCoercibleTo(PLong.INSTANCE)) {
            lvalue = rhsType.getCodec().decodeLong(rhs, rhsOffset, rhsSortOrder);
        } else if (rhsType == PFloat.INSTANCE) {
            invert = -1;
            isFloat = true;
            fvalue = rhsType.getCodec().decodeFloat(rhs, rhsOffset, rhsSortOrder);
        } else if (rhsType.isCoercibleTo(PDouble.INSTANCE)) {
            invert = -1;
            dvalue = rhsType.getCodec().decodeDouble(rhs, rhsOffset, rhsSortOrder);
        }
        // Invert the comparison if float/double value is on the RHS
        return invert * (isFloat ? compareFloatToLong(fvalue, lvalue) : compareDoubleToLong(dvalue, lvalue));
    }
}

From source file:org.jclouds.ecs.domain.ServerImage.java
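
A Comparable implementation with a null guard before delegating to Longs.compare on the image id.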

@Override
public int compareTo(ServerImage that) {
    if (that == null)
        return 1;
    if (this == that)
        return 0;
    return Longs.compare(id, that.getId());
}

From source file:org.apache.bookkeeper.mledger.impl.EntryCacheImpl.java
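
BookKeeper orders entry caches by their size.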

@Override
public int compareTo(EntryCache other) {
    return Longs.compare(getSize(), other.getSize());
}

From source file:org.apache.phoenix.execute.PhoenixTxnIndexMutationGenerator.java
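
During a transaction rollback, cells are sorted by timestamp (via Longs.compare), then by type, column family, and qualifier, so that they can be processed in time batches from oldest to newest.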

private void processRollback(IndexMaintainer maintainer, byte[] txRollbackAttribute, ResultScanner scanner,
        Set<ColumnReference> mutableColumns, Collection<Mutation> indexUpdates,
        Map<ImmutableBytesPtr, MultiMutation> mutations, boolean replyWrite, final PTable table)
        throws IOException, SQLException {
    if (scanner != null) {
        Result result;
        // Loop through last committed row state plus all new rows associated with current transaction
        // to generate point delete markers for all index rows that were added. We don't have Tephra
        // manage index rows in change sets because we don't want to incur the additional
        // memory overhead and do not need to do conflict detection on index rows.
        ColumnReference emptyColRef = new ColumnReference(maintainer.getDataEmptyKeyValueCF(),
                maintainer.getEmptyKeyValueQualifier());
        while ((result = scanner.next()) != null) {
            Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow()));
            // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest
            // (as if we're "replaying" them in time order).
            List<Cell> cells = result.listCells();
            Collections.sort(cells, new Comparator<Cell>() {

                @Override
                public int compare(Cell o1, Cell o2) {
                    int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp());
                    if (c != 0)
                        return c;
                    c = o1.getTypeByte() - o2.getTypeByte();
                    if (c != 0)
                        return c;
                    c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(),
                            o2.getFamilyArray(), o2.getFamilyOffset(), o2.getFamilyLength());
                    if (c != 0)
                        return c;
                    return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(),
                            o1.getQualifierLength(), o2.getQualifierArray(), o2.getQualifierOffset(),
                            o2.getQualifierLength());
                }

            });
            int i = 0;
            int nCells = cells.size();
            Result oldResult = null, newResult;
            long readPtr = phoenixTransactionContext.getReadPointer();
            do {
                boolean hasPuts = false;
                LinkedList<Cell> singleTimeCells = Lists.newLinkedList();
                long writePtr;
                Cell cell = cells.get(i);
                do {
                    hasPuts |= cell.getTypeByte() == KeyValue.Type.Put.getCode();
                    writePtr = cell.getTimestamp();
                    ListIterator<Cell> it = singleTimeCells.listIterator();
                    do {
                        // Add at the beginning of the list to match the expected HBase
                        // newest to oldest sort order (which TxTableState relies on
                        // with the Result.getLatestColumnValue() calls). However, we
                        // still want to add Cells in the expected order for each time
                        // bound as otherwise we won't find it in our old state.
                        it.add(cell);
                    } while (++i < nCells && (cell = cells.get(i)).getTimestamp() == writePtr);
                } while (i < nCells && cell.getTimestamp() <= readPtr);

                // Generate point delete markers for the prior row deletion of the old index value.
                // The write timestamp is the next timestamp, not the current timestamp,
                // as the earliest cells are the current values for the row (and we don't
                // want to delete the current row).
                if (oldResult != null) {
                    TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef, oldResult);
                    generateDeletes(indexUpdates, txRollbackAttribute, state, maintainer, replyWrite, table);
                }
                // Generate point delete markers for the new index value.
                // If our time batch doesn't have Puts (i.e. we have only Deletes), then do not
                // generate deletes. We would have generated the delete above based on the state
                // of the previous row. The delete markers do not give us the state we need to
                // delete.
                if (hasPuts) {
                    newResult = Result.create(singleTimeCells);
                    // First row may represent the current state which we don't want to delete
                    if (writePtr > readPtr) {
                        TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef,
                                newResult);
                        generateDeletes(indexUpdates, txRollbackAttribute, state, maintainer, replyWrite,
                                table);
                    }
                    oldResult = newResult;
                } else {
                    oldResult = null;
                }
            } while (i < nCells);
        }
    }
}

From source file:org.apache.phoenix.execute.PhoenixTxIndexMutationGenerator.java
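
A near-identical rollback routine from a sibling class, built around the same timestamp-first cell comparator.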

private void processRollback(PhoenixIndexMetaData indexMetaData, byte[] txRollbackAttribute,
        ResultScanner scanner, Set<ColumnReference> mutableColumns,
        Collection<Pair<Mutation, byte[]>> indexUpdates, Map<ImmutableBytesPtr, MultiMutation> mutations)
        throws IOException {
    if (scanner != null) {
        long readPtr = indexMetaData.getTransactionContext().getReadPointer();
        Result result;
        // Loop through last committed row state plus all new rows associated with current transaction
        // to generate point delete markers for all index rows that were added. We don't have Tephra
        // manage index rows in change sets because we don't want to incur the additional
        // memory overhead and do not need to do conflict detection on index rows.
        ColumnReference emptyColRef = new ColumnReference(
                indexMetaData.getIndexMaintainers().get(0).getDataEmptyKeyValueCF(),
                indexMetaData.getIndexMaintainers().get(0).getEmptyKeyValueQualifier());
        while ((result = scanner.next()) != null) {
            Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow()));
            // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest
            // (as if we're "replaying" them in time order).
            List<Cell> cells = result.listCells();
            Collections.sort(cells, new Comparator<Cell>() {

                @Override
                public int compare(Cell o1, Cell o2) {
                    int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp());
                    if (c != 0)
                        return c;
                    c = o1.getTypeByte() - o2.getTypeByte();
                    if (c != 0)
                        return c;
                    c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(),
                            o2.getFamilyArray(), o2.getFamilyOffset(), o2.getFamilyLength());
                    if (c != 0)
                        return c;
                    return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(),
                            o1.getQualifierLength(), o2.getQualifierArray(), o2.getQualifierOffset(),
                            o2.getQualifierLength());
                }

            });
            int i = 0;
            int nCells = cells.size();
            Result oldResult = null, newResult;
            do {
                boolean hasPuts = false;
                LinkedList<Cell> singleTimeCells = Lists.newLinkedList();
                long writePtr;
                Cell cell = cells.get(i);
                do {
                    hasPuts |= cell.getTypeByte() == KeyValue.Type.Put.getCode();
                    writePtr = cell.getTimestamp();
                    ListIterator<Cell> it = singleTimeCells.listIterator();
                    do {
                        // Add at the beginning of the list to match the expected HBase
                        // newest to oldest sort order (which TxTableState relies on
                        // with the Result.getLatestColumnValue() calls). However, we
                        // still want to add Cells in the expected order for each time
                        // bound as otherwise we won't find it in our old state.
                        it.add(cell);
                    } while (++i < nCells && (cell = cells.get(i)).getTimestamp() == writePtr);
                } while (i < nCells && cell.getTimestamp() <= readPtr);

                // Generate point delete markers for the prior row deletion of the old index value.
                // The write timestamp is the next timestamp, not the current timestamp,
                // as the earliest cells are the current values for the row (and we don't
                // want to delete the current row).
                if (oldResult != null) {
                    TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef, oldResult);
                    generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                }
                // Generate point delete markers for the new index value.
                // If our time batch doesn't have Puts (i.e. we have only Deletes), then do not
                // generate deletes. We would have generated the delete above based on the state
                // of the previous row. The delete markers do not give us the state we need to
                // delete.
                if (hasPuts) {
                    newResult = Result.create(singleTimeCells);
                    // First row may represent the current state which we don't want to delete
                    if (writePtr > readPtr) {
                        TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef,
                                newResult);
                        generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                    }
                    oldResult = newResult;
                } else {
                    oldResult = null;
                }
            } while (i < nCells);
        }
    }
}

From source file:org.grouplens.lenskit.vectors.SparseVector.java
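
Here Longs.compare acts as a tie-breaker on the key, so that sorting the keys by value yields a reproducible order regardless of the underlying sort algorithm.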

/**
 * Get the keys of this vector sorted by the value of the items
 * stored for each key.
 *
 * @param decreasing If {@code true}, sort in decreasing order.
 * @return The sorted list of keys of this vector.
 */
public LongArrayList keysByValue(boolean decreasing) {
    long[] skeys = keySet().toLongArray();

    LongComparator cmp;
    // Set up the comparator. We use the key as a secondary comparison to get
    // a reproducible sort irrespective of sorting algorithm.
    if (decreasing) {
        cmp = new AbstractLongComparator() {
            @Override
            public int compare(long k1, long k2) {
                int c = Double.compare(get(k2), get(k1));
                if (c != 0) {
                    return c;
                } else {
                    return Longs.compare(k1, k2);
                }
            }
        };
    } else {
        cmp = new AbstractLongComparator() {
            @Override
            public int compare(long k1, long k2) {
                int c = Double.compare(get(k1), get(k2));
                if (c != 0) {
                    return c;
                } else {
                    return Longs.compare(k1, k2);
                }
            }
        };
    }

    LongArrays.quickSort(skeys, cmp);
    return LongArrayList.wrap(skeys);
}

From source file:com.b2international.snowowl.snomed.exporter.server.net4j.SnomedExportServerIndication.java
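
Code system versions are filtered by an effective-date range and then sorted by effective date with a Longs.compare lambda before each version branch is exported.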

private void executeDeltaExport(final RevisionIndex revisionIndex, final OMMonitor monitor) {

    if (startEffectiveTime == null && endEffectiveTime == null) {

        executeExport(revisionIndex, branchPath, true, monitor);

    } else {

        List<CodeSystemVersionEntry> sortedVersions = FluentIterable.from(getCodeSystemVersions())
                .filter(new Predicate<CodeSystemVersionEntry>() {
                    @Override
                    public boolean apply(CodeSystemVersionEntry input) {

                        Date versionEffectiveDate = new Date(input.getEffectiveDate());

                        if (startEffectiveTime != null && endEffectiveTime != null) {
                            return (versionEffectiveDate.after(startEffectiveTime)
                                    || versionEffectiveDate.equals(startEffectiveTime))
                                    && (versionEffectiveDate.before(endEffectiveTime)
                                            || versionEffectiveDate.equals(endEffectiveTime));
                        } else if (startEffectiveTime == null) {
                            return versionEffectiveDate.before(endEffectiveTime)
                                    || versionEffectiveDate.equals(endEffectiveTime);
                        } else if (endEffectiveTime == null) {
                            return versionEffectiveDate.after(startEffectiveTime)
                                    || versionEffectiveDate.equals(startEffectiveTime);
                        }

                        return false;
                    }

                }).toSortedList((o1, o2) -> Longs.compare(o1.getEffectiveDate(), o2.getEffectiveDate()));

        if (sortedVersions.isEmpty()) {
            String message = null;
            if (startEffectiveTime != null && endEffectiveTime != null) {
                message = String.format("No version branch found to export between the effective dates %s - %s",
                        Dates.formatByHostTimeZone(startEffectiveTime),
                        Dates.formatByHostTimeZone(endEffectiveTime));
            } else if (startEffectiveTime == null) {
                message = String.format("No version branch found to export before the effective date %s",
                        Dates.formatByHostTimeZone(endEffectiveTime));
            } else if (endEffectiveTime == null) {
                message = String.format("No version branch found to export after the effective date %s",
                        Dates.formatByHostTimeZone(startEffectiveTime));
            }
            throw new BadRequestException(message);
        }

        List<String> versionBranchPaths = convertToBranchPaths(sortedVersions);

        for (String versionBranchPath : versionBranchPaths) {
            executeExport(revisionIndex, versionBranchPath, false, monitor);
        }

        if (includeUnpublished) {
            executeExport(revisionIndex, branchPath, true, monitor);
        }
    }

}