Example usage for com.google.common.primitives Longs compare

Introduction

On this page you can find example usages of com.google.common.primitives Longs compare.

Prototype

public static int compare(long a, long b) 

Document

Compares the two specified long values.
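
A minimal, self-contained sketch of the call in isolation (the values below are arbitrary). The method returns a negative value, zero, or a positive value as a is less than, equal to, or greater than b; on Java 7+ it is equivalent to Long.compare(a, b).

import com.google.common.primitives.Longs;

public class LongsCompareDemo {
    public static void main(String[] args) {
        System.out.println(Longs.compare(1L, 2L)); // negative: 1 < 2
        System.out.println(Longs.compare(5L, 5L)); // zero: 5 == 5
        System.out.println(Longs.compare(9L, 2L)); // positive: 9 > 2
    }
}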

Usage

From source file:org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

/**
 * This method does two things:
 * 1. Makes sure the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills
 * inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup for which segments have already been published, so that any newly
 * created tasks for the taskGroup start indexing from after the latest published offsets.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
    final int groupId = taskGroup.groupId;
    final List<Pair<String, TreeMap<Integer, Map<Integer, Long>>>> taskSequences = new ArrayList<>();
    final List<ListenableFuture<TreeMap<Integer, Map<Integer, Long>>>> futures = new ArrayList<>();
    final List<String> taskIds = new ArrayList<>();

    for (String taskId : taskGroup.taskIds()) {
        final ListenableFuture<TreeMap<Integer, Map<Integer, Long>>> checkpointsFuture = taskClient
                .getCheckpointsAsync(taskId, true);
        taskIds.add(taskId);
        futures.add(checkpointsFuture);
    }

    try {
        List<TreeMap<Integer, Map<Integer, Long>>> futuresResult = Futures.successfulAsList(futures)
                .get(futureTimeoutInSeconds, TimeUnit.SECONDS);

        for (int i = 0; i < futuresResult.size(); i++) {
            final TreeMap<Integer, Map<Integer, Long>> checkpoints = futuresResult.get(i);
            final String taskId = taskIds.get(i);
            if (checkpoints == null) {
                try {
                    // catch the exception in failed futures
                    futures.get(i).get();
                } catch (Exception e) {
                    log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
                    killTask(taskId);
                    taskGroup.tasks.remove(taskId);
                }
            } else if (checkpoints.isEmpty()) {
                log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
            } else {
                taskSequences.add(new Pair<>(taskId, checkpoints));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    final KafkaDataSourceMetadata latestDataSourceMetadata = (KafkaDataSourceMetadata) indexerMetadataStorageCoordinator
            .getDataSourceMetadata(dataSource);
    final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null
            && latestDataSourceMetadata.getKafkaPartitions() != null
            && ioConfig.getTopic().equals(latestDataSourceMetadata.getKafkaPartitions().getTopic());
    final Map<Integer, Long> latestOffsetsFromDb;
    if (hasValidOffsetsFromDb) {
        latestOffsetsFromDb = latestDataSourceMetadata.getKafkaPartitions().getPartitionOffsetMap();
    } else {
        latestOffsetsFromDb = null;
    }

    // order tasks of this taskGroup by the latest sequenceId
    taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));

    final Set<String> tasksToKill = new HashSet<>();
    final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
    int taskIndex = 0;

    while (taskIndex < taskSequences.size()) {
        TreeMap<Integer, Map<Integer, Long>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
        String taskId = taskSequences.get(taskIndex).lhs;
        if (earliestConsistentSequenceId.get() == -1) {
            // find the first replica task with earliest sequenceId consistent with datasource metadata in the metadata
            // store
            if (taskCheckpoints.entrySet().stream()
                    .anyMatch(sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream()
                            .allMatch(partitionOffset -> Longs.compare(partitionOffset.getValue(),
                                    latestOffsetsFromDb == null ? partitionOffset.getValue()
                                            : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(),
                                                    partitionOffset.getValue())) == 0)
                            && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey()))
                    || (pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() > 0
                            && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
                final SortedMap<Integer, Map<Integer, Long>> latestCheckpoints = new TreeMap<>(
                        taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
                log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
                taskGroup.sequenceOffsets.clear();
                taskGroup.sequenceOffsets.putAll(latestCheckpoints);
            } else {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], latestoffsets from DB [%s]", taskId,
                        taskCheckpoints, latestOffsetsFromDb);
                tasksToKill.add(taskId);
            }
        } else {
            // check consistency with taskGroup sequences
            if (taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey()) == null
                    || !(taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey())
                            .equals(taskGroup.sequenceOffsets.firstEntry().getValue()))
                    || taskCheckpoints.tailMap(taskGroup.sequenceOffsets.firstKey())
                            .size() != taskGroup.sequenceOffsets.size()) {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], taskgroup checkpoints [%s]", taskId,
                        taskCheckpoints, taskGroup.sequenceOffsets);
                tasksToKill.add(taskId);
            }
        }
        taskIndex++;
    }

    if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size()) || (taskGroup.tasks.size() == 0
            && pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() == 0)) {
        // Are we killing all tasks, or is no task left in the group?
        // Clear taskGroup state so that the latest offset information is fetched from the metadata store.
        log.warn("Clearing task group [%d] information as no valid tasks are left in the group", groupId);
        taskGroups.remove(groupId);
        partitionGroups.get(groupId).replaceAll((partition, offset) -> NOT_SET);
    }

    taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs))
            .forEach(sequenceCheckpoint -> {
                log.warn(
                        "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints[%s] or latest "
                                + "persisted offsets in metadata store [%s]",
                        sequenceCheckpoint.lhs, sequenceCheckpoint.rhs, taskGroup.sequenceOffsets,
                        latestOffsetsFromDb);
                killTask(sequenceCheckpoint.lhs);
                taskGroup.tasks.remove(sequenceCheckpoint.lhs);
            });
}
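
In the example above, Longs.compare drives the check that every partition offset in a task checkpoint matches the latest offsets persisted in the metadata store. A simplified sketch of just that check, with hypothetical maps standing in for a task checkpoint and the DB offsets:

import com.google.common.primitives.Longs;

import java.util.Map;

public class CheckpointConsistencySketch {
    public static void main(String[] args) {
        // Hypothetical stand-ins: partition -> offset for one task checkpoint
        // and for the offsets persisted in the metadata store.
        Map<Integer, Long> taskOffsets = Map.of(0, 100L, 1, 250L);
        Map<Integer, Long> dbOffsets = Map.of(0, 100L, 1, 250L);

        // A partition missing from the DB is treated as matching, mirroring
        // the getOrDefault fallback in the original code.
        boolean consistent = taskOffsets.entrySet().stream()
                .allMatch(e -> Longs.compare(e.getValue(),
                        dbOffsets.getOrDefault(e.getKey(), e.getValue())) == 0);

        System.out.println(consistent); // true for these values
    }
}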

From source file:com.google.bitcoin.core.PeerGroup.java

/**
 * Given a list of Peers, return a Peer to be used as the download peer. If you don't want PeerGroup to manage
 * download peer statuses for you, just override this and always return null.
 */
@Nullable
protected Peer selectDownloadPeer(List<Peer> peers) {
    // Characteristics to select for in order of importance:
    //  - Chain height is reasonable (majority of nodes)
    //  - High enough protocol version for the features we want (but we'll settle for less)
    //  - Ping time.
    if (peers.isEmpty())
        return null;
    // Make sure we don't select a peer that is behind/synchronizing itself.
    int mostCommonChainHeight = getMostCommonChainHeight(peers);
    List<Peer> candidates = new ArrayList<Peer>();
    for (Peer peer : peers) {
        if (peer.getBestHeight() == mostCommonChainHeight)
            candidates.add(peer);
    }
    // Of the candidates, find the peers that meet the minimum protocol version we want to target. We could select
    // the highest version we've seen on the assumption that newer versions are always better but we don't want to
    // zap peers if they upgrade early. If we can't find any peers that have our preferred protocol version or
    // better then we'll settle for the highest we found instead.
    int highestVersion = 0, preferredVersion = 0;
    // If/when PREFERRED_VERSION is not equal to vMinRequiredProtocolVersion, reenable the last test in PeerGroupTest.downloadPeerSelection
    final int PREFERRED_VERSION = FilteredBlock.MIN_PROTOCOL_VERSION;
    for (Peer peer : candidates) {
        highestVersion = Math.max(peer.getPeerVersionMessage().clientVersion, highestVersion);
        preferredVersion = Math.min(highestVersion, PREFERRED_VERSION);
    }
    List<PeerAndPing> candidates2 = new ArrayList<PeerAndPing>();
    for (Peer peer : candidates) {
        if (peer.getPeerVersionMessage().clientVersion >= preferredVersion) {
            PeerAndPing pap = new PeerAndPing();
            pap.peer = peer;
            pap.pingTime = peer.getPingTime();
            candidates2.add(pap);
        }
    }
    // Sort by ping time.
    Collections.sort(candidates2, new Comparator<PeerAndPing>() {
        public int compare(PeerAndPing peerAndPing, PeerAndPing peerAndPing2) {
            return Longs.compare(peerAndPing.pingTime, peerAndPing2.pingTime);
        }
    });
    return candidates2.get(0).peer;
}
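
The sort above uses a pre-Java 8 anonymous Comparator. A self-contained sketch of the same ordering, with a hypothetical PeerAndPing stand-in, shown both with Longs.compare and with the equivalent Java 8+ Comparator.comparingLong:

import com.google.common.primitives.Longs;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class PingSortSketch {
    // Hypothetical stand-in for the PeerAndPing holder in the snippet above.
    static class PeerAndPing {
        final long pingTime;
        PeerAndPing(long pingTime) { this.pingTime = pingTime; }
    }

    public static void main(String[] args) {
        List<PeerAndPing> candidates = new ArrayList<>(List.of(
                new PeerAndPing(120L), new PeerAndPing(35L), new PeerAndPing(80L)));

        // Same comparison as the snippet, written as a lambda.
        candidates.sort((a, b) -> Longs.compare(a.pingTime, b.pingTime));

        // Equivalent ordering without Guava, using Comparator.comparingLong.
        candidates.sort(Comparator.comparingLong(p -> p.pingTime));

        candidates.forEach(p -> System.out.println(p.pingTime)); // 35, 80, 120
    }
}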

From source file:org.neoscoinj.core.PeerGroup.java

/**
 * Given a list of Peers, return a Peer to be used as the download peer. If you don't want PeerGroup to manage
 * download peer statuses for you, just override this and always return null.
 */
@Nullable
protected Peer selectDownloadPeer(List<Peer> peers) {
    // Characteristics to select for in order of importance:
    //  - Chain height is reasonable (majority of nodes)
    //  - High enough protocol version for the features we want (but we'll settle for less)
    //  - Ping time.
    if (peers.isEmpty())
        return null;
    // Make sure we don't select a peer that is behind/synchronizing itself.
    int mostCommonChainHeight = getMostCommonChainHeight(peers);
    List<Peer> candidates = new ArrayList<Peer>();
    for (Peer peer : peers) {
        if (peer.getBestHeight() == mostCommonChainHeight)
            candidates.add(peer);
    }
    // Of the candidates, find the peers that meet the minimum protocol version we want to target. We could select
    // the highest version we've seen on the assumption that newer versions are always better but we don't want to
    // zap peers if they upgrade early. If we can't find any peers that have our preferred protocol version or
    // better then we'll settle for the highest we found instead.
    int highestVersion = 0, preferredVersion = 0;
    // If/when PREFERRED_VERSION is not equal to vMinRequiredProtocolVersion, reenable the last test in PeerGroupTest.downloadPeerSelection
    final int PREFERRED_VERSION = FilteredBlock.MIN_PROTOCOL_VERSION;
    for (Peer peer : candidates) {
        highestVersion = Math.max(peer.getPeerVersionMessage().clientVersion, highestVersion);
        preferredVersion = Math.min(highestVersion, PREFERRED_VERSION);
    }
    List<PeerAndPing> candidates2 = new ArrayList<PeerAndPing>();
    for (Peer peer : candidates) {
        if (peer.getPeerVersionMessage().clientVersion >= preferredVersion) {
            PeerAndPing pap = new PeerAndPing();
            pap.peer = peer;
            pap.pingTime = peer.getPingTime();
            candidates2.add(pap);
        }
    }
    // Sort by ping time.
    Collections.sort(candidates2, new Comparator<PeerAndPing>() {
        @Override
        public int compare(PeerAndPing peerAndPing, PeerAndPing peerAndPing2) {
            return Longs.compare(peerAndPing.pingTime, peerAndPing2.pingTime);
        }
    });
    return candidates2.get(0).peer;
}

From source file:org.apache.phoenix.schema.PDataType.java

public final int compareTo(byte[] lhs, int lhsOffset, int lhsLength, SortOrder lhsSortOrder, byte[] rhs,
        int rhsOffset, int rhsLength, SortOrder rhsSortOrder, PDataType rhsType) {
    Preconditions.checkNotNull(lhsSortOrder);
    Preconditions.checkNotNull(rhsSortOrder);
    if (this.isBytesComparableWith(rhsType)) { // directly compare the bytes
        return compareTo(lhs, lhsOffset, lhsLength, lhsSortOrder, rhs, rhsOffset, rhsLength, rhsSortOrder);
    }
    PDataCodec lhsCodec = this.getCodec();
    if (lhsCodec == null) { // no lhs native type representation, so convert rhsType to bytes representation of lhsType
        byte[] rhsConverted = this.toBytes(this.toObject(rhs, rhsOffset, rhsLength, rhsType, rhsSortOrder));
        if (rhsSortOrder == SortOrder.DESC) {
            rhsSortOrder = SortOrder.ASC;
        }
        if (lhsSortOrder == SortOrder.DESC) {
            lhs = SortOrder.invert(lhs, lhsOffset, new byte[lhsLength], 0, lhsLength);
        }
        return Bytes.compareTo(lhs, lhsOffset, lhsLength, rhsConverted, 0, rhsConverted.length);
    }
    PDataCodec rhsCodec = rhsType.getCodec();
    if (rhsCodec == null) {
        byte[] lhsConverted = rhsType.toBytes(rhsType.toObject(lhs, lhsOffset, lhsLength, this, lhsSortOrder));
        if (lhsSortOrder == SortOrder.DESC) {
            lhsSortOrder = SortOrder.ASC;
        }
        if (rhsSortOrder == SortOrder.DESC) {
            rhs = SortOrder.invert(rhs, rhsOffset, new byte[rhsLength], 0, rhsLength);
        }
        return Bytes.compareTo(lhsConverted, 0, lhsConverted.length, rhs, rhsOffset, rhsLength);
    }
    // convert to native and compare
    if (this.isCoercibleTo(PDataType.LONG) && rhsType.isCoercibleTo(PDataType.LONG)) { // native long to long comparison
        return Longs.compare(this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder),
                rhsType.getCodec().decodeLong(rhs, rhsOffset, rhsSortOrder));
    } else if (isDoubleOrFloat(this) && isDoubleOrFloat(rhsType)) { // native double to double comparison
        return Doubles.compare(this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder),
                rhsType.getCodec().decodeDouble(rhs, rhsOffset, rhsSortOrder));
    } else { // native float/double to long comparison
        float fvalue = 0.0F;
        double dvalue = 0.0;
        long lvalue = 0;
        boolean isFloat = false;
        int invert = 1;

        if (this.isCoercibleTo(PDataType.LONG)) {
            lvalue = this.getCodec().decodeLong(lhs, lhsOffset, lhsSortOrder);
        } else if (this == PDataType.FLOAT) {
            isFloat = true;
            fvalue = this.getCodec().decodeFloat(lhs, lhsOffset, lhsSortOrder);
        } else if (this.isCoercibleTo(PDataType.DOUBLE)) {
            dvalue = this.getCodec().decodeDouble(lhs, lhsOffset, lhsSortOrder);
        }
        if (rhsType.isCoercibleTo(PDataType.LONG)) {
            lvalue = rhsType.getCodec().decodeLong(rhs, rhsOffset, rhsSortOrder);
        } else if (rhsType == PDataType.FLOAT) {
            invert = -1;
            isFloat = true;
            fvalue = rhsType.getCodec().decodeFloat(rhs, rhsOffset, rhsSortOrder);
        } else if (rhsType.isCoercibleTo(PDataType.DOUBLE)) {
            invert = -1;
            dvalue = rhsType.getCodec().decodeDouble(rhs, rhsOffset, rhsSortOrder);
        }
        // Invert the comparison if float/double value is on the RHS
        return invert * (isFloat ? compareFloatToLong(fvalue, lvalue) : compareDoubleToLong(dvalue, lvalue));
    }
}
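
In the mixed-type branch above, the comparison is always computed as float/double versus long, and the sign is flipped when the floating-point value came from the right-hand side. A sketch of that inversion, with a simplified stand-in for Phoenix's compareDoubleToLong helper (the real helper handles precision edge cases this version ignores):

import com.google.common.primitives.Doubles;

public class InvertedComparisonSketch {
    // Simplified stand-in for PDataType.compareDoubleToLong: negative, zero,
    // or positive as the double is less than, equal to, or greater than the long.
    static int compareDoubleToLong(double d, long l) {
        return Doubles.compare(d, (double) l);
    }

    public static void main(String[] args) {
        long lhs = 3L;    // long value on the left-hand side
        double rhs = 2.5; // double value on the right-hand side

        // The helper only knows "double vs long", so when the double sits on
        // the RHS we compute that comparison and flip the sign, exactly like
        // the invert factor in the snippet above.
        int result = -1 * compareDoubleToLong(rhs, lhs);
        System.out.println(result); // positive: 3 > 2.5
    }
}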