Example usage for com.google.common.primitives Longs compare

Introduction

On this page you can find example usages of com.google.common.primitives.Longs#compare, collected from open source projects.

Prototype

public static int compare(long a, long b) 

Document

Compares the two specified long values.
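
The result is negative if a < b, zero if a == b, and positive if a > b, so the method is a drop-in body for a Comparator over long fields and, unlike returning a - b, cannot overflow. As a quick orientation before the project examples below, here is a minimal sketch; the class name and sample data are illustrative, and only Guava is assumed on the classpath.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

import com.google.common.primitives.Longs;

public class LongsCompareExample {
    public static void main(String[] args) {
        // The sign of the result indicates the ordering.
        System.out.println(Longs.compare(3L, 7L)); // negative
        System.out.println(Longs.compare(7L, 7L)); // 0
        System.out.println(Longs.compare(9L, 7L)); // positive

        // The recurring pattern in the examples below: ordering objects by a long field.
        List<long[]> timestamps = Arrays.asList(new long[] { 30L }, new long[] { 10L }, new long[] { 20L });
        timestamps.sort(new Comparator<long[]>() {
            @Override
            public int compare(long[] t1, long[] t2) {
                return Longs.compare(t1[0], t2[0]);
            }
        });
        System.out.println(Arrays.toString(timestamps.get(0))); // [10]
    }
}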

Usage

From source file: com.comphenix.protocol.async.AsyncMarker.java

@Override
public int compareTo(AsyncMarker o) {
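    // A null argument compares as less than any marker; otherwise markers are ordered by their new sending index.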
    if (o == null)
        return 1;
    else
        return Longs.compare(getNewSendingIndex(), o.getNewSendingIndex());
}

From source file: org.apache.tajo.datum.IntervalDatum.java

@Override
public int compareTo(Datum datum) {
    if (datum.kind() == TajoDataTypes.Type.INTERVAL) {
        return Longs.compare(asInt8(), datum.asInt8());
    } else if (datum instanceof NullDatum || datum.isNull()) {
        return -1;
    } else {
        throw new InvalidOperationException(datum.type());
    }
}

From source file: org.apache.druid.indexing.kafka.IncrementalPublishingKafkaIndexTaskRunner.java

private TaskStatus runInternal(TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");

    startTime = DateTimes.nowUtc();
    status = Status.STARTING;
    this.toolbox = toolbox;

    if (!restoreSequences()) {
        final TreeMap<Integer, Map<Integer, Long>> checkpoints = getCheckPointsFromContext(toolbox, task);
        if (checkpoints != null) {
            Iterator<Entry<Integer, Map<Integer, Long>>> sequenceOffsets = checkpoints.entrySet().iterator();
            Map.Entry<Integer, Map<Integer, Long>> previous = sequenceOffsets.next();
            while (sequenceOffsets.hasNext()) {
                Map.Entry<Integer, Map<Integer, Long>> current = sequenceOffsets.next();
                sequences.add(new SequenceMetadata(previous.getKey(),
                        StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
                        previous.getValue(), current.getValue(), true));
                previous = current;
            }
            sequences.add(new SequenceMetadata(previous.getKey(),
                    StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
                    previous.getValue(), endOffsets, false));
        } else {
            sequences
                    .add(new SequenceMetadata(0, StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), 0),
                            ioConfig.getStartPartitions().getPartitionOffsetMap(), endOffsets, false));
        }
    }
    log.info("Starting with sequences:  %s", sequences);

    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(task.getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }

    runThread = Thread.currentThread();

    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(task.getDataSchema(),
            new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler().addMonitor(
            TaskRealtimeMetricsMonitorBuilder.build(task, fireDepartmentForMetrics, rowIngestionMeters));

    final String lookupTier = task.getContextValue(RealtimeIndexTask.CTX_KEY_LOOKUP_TIER);
    LookupNodeService lookupNodeService = lookupTier == null ? toolbox.getLookupNodeService()
            : new LookupNodeService(lookupTier);
    DiscoveryDruidNode discoveryDruidNode = new DiscoveryDruidNode(toolbox.getDruidNode(),
            DruidNodeDiscoveryProvider.NODE_TYPE_PEON, ImmutableMap.of(toolbox.getDataNodeService().getName(),
                    toolbox.getDataNodeService(), lookupNodeService.getName(), lookupNodeService));

    Throwable caughtExceptionOuter = null;
    try (final KafkaConsumer<byte[], byte[]> consumer = task.newConsumer()) {
        toolbox.getDataSegmentServerAnnouncer().announce();
        toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);

        appenderator = task.newAppenderator(fireDepartmentMetrics, toolbox);
        driver = task.newDriver(appenderator, toolbox, fireDepartmentMetrics);

        final String topic = ioConfig.getStartPartitions().getTopic();

        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            // no persist has happened so far
            // so either this is a brand new task or replacement of a failed task
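            // The Longs.compare(...) >= 0 check below verifies that each partition's restored
            // sequence start offset is at or beyond the start offset configured in the ioConfig.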
            Preconditions.checkState(
                    sequences.get(0).startOffsets.entrySet().stream()
                            .allMatch(partitionOffsetEntry -> Longs.compare(partitionOffsetEntry.getValue(),
                                    ioConfig.getStartPartitions().getPartitionOffsetMap()
                                            .get(partitionOffsetEntry.getKey())) >= 0),
                    "Sequence offsets are not compatible with start offsets of task");
            nextOffsets.putAll(sequences.get(0).startOffsets);
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final KafkaPartitions restoredNextPartitions = toolbox.getObjectMapper()
                    .convertValue(restoredMetadataMap.get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
            nextOffsets.putAll(restoredNextPartitions.getPartitionOffsetMap());

            // Sanity checks.
            if (!restoredNextPartitions.getTopic().equals(ioConfig.getStartPartitions().getTopic())) {
                throw new ISE("WTF?! Restored topic[%s] but expected topic[%s]",
                        restoredNextPartitions.getTopic(), ioConfig.getStartPartitions().getTopic());
            }

            if (!nextOffsets.keySet().equals(ioConfig.getStartPartitions().getPartitionOffsetMap().keySet())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets.keySet(),
                        ioConfig.getStartPartitions().getPartitionOffsetMap().keySet());
            }
            // sequences size can be 0 only when all sequences got published and task stopped before it could finish
            // which is super rare
            if (sequences.size() == 0 || sequences.get(sequences.size() - 1).isCheckpointed()) {
                this.endOffsets.putAll(sequences.size() == 0 ? nextOffsets
                        : sequences.get(sequences.size() - 1).getEndOffsets());
                log.info("End offsets changed to [%s]", endOffsets);
            }
        }

        // Set up committer.
        final Supplier<Committer> committerSupplier = () -> {
            final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);
            lastPersistedOffsets.clear();
            lastPersistedOffsets.putAll(snapshot);

            return new Committer() {
                @Override
                public Object getMetadata() {
                    return ImmutableMap.of(METADATA_NEXT_PARTITIONS,
                            new KafkaPartitions(ioConfig.getStartPartitions().getTopic(), snapshot));
                }

                @Override
                public void run() {
                    // Do nothing.
                }
            };
        };

        // restart publishing of sequences (if any)
        maybePersistAndPublishSequences(committerSupplier);

        Set<Integer> assignment = assignPartitionsAndSeekToNext(consumer, topic);

        ingestionState = IngestionState.BUILD_SEGMENTS;

        // Main loop.
        // Could eventually support leader/follower mode (for keeping replicas more in sync)
        boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        Throwable caughtExceptionInner = null;
        try {
            while (stillReading) {
                if (possiblyPause()) {
                    // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
                    // partitions upon resuming. This is safe even if the end offsets have not been modified.
                    assignment = assignPartitionsAndSeekToNext(consumer, topic);

                    if (assignment.isEmpty()) {
                        log.info("All partitions have been fully read");
                        publishOnStop.set(true);
                        stopRequested.set(true);
                    }
                }

                // if stop is requested or task's end offset is set by call to setEndOffsets method with finish set to true
                if (stopRequested.get() || sequences.get(sequences.size() - 1).isCheckpointed()) {
                    status = Status.PUBLISHING;
                    break;
                }

                if (backgroundThreadException != null) {
                    throw new RuntimeException(backgroundThreadException);
                }

                checkPublishAndHandoffFailure();

                maybePersistAndPublishSequences(committerSupplier);

                // The retrying business is because the KafkaConsumer throws OffsetOutOfRangeException if the seeked-to
                // offset is not present in the topic-partition. This can happen if we're asking a task to read from data
                // that has not been written yet (which is totally legitimate). So let's wait for it to show up.
                ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
                try {
                    records = consumer.poll(KafkaIndexTask.POLL_TIMEOUT_MILLIS);
                } catch (OffsetOutOfRangeException e) {
                    log.warn("OffsetOutOfRangeException with message [%s]", e.getMessage());
                    possiblyResetOffsetsOrWait(e.offsetOutOfRangePartitions(), consumer, toolbox);
                    stillReading = !assignment.isEmpty();
                }

                SequenceMetadata sequenceToCheckpoint = null;
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    log.trace("Got topic[%s] partition[%d] offset[%,d].", record.topic(), record.partition(),
                            record.offset());

                    if (record.offset() < endOffsets.get(record.partition())) {
                        if (record.offset() != nextOffsets.get(record.partition())) {
                            if (ioConfig.isSkipOffsetGaps()) {
                                log.warn("Skipped to offset[%,d] after offset[%,d] in partition[%d].",
                                        record.offset(), nextOffsets.get(record.partition()),
                                        record.partition());
                            } else {
                                throw new ISE("WTF?! Got offset[%,d] after offset[%,d] in partition[%d].",
                                        record.offset(), nextOffsets.get(record.partition()),
                                        record.partition());
                            }
                        }

                        try {
                            final byte[] valueBytes = record.value();
                            final List<InputRow> rows = valueBytes == null
                                    ? Utils.nullableListOf((InputRow) null)
                                    : parser.parseBatch(ByteBuffer.wrap(valueBytes));
                            boolean isPersistRequired = false;

                            final SequenceMetadata sequenceToUse = sequences.stream()
                                    .filter(sequenceMetadata -> sequenceMetadata.canHandle(record)).findFirst()
                                    .orElse(null);

                            if (sequenceToUse == null) {
                                throw new ISE(
                                        "WTH?! cannot find any valid sequence for record with partition [%d] and offset [%d]. Current sequences: %s",
                                        record.partition(), record.offset(), sequences);
                            }

                            for (InputRow row : rows) {
                                if (row != null && task.withinMinMaxRecordTime(row)) {
                                    final AppenderatorDriverAddResult addResult = driver.add(row,
                                            sequenceToUse.getSequenceName(), committerSupplier,
                                            // skip segment lineage check as there will always be one segment
                                            // for combination of sequence and segment granularity.
                                            // It is necessary to skip it as the task puts messages polled from all the
                                            // assigned Kafka partitions into a single Druid segment, thus ordering of
                                            // messages among replica tasks across assigned partitions is not guaranteed
                                            // which may cause replica tasks to ask for segments with different interval
                                            // in different order which might cause SegmentAllocateAction to fail.
                                            true,
                                            // do not allow incremental persists to happen until all the rows from this batch
                                            // of rows are indexed
                                            false);

                                    if (addResult.isOk()) {
                                        // If the number of rows in the segment exceeds the threshold after adding a row,
                                        // move the segment out from the active segments of BaseAppenderatorDriver to make a new segment.
                                        if (addResult.isPushRequired(tuningConfig)
                                                && !sequenceToUse.isCheckpointed()) {
                                            sequenceToCheckpoint = sequenceToUse;
                                        }
                                        isPersistRequired |= addResult.isPersistRequired();
                                    } else {
                                        // Failure to allocate segment puts determinism at risk, bail out to be safe.
                                        // May want configurable behavior here at some point.
                                        // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                                        throw new ISE("Could not allocate segment for row with timestamp[%s]",
                                                row.getTimestamp());
                                    }

                                    if (addResult.getParseException() != null) {
                                        handleParseException(addResult.getParseException(), record);
                                    } else {
                                        rowIngestionMeters.incrementProcessed();
                                    }
                                } else {
                                    rowIngestionMeters.incrementThrownAway();
                                }
                            }
                            if (isPersistRequired) {
                                Futures.addCallback(driver.persistAsync(committerSupplier.get()),
                                        new FutureCallback<Object>() {
                                            @Override
                                            public void onSuccess(@Nullable Object result) {
                                                log.info("Persist completed with metadata [%s]", result);
                                            }

                                            @Override
                                            public void onFailure(Throwable t) {
                                                log.error("Persist failed, dying");
                                                backgroundThreadException = t;
                                            }
                                        });
                            }
                        } catch (ParseException e) {
                            handleParseException(e, record);
                        }

                        nextOffsets.put(record.partition(), record.offset() + 1);
                    }

                    if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition()))
                            && assignment.remove(record.partition())) {
                        log.info("Finished reading topic[%s], partition[%,d].", record.topic(),
                                record.partition());
                        KafkaIndexTask.assignPartitions(consumer, topic, assignment);
                        stillReading = !assignment.isEmpty();
                    }
                }

                if (System.currentTimeMillis() > nextCheckpointTime) {
                    sequenceToCheckpoint = sequences.get(sequences.size() - 1);
                }

                if (sequenceToCheckpoint != null && stillReading) {
                    Preconditions.checkArgument(
                            sequences.get(sequences.size() - 1).getSequenceName()
                                    .equals(sequenceToCheckpoint.getSequenceName()),
                            "Cannot checkpoint a sequence [%s] which is not the latest one, sequences %s",
                            sequenceToCheckpoint, sequences);
                    requestPause();
                    final CheckPointDataSourceMetadataAction checkpointAction = new CheckPointDataSourceMetadataAction(
                            task.getDataSource(), ioConfig.getTaskGroupId(),
                            task.getIOConfig().getBaseSequenceName(),
                            new KafkaDataSourceMetadata(
                                    new KafkaPartitions(topic, sequenceToCheckpoint.getStartOffsets())),
                            new KafkaDataSourceMetadata(new KafkaPartitions(topic, nextOffsets)));
                    if (!toolbox.getTaskActionClient().submit(checkpointAction)) {
                        throw new ISE("Checkpoint request with offsets [%s] failed, dying", nextOffsets);
                    }
                }
            }
            ingestionState = IngestionState.COMPLETED;
        } catch (Exception e) {
            // (1) catch all exceptions while reading from kafka
            caughtExceptionInner = e;
            log.error(e, "Encountered exception in run() before persisting.");
            throw e;
        } finally {
            log.info("Persisting all pending data");
            try {
                driver.persist(committerSupplier.get()); // persist pending data
            } catch (Exception e) {
                if (caughtExceptionInner != null) {
                    caughtExceptionInner.addSuppressed(e);
                } else {
                    throw e;
                }
            }
        }

        synchronized (statusLock) {
            if (stopRequested.get() && !publishOnStop.get()) {
                throw new InterruptedException("Stopping without publishing");
            }

            status = Status.PUBLISHING;
        }

        for (SequenceMetadata sequenceMetadata : sequences) {
            if (!publishingSequences.contains(sequenceMetadata.getSequenceName())) {
                // this is done to prevent checks in sequence specific commit supplier from failing
                sequenceMetadata.setEndOffsets(nextOffsets);
                sequenceMetadata.updateAssignments(nextOffsets);
                publishingSequences.add(sequenceMetadata.getSequenceName());
                // persist already done in finally, so directly add to publishQueue
                publishAndRegisterHandoff(sequenceMetadata);
            }
        }

        if (backgroundThreadException != null) {
            throw new RuntimeException(backgroundThreadException);
        }

        // Wait for publish futures to complete.
        Futures.allAsList(publishWaitList).get();

        // Wait for handoff futures to complete.
        // Note that every publishing task (created by calling AppenderatorDriver.publish()) has a corresponding
        // handoffFuture. handoffFuture can throw an exception if 1) the corresponding publishFuture failed or 2) it
        // failed to persist sequences. It might also return null if handoff failed, but was recoverable.
        // See publishAndRegisterHandoff() for details.
        List<SegmentsAndMetadata> handedOffList = Collections.emptyList();
        if (tuningConfig.getHandoffConditionTimeout() == 0) {
            handedOffList = Futures.allAsList(handOffWaitList).get();
        } else {
            try {
                handedOffList = Futures.allAsList(handOffWaitList)
                        .get(tuningConfig.getHandoffConditionTimeout(), TimeUnit.MILLISECONDS);
            } catch (TimeoutException e) {
                // Handoff timeout is not an indexing failure, but coordination failure. We simply ignore timeout exception
                // here.
                log.makeAlert("Timed out after [%d] millis waiting for handoffs",
                        tuningConfig.getHandoffConditionTimeout()).addData("TaskId", task.getId()).emit();
            }
        }

        for (SegmentsAndMetadata handedOff : handedOffList) {
            log.info("Handoff completed for segments[%s] with metadata[%s].",
                    Joiner.on(", ")
                            .join(handedOff.getSegments().stream().map(DataSegment::getIdentifier)
                                    .collect(Collectors.toList())),
                    Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata"));
        }

        appenderator.close();
    } catch (InterruptedException | RejectedExecutionException e) {
        // (2) catch InterruptedException and RejectedExecutionException thrown for the whole ingestion steps including
        // the final publishing.
        caughtExceptionOuter = e;
        try {
            Futures.allAsList(publishWaitList).cancel(true);
            Futures.allAsList(handOffWaitList).cancel(true);
            if (appenderator != null) {
                appenderator.closeNow();
            }
        } catch (Exception e2) {
            e.addSuppressed(e2);
        }

        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException
                && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }

        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested.get()) {
            Thread.currentThread().interrupt();
            throw e;
        }

        log.info("The task was asked to stop before completing");
    } catch (Exception e) {
        // (3) catch all other exceptions thrown for the whole ingestion steps including the final publishing.
        caughtExceptionOuter = e;
        try {
            Futures.allAsList(publishWaitList).cancel(true);
            Futures.allAsList(handOffWaitList).cancel(true);
            if (appenderator != null) {
                appenderator.closeNow();
            }
        } catch (Exception e2) {
            e.addSuppressed(e2);
        }
        throw e;
    } finally {
        try {
            if (driver != null) {
                driver.close();
            }
            if (chatHandlerProvider.isPresent()) {
                chatHandlerProvider.get().unregister(task.getId());
            }

            toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
            toolbox.getDataSegmentServerAnnouncer().unannounce();
        } catch (Exception e) {
            if (caughtExceptionOuter != null) {
                caughtExceptionOuter.addSuppressed(e);
            } else {
                throw e;
            }
        }
    }

    toolbox.getTaskReportFileWriter().write(getTaskCompletionReports(null));
    return TaskStatus.success(task.getId());
}

From source file: com.b2international.snowowl.server.console.MaintenanceCommandProvider.java

private void print(final Branch branch, final int parentDepth, CommandInterpreter interpreter) {

    printBranch(branch, getDepthOfBranch(branch) - parentDepth, interpreter);

    List<? extends Branch> children = FluentIterable.from(branch.children()).filter(new Predicate<Branch>() {
        @Override
        public boolean apply(Branch input) {
            return input.parentPath().equals(branch.path());
        }
    }).toSortedList(new Comparator<Branch>() {
        @Override
        public int compare(Branch o1, Branch o2) {
            return Longs.compare(o1.baseTimestamp(), o2.baseTimestamp());
        }
    });

    if (children.size() != 0) {
        for (Branch child : children) {
            print(child, parentDepth, interpreter);
        }
    }

}

From source file: org.apache.phoenix.index.PhoenixTransactionalIndexer.java

private void processRollback(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData,
        byte[] txRollbackAttribute, ResultScanner scanner, Transaction tx, Set<ColumnReference> mutableColumns,
        Collection<Pair<Mutation, byte[]>> indexUpdates, Map<ImmutableBytesPtr, MultiMutation> mutations)
        throws IOException {
    if (scanner != null) {
        Result result;
        // Loop through last committed row state plus all new rows associated with current transaction
        // to generate point delete markers for all index rows that were added. We don't have Tephra
        // manage index rows in change sets because we don't want to be hit with the additional
        // memory hit and do not need to do conflict detection on index rows.
        ColumnReference emptyColRef = new ColumnReference(
                indexMetaData.getIndexMaintainers().get(0).getDataEmptyKeyValueCF(),
                indexMetaData.getIndexMaintainers().get(0).getEmptyKeyValueQualifier());
        while ((result = scanner.next()) != null) {
            Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow()));
            // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest
            // (as if we're "replaying" them in time order).
            List<Cell> cells = result.listCells();
            Collections.sort(cells, new Comparator<Cell>() {

                @Override
                public int compare(Cell o1, Cell o2) {
                    int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp());
                    if (c != 0)
                        return c;
                    c = o1.getTypeByte() - o2.getTypeByte();
                    if (c != 0)
                        return c;
                    c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(),
                            o2.getFamilyArray(), o2.getFamilyOffset(), o2.getFamilyLength());
                    if (c != 0)
                        return c;
                    return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(),
                            o1.getQualifierLength(), o2.getQualifierArray(), o2.getQualifierOffset(),
                            o2.getQualifierLength());
                }

            });
            int i = 0;
            int nCells = cells.size();
            Result oldResult = null, newResult;
            long readPtr = tx.getReadPointer();
            do {
                boolean hasPuts = false;
                LinkedList<Cell> singleTimeCells = Lists.newLinkedList();
                long writePtr;
                Cell cell = cells.get(i);
                do {
                    hasPuts |= cell.getTypeByte() == KeyValue.Type.Put.getCode();
                    writePtr = cell.getTimestamp();
                    ListIterator<Cell> it = singleTimeCells.listIterator();
                    do {
                        // Add at the beginning of the list to match the expected HBase
                        // newest to oldest sort order (which TxTableState relies on
                        // with the Result.getLatestColumnValue() calls). However, we
                        // still want to add Cells in the expected order for each time
                        // bound as otherwise we won't find it in our old state.
                        it.add(cell);
                    } while (++i < nCells && (cell = cells.get(i)).getTimestamp() == writePtr);
                } while (i < nCells && cell.getTimestamp() <= readPtr);

                // Generate point delete markers for the prior row deletion of the old index value.
                // The write timestamp is the next timestamp, not the current timestamp,
                // as the earliest cells are the current values for the row (and we don't
                // want to delete the current row).
                if (oldResult != null) {
                    TxTableState state = new TxTableState(env, mutableColumns, indexMetaData.getAttributes(),
                            writePtr, m, emptyColRef, oldResult);
                    generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                }
                // Generate point delete markers for the new index value.
                // If our time batch doesn't have Puts (i.e. we have only Deletes), then do not
                // generate deletes. We would have generated the delete above based on the state
                // of the previous row. The delete markers do not give us the state we need to
                // delete.
                if (hasPuts) {
                    newResult = Result.create(singleTimeCells);
                    // First row may represent the current state which we don't want to delete
                    if (writePtr > readPtr) {
                        TxTableState state = new TxTableState(env, mutableColumns,
                                indexMetaData.getAttributes(), writePtr, m, emptyColRef, newResult);
                        generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                    }
                    oldResult = newResult;
                } else {
                    oldResult = null;
                }
            } while (i < nCells);
        }
    }
}

From source file: com.b2international.snowowl.snomed.exporter.server.net4j.SnomedExportServerIndication.java

private void executeFullExport(RevisionIndex revisionIndex, OMMonitor monitor) {

    List<CodeSystemVersionEntry> sortedVersions = FluentIterable.from(getCodeSystemVersions())
            .toSortedList(new Comparator<CodeSystemVersionEntry>() {
                @Override
                public int compare(CodeSystemVersionEntry o1, CodeSystemVersionEntry o2) {
                    return Longs.compare(o1.getEffectiveDate(), o2.getEffectiveDate());
                }
            });

    long startTime = 0L;

    for (CodeSystemVersionEntry version : sortedVersions) {

        exportContext.setStartEffectiveTime(new Date(startTime));
        exportContext.setEndEffectiveTime(new Date(version.getEffectiveDate()));

        String versionBranchPath = convertToBranchPath(version);
        executeExport(revisionIndex, versionBranchPath, false, monitor);

        startTime = version.getEffectiveDate();
    }

    if (includeUnpublished) {
        executeExport(revisionIndex, branchPath, true, monitor);
    }

}

From source file: com.google.gerrit.server.schema.Schema_65.java

private List<AccountGroupAgreement> getAccountGroupAgreements(ReviewDb db,
        Map<Integer, ContributorAgreement> agreements) throws SQLException {

    Statement stmt = ((JdbcSchema) db).getConnection().createStatement();
    try {
        ResultSet rs = stmt.executeQuery("SELECT group_id, cla_id, accepted_on, reviewed_by, reviewed_on, "
                + "       review_comments " + "FROM account_group_agreements");
        try {
            List<AccountGroupAgreement> groupAgreements = Lists.newArrayList();
            while (rs.next()) {
                AccountGroupAgreement a = new AccountGroupAgreement();
                a.groupId = new AccountGroup.Id(rs.getInt(1));
                a.claId = rs.getInt(2);
                if (!agreements.containsKey(a.claId)) {
                    continue; // Agreement is invalid
                }
                a.acceptedOn = rs.getTimestamp(3);
                a.reviewedBy = new Account.Id(rs.getInt(4));
                if (rs.wasNull()) {
                    a.reviewedBy = null;
                }

                a.reviewedOn = rs.getTimestamp(5);
                if (rs.wasNull()) {
                    a.reviewedOn = null;
                }

                a.reviewComments = rs.getString(6);
                if (rs.wasNull()) {
                    a.reviewComments = null;
                }
                groupAgreements.add(a);
            }
            Collections.sort(groupAgreements, new Comparator<AccountGroupAgreement>() {
                @Override
                public int compare(AccountGroupAgreement a1, AccountGroupAgreement a2) {
                    return Longs.compare(a1.getTime(), a2.getTime());
                }
            });
            return groupAgreements;
        } finally {
            rs.close();
        }
    } finally {
        stmt.close();
    }
}

From source file: org.apache.druid.query.groupby.GroupByQuery.java

@Nullable
private Comparator<Row> getTimeComparator(boolean granular) {
    if (Granularities.ALL.equals(getGranularity())) {
        return null;
    } else if (granular) {
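        // Granular ordering: compare rows by the start millis of the granularity bucket containing each timestamp.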
        return (lhs, rhs) -> Longs.compare(getGranularity().bucketStart(lhs.getTimestamp()).getMillis(),
                getGranularity().bucketStart(rhs.getTimestamp()).getMillis());
    } else {
        return NON_GRANULAR_TIME_COMP;
    }
}

From source file: org.apache.pig.impl.util.Utils.java

/**
 * Finds a valid path for a file from an array of FileStatus objects.
 * @param statusArray Array of FileStatus objects in which to search
 * for the file.
 * @param fileSystem FileSystem in which to search for the first file.
 * @return The first file found.
 * @throws IOException
 */
public static Path depthFirstSearchForFile(final FileStatus[] statusArray, final FileSystem fileSystem,
        PathFilter filter) throws IOException {

    // Most recent files first
    Arrays.sort(statusArray, new Comparator<FileStatus>() {
        @Override
        public int compare(final FileStatus fs1, final FileStatus fs2) {
            return Longs.compare(fs2.getModificationTime(), fs1.getModificationTime());
        }
    });

    for (FileStatus f : statusArray) {
        if (fileSystem.isFile(f.getPath())) {
            if (filter == null || filter.accept(f.getPath())) {
                return f.getPath();
            } else {
                continue;
            }
        } else {
            return depthFirstSearchForFile(fileSystem.listStatus(f.getPath(), VISIBLE_FILES), fileSystem,
                    filter);
        }
    }

    return null;

}

From source file: org.apache.cassandra.index.SecondaryIndexManager.java

/**
 * Called at query time to choose which (if any) of the registered index implementations to use for a given query.
 *
 * This is a two-step process: first compiling the set of searchable indexes, then choosing the one which reduces
 * the search space the most.
 *
 * In the first phase, if the command's RowFilter contains any custom index expressions, the indexes that they
 * specify are automatically included. Following that, the registered indexes are filtered to include only those
 * which support the standard expressions in the RowFilter.
 *
 * The filtered set is then sorted by selectivity, as reported by the Index implementations' getEstimatedResultRows
 * method.
 *
 * Implementation specific validation of the target expression, either custom or standard, by the selected
 * index should be performed in the searcherFor method to ensure that we pick the right index regardless of
 * the validity of the expression.
 *
 * This method is only called once during the lifecycle of a ReadCommand and the result is
 * cached for future use when obtaining a Searcher, getting the index's underlying CFS for
 * ReadOrderGroup, or an estimate of the result size from an average index query.
 *
 * @param command ReadCommand to be executed
 * @return an Index instance, ready to use during execution of the command, or null if none
 * of the registered indexes can support the command.
 */
public Index getBestIndexFor(ReadCommand command) {
    if (indexes.isEmpty() || command.rowFilter().isEmpty())
        return null;

    Set<Index> searchableIndexes = new HashSet<>();
    for (RowFilter.Expression expression : command.rowFilter()) {
        if (expression.isCustom()) {
            // Only a single custom expression is allowed per query and, if present,
            // we want to always favour the index specified in such an expression
            RowFilter.CustomExpression customExpression = (RowFilter.CustomExpression) expression;
            logger.trace("Command contains a custom index expression, using target index {}",
                    customExpression.getTargetIndex().name);
            Tracing.trace("Command contains a custom index expression, using target index {}",
                    customExpression.getTargetIndex().name);
            return indexes.get(customExpression.getTargetIndex().name);
        } else if (!expression.isUserDefined()) {
            indexes.values().stream()
                    .filter(index -> index.supportsExpression(expression.column(), expression.operator()))
                    .forEach(searchableIndexes::add);
        }
    }

    if (searchableIndexes.isEmpty()) {
        logger.trace("No applicable indexes found");
        Tracing.trace("No applicable indexes found");
        return null;
    }

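    // With more than one candidate, pick the index reporting the fewest estimated result rows (most selective).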
    Index selected = searchableIndexes.size() == 1 ? Iterables.getOnlyElement(searchableIndexes)
            : searchableIndexes.stream()
                    .min((a, b) -> Longs.compare(a.getEstimatedResultRows(), b.getEstimatedResultRows()))
                    .orElseThrow(() -> new AssertionError("Could not select most selective index"));

    // pay for an additional threadlocal get() rather than build the strings unnecessarily
    if (Tracing.isTracing()) {
        Tracing.trace("Index mean cardinalities are {}. Scanning with {}.",
                searchableIndexes.stream()
                        .map(i -> i.getIndexMetadata().name + ':' + i.getEstimatedResultRows())
                        .collect(Collectors.joining(",")),
                selected.getIndexMetadata().name);
    }
    return selected;
}