Example usage for java.lang Long compare

List of usage examples for java.lang Long compare

Introduction

On this page you can find example usage for java.lang.Long.compare.

Prototype

public static int compare(long x, long y) 

Document

Compares two long values numerically.
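
Long.compare(x, y) returns a negative value when x < y, zero when x == y, and a positive value when x > y. A minimal, self-contained sketch of that contract, including why it is safer than the classic subtraction idiom:

public class LongCompareDemo {
    public static void main(String[] args) {
        System.out.println(Long.compare(1L, 2L)); // negative: 1 < 2
        System.out.println(Long.compare(2L, 2L)); // zero: equal
        System.out.println(Long.compare(3L, 2L)); // positive: 3 > 2

        // Comparing via subtraction can overflow and report the wrong sign;
        // Long.compare never does.
        System.out.println(Long.MIN_VALUE - 1L > 0L);          // true (overflow!)
        System.out.println(Long.compare(Long.MIN_VALUE, 1L));  // negative (correct)
    }
}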

Usage

From source file:de.dentrassi.pm.rpm.internal.RpmExtractor.java

private RpmInformation makeInformation(final RpmInputStream in) throws IOException {
    final RpmHeader<RpmTag> header = in.getPayloadHeader();
    final RpmHeader<RpmSignatureTag> signature = in.getSignatureHeader();

    try {
        final RpmInformation result = new RpmInformation();

        result.setHeaderStart(header.getStart());
        result.setHeaderEnd(header.getStart() + header.getLength());

        result.setName(asString(header.getTag(RpmTag.NAME)));
        result.setArchitecture(asString(header.getTag(RpmTag.ARCH)));
        result.setSummary(asString(header.getTag(RpmTag.SUMMARY)));
        result.setDescription(asString(header.getTag(RpmTag.DESCRIPTION)));
        result.setPackager(asString(header.getTag(RpmTag.PACKAGER)));
        result.setUrl(asString(header.getTag(RpmTag.URL)));
        result.setLicense(asString(header.getTag(RpmTag.LICENSE)));
        result.setVendor(asString(header.getTag(RpmTag.VENDOR)));
        result.setGroup(asString(header.getTag(RpmTag.GROUP)));

        result.setBuildHost(asString(header.getTag(RpmTag.BUILDHOST)));
        result.setBuildTimestamp(asLong(header.getTag(RpmTag.BUILDTIME)));
        result.setSourcePackage(asString(header.getTag(RpmTag.SOURCE_PACKAGE)));

        result.setInstalledSize(asLong(header.getTag(RpmTag.INSTALLED_SIZE)));
        result.setArchiveSize(asLong(header.getTag(RpmTag.ARCHIVE_SIZE)));
        if (result.getArchiveSize() == null) {
            result.setArchiveSize(asLong(signature.getTag(RpmSignatureTag.PAYLOAD_SIZE)));
        }

        // version

        final RpmInformation.Version ver = new RpmInformation.Version(asString(header.getTag(RpmTag.VERSION)),
                asString(header.getTag(RpmTag.RELEASE)), asString(header.getTag(RpmTag.EPOCH)));
        result.setVersion(ver);

        // changelog

        final Object val = header.getTag(RpmTag.CHANGELOG_TIMESTAMP);
        if (val instanceof Long[]) {
            final Long[] ts = (Long[]) val;
            final String[] authors = (String[]) header.getTag(RpmTag.CHANGELOG_AUTHOR);
            final String[] texts = (String[]) header.getTag(RpmTag.CHANGELOG_TEXT);

            final List<RpmInformation.Changelog> changes = new ArrayList<>(ts.length);

            for (int i = 0; i < ts.length; i++) {
                changes.add(new RpmInformation.Changelog(ts[i], authors[i], texts[i]));
            }

            Collections.sort(changes, (o1, o2) -> Long.compare(o1.getTimestamp(), o2.getTimestamp()));

            result.setChangelog(changes);
        }

        // dependencies

        result.setProvides(
                makeDependencies(header, RpmTag.PROVIDE_NAME, RpmTag.PROVIDE_VERSION, RpmTag.PROVIDE_FLAGS));
        result.setRequires(
                makeDependencies(header, RpmTag.REQUIRE_NAME, RpmTag.REQUIRE_VERSION, RpmTag.REQUIRE_FLAGS));
        result.setConflicts(
                makeDependencies(header, RpmTag.CONFLICT_NAME, RpmTag.CONFLICT_VERSION, RpmTag.CONFLICT_FLAGS));
        result.setObsoletes(
                makeDependencies(header, RpmTag.OBSOLETE_NAME, RpmTag.OBSOLETE_VERSION, RpmTag.OBSOLETE_FLAGS));

        // files

        final CpioArchiveInputStream cpio = in.getCpioStream();
        CpioArchiveEntry cpioEntry;
        while ((cpioEntry = cpio.getNextCPIOEntry()) != null) {
            final String name = normalize(cpioEntry.getName());

            if (cpioEntry.isRegularFile()) {
                result.getFiles().add(name);
            } else if (cpioEntry.isDirectory()) {
                result.getDirectories().add(name);
            }
        }

        return result;
    } catch (final Exception e) {
        logger.info("Failed to create RPM information", e);
        return null;
    }
}

From source file:org.apache.distributedlog.service.placement.ServerLoad.java

@Override
public synchronized int compareTo(Object o) {
    ServerLoad other = (ServerLoad) o;
    if (load == other.getLoad()) {
        return server.compareTo(other.getServer());
    } else {
        return Long.compare(load, other.getLoad());
    }
}
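
Note the two-level pattern: compare the primary long key with Long.compare, and fall back to a secondary key only on ties. Since Java 8 the same ordering can be built from Comparator combinators; a minimal sketch, assuming getLoad() returns a long and getServer() returns a Comparable type, as the snippet above suggests:

// Equivalent ordering via java.util.Comparator combinators (a sketch,
// not the project's actual code):
Comparator<ServerLoad> byLoadThenServer =
        Comparator.comparingLong(ServerLoad::getLoad)
                  .thenComparing(ServerLoad::getServer);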

From source file:org.eclipse.packagedrone.repo.adapter.rpm.internal.RpmExtractor.java

private RpmInformation makeInformation(final RpmInputStream in) throws IOException {
    final RpmHeader<RpmTag> header = in.getPayloadHeader();
    final RpmHeader<RpmSignatureTag> signature = in.getSignatureHeader();

    try {
        final RpmInformation result = new RpmInformation();

        result.setHeaderStart(header.getStart());
        result.setHeaderEnd(header.getStart() + header.getLength());

        result.setName(asString(header.getTag(RpmTag.NAME)));
        result.setArchitecture(asString(header.getTag(RpmTag.ARCH)));
        result.setSummary(asString(header.getTag(RpmTag.SUMMARY)));
        result.setDescription(asString(header.getTag(RpmTag.DESCRIPTION)));
        result.setPackager(asString(header.getTag(RpmTag.PACKAGER)));
        result.setUrl(asString(header.getTag(RpmTag.URL)));
        result.setLicense(asString(header.getTag(RpmTag.LICENSE)));
        result.setVendor(asString(header.getTag(RpmTag.VENDOR)));
        result.setGroup(asString(header.getTag(RpmTag.GROUP)));

        result.setBuildHost(asString(header.getTag(RpmTag.BUILDHOST)));
        result.setBuildTimestamp(asLong(header.getTag(RpmTag.BUILDTIME)));
        result.setSourcePackage(asString(header.getTag(RpmTag.SOURCE_PACKAGE)));

        result.setInstalledSize(asLong(header.getTag(RpmTag.INSTALLED_SIZE)));
        result.setArchiveSize(asLong(header.getTag(RpmTag.ARCHIVE_SIZE)));
        if (result.getArchiveSize() == null) {
            result.setArchiveSize(asLong(signature.getTag(RpmSignatureTag.PAYLOAD_SIZE)));
        }

        // version

        final RpmInformation.Version ver = new RpmInformation.Version(asString(header.getTag(RpmTag.VERSION)),
                asString(header.getTag(RpmTag.RELEASE)), asString(header.getTag(RpmTag.EPOCH)));
        result.setVersion(ver);

        // changelog

        final Object val = header.getTag(RpmTag.CHANGELOG_TIMESTAMP);
        if (val instanceof Long[]) {
            final Long[] ts = (Long[]) val;
            final String[] authors = (String[]) header.getTag(RpmTag.CHANGELOG_AUTHOR);
            final String[] texts = (String[]) header.getTag(RpmTag.CHANGELOG_TEXT);

            final List<RpmInformation.Changelog> changes = new ArrayList<>(ts.length);

            for (int i = 0; i < ts.length; i++) {
                changes.add(new RpmInformation.Changelog(ts[i], authors[i], texts[i]));
            }

            Collections.sort(changes, (o1, o2) -> Long.compare(o1.getTimestamp(), o2.getTimestamp()));

            result.setChangelog(changes);
        }

        // dependencies

        result.setProvides(
                makeDependencies(header, RpmTag.PROVIDE_NAME, RpmTag.PROVIDE_VERSION, RpmTag.PROVIDE_FLAGS));
        result.setRequires(
                makeDependencies(header, RpmTag.REQUIRE_NAME, RpmTag.REQUIRE_VERSION, RpmTag.REQUIRE_FLAGS));
        result.setConflicts(
                makeDependencies(header, RpmTag.CONFLICT_NAME, RpmTag.CONFLICT_VERSION, RpmTag.CONFLICT_FLAGS));
        result.setObsoletes(
                makeDependencies(header, RpmTag.OBSOLETE_NAME, RpmTag.OBSOLETE_VERSION, RpmTag.OBSOLETE_FLAGS));

        // files

        final CpioArchiveInputStream cpio = in.getCpioStream();
        CpioArchiveEntry cpioEntry;
        while ((cpioEntry = cpio.getNextCPIOEntry()) != null) {
            final String name = normalize(cpioEntry.getName());

            if (cpioEntry.isRegularFile()) {
                result.getFiles().add(name);
            } else if (cpioEntry.isDirectory()) {
                result.getDirectories().add(name);
            }
        }
        cpio.close();

        return result;
    } catch (final Exception e) {
        logger.info("Failed to create RPM information", e);
        return null;
    }
}

From source file:im.ene.mxmo.presentation.game.GamePresenterImpl.java

@Override
public void onUserName(String userName) {
    // Query an available game on Firebase, create new if there is no one.
    this.gameDb.addListenerForSingleValueEvent(new ValueEventListenerAdapter() {
        @Override
        public void onDataChange(DataSnapshot snapshot) {
            //noinspection Convert2MethodRef,ConstantConditions
            Observable.just(snapshot).filter(s1 -> s1 != null && s1.getValue() instanceof HashMap)
                    .map(s2 -> (HashMap) s2.getValue())
                    .flatMapIterable((Function<HashMap, Iterable<Map.Entry>>) map -> map.entrySet())
                    .filter(entry -> entry.getValue() instanceof HashMap)
                    .map(entry -> Pair.create(entry.getKey().toString(), (HashMap) entry.getValue()))
                    .filter(pair -> Boolean.FALSE.equals(pair.second.get("finished")) // not finished yet
                            && !Boolean.TRUE.equals(pair.second.get("started")) // not started yet
                            && (pair.second.get("secondUser") == null || pair.second.get("firstUser") == null))
                    .map(pair -> {
                        JsonElement json = getApp().getGson().toJsonTree(pair.second);
                        return Pair.create(pair.first, getApp().getGson().fromJson(json, TicTacToe.class));
                    }).sorted((o1, o2) -> Long.compare(o2.second.getCreatedAt(), o1.second.getCreatedAt()))
                    .first(new Pair<>(null, TicTacToe.DEFAULT)).filter(pair -> pair.first != null)
                    .map(game -> new GameChangedEvent(userName, gameDb.child(game.first), game.second))
                    .defaultIfEmpty(new GameChangedEvent(userName, null, TicTacToe.DEFAULT))
                    .subscribe(event -> RxBus.getBus().send(event));
        }
    });
}

From source file:com.olacabs.fabric.compute.sources.kafka.impl.TransactionManager.java

private List<EventSetMeta> readExistingTransactions() throws Exception {
    final String txnLocation = path();
    List<String> txnIds = curator.getChildren().forPath(txnLocation);
    Collections.sort(txnIds, (lhs, rhs) -> Long.compare(Long.parseLong(lhs), Long.parseLong(rhs)));
    return txnIds.stream().map(txnId -> {
        try {
            return mapper.readValue(curator.getData().forPath(txnPath(txnId)), EventSetMeta.class);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }).collect(Collectors.toCollection(ArrayList::new));
}

From source file:org.haiku.haikudepotserver.job.model.Job.java

@Override
public int compareTo(JobSnapshot o) {
    Date qThis = getQueuedTimestamp();
    Date qOther = o.getQueuedTimestamp();
    int cmp = Long.compare(null == qOther ? 0 : qOther.getTime(), null == qThis ? 0 : qThis.getTime());

    if (0 == cmp) {
        cmp = getGuid().compareTo(o.getGuid());
    }

    return cmp;
}

From source file:org.veronicadb.core.memorygraph.storage.SimpleLocalFileStorageSink.java

@SuppressWarnings("resource")
@Override
public VSubGraph readGraphBlock(long graphId) throws VStorageFailureException {
    logger.info("Read requested for graphid:" + graphId);
    VSubGraph graph = null;
    File[] files = storageDirectory.listFiles((dir, name) -> name.startsWith(graphId + ""));
    if (files.length == 0) {
        throw new VStorageFailureException(SimpleLocalFileStorageSink.class,
                "Invalid graphId:" + graphId + " can't read block from disk");
    }
    logger.info("Found:" + files.length + " versions of shard for graphid:" + graphId);
    File latestDataFile = Arrays.asList(files).stream()
            .sorted((o1, o2) -> Long.compare(o2.lastModified(), o1.lastModified())).findFirst().get();
    logger.info("Latest shard for graphid:" + graphId + " is " + latestDataFile.getName());
    String flushTime = latestDataFile.getName().split("\\.")[0].split("_")[1];
    DataInputStream stream = null;
    InputStream baseStream = null;
    try {
        baseStream = new BufferedInputStream(new FileInputStream(latestDataFile), BUFFER_SIZE);
        if (compress) {
            baseStream = decompressor.getDeclaredConstructor(InputStream.class).newInstance(baseStream);
        }
        stream = new DataInputStream(baseStream);
    } catch (FileNotFoundException e) {
        throw new VStorageFailureException(SimpleLocalFileStorageSink.class,
                "Graph block file doesn't exist for:" + graphId + " file:" + latestDataFile.getPath(), e);
    } catch (InstantiationException | IllegalAccessException | IllegalArgumentException
            | InvocationTargetException | NoSuchMethodException | SecurityException e) {
        throw new VStorageFailureException(SimpleLocalFileStorageSink.class,
                "Failed to initialize pluggable de-compressor", e);
    }
    try {
        long readGraphId = readGraphId(stream);
        int vertexCount = stream.readInt();
        byte[] bloomBytes = null;
        if (getGraph() != null) {
            graph = getGraph().getGraphShard(readGraphId);
            // skip bloom bytes
            skipBloom(stream);
        } else {
            graph = new VSubGraph(readGraphId, vertexCount);
            bloomBytes = readGraphBloom(stream);
        }
        List<VVertex> vertices = readVertices(graph, stream);
        if (getGraph() != null) {
            graph.loadVertices(vertices);
        } else {
            graph.reinit(bloomBytes, vertices);
        }
        graph.getLastFlush().set(Long.parseLong(flushTime));
    } catch (IOException e) {
        throw new VStorageFailureException(SimpleLocalFileStorageSink.class,
                "Failure to read graphId:" + graphId + " file:" + latestDataFile.getPath() + " from disk", e);
    } finally {
        try {
            stream.close();
            baseStream.close();
        } catch (IOException e) {
            throw new VStorageFailureException(SimpleLocalFileStorageSink.class,
                    "Failed to close shard file stream", e);
        }
    }
    return graph;
}
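
Sorting the whole array just to take the first element can be avoided: Stream.max with a lastModified comparator selects the newest shard file directly. A minimal sketch, safe here because the files.length == 0 case was already rejected above:

// Pick the most recently modified shard file without a full sort (sketch):
File latestDataFile = Arrays.stream(files)
        .max(Comparator.comparingLong(File::lastModified))
        .get(); // non-empty: files.length == 0 was rejected earlier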

From source file:com.yahoo.druid.hadoop.HiveDatasourceInputFormat.java

@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    logger.info("checkPost #5");

    String overlordUrl = jobConf.get(CONF_DRUID_OVERLORD_HOSTPORT);
    Preconditions.checkArgument(overlordUrl != null && !overlordUrl.isEmpty(),
            CONF_DRUID_OVERLORD_HOSTPORT + " not defined");

    logger.info("druid overlord url = " + overlordUrl);

    String schemaStr = jobConf.get(CONF_DRUID_SCHEMA);

    Preconditions.checkArgument(schemaStr != null && !schemaStr.isEmpty(),
            "schema undefined,  provide " + CONF_DRUID_SCHEMA);
    logger.info("schema = " + schemaStr);

    DatasourceIngestionSpec ingestionSpec = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(schemaStr,
            DatasourceIngestionSpec.class);
    String segmentsStr = getSegmentsToLoad(ingestionSpec.getDataSource(), ingestionSpec.getIntervals(),
            overlordUrl);
    logger.info("segments list received from overlord = " + segmentsStr);

    List<DataSegment> segmentsList = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(segmentsStr,
            new TypeReference<List<DataSegment>>() {
            });
    VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<>(
            Ordering.natural());
    for (DataSegment segment : segmentsList) {
        timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
    final List<TimelineObjectHolder<String, DataSegment>> timeLineSegments = timeline
            .lookup(ingestionSpec.getIntervals().get(0));
    final List<WindowedDataSegment> windowedSegments = new ArrayList<>();
    for (TimelineObjectHolder<String, DataSegment> holder : timeLineSegments) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            windowedSegments.add(new WindowedDataSegment(chunk.getObject(), holder.getInterval()));
        }
    }

    jobConf.set(CONF_INPUT_SEGMENTS, HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsString(windowedSegments));

    segmentsStr = Preconditions.checkNotNull(jobConf.get(CONF_INPUT_SEGMENTS), "No segments found to read");
    List<WindowedDataSegment> segments = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(segmentsStr,
            new TypeReference<List<WindowedDataSegment>>() {
            });
    if (segments == null || segments.size() == 0) {
        throw new ISE("No segments found to read");
    }

    logger.info("segments to read " + segmentsStr);

    long maxSize = numSplits;

    if (maxSize > 0) {
        // combining is to happen, let us sort the segments list by size
        // so that they are combined appropriately
        Collections.sort(segments, new Comparator<WindowedDataSegment>() {
            @Override
            public int compare(WindowedDataSegment s1, WindowedDataSegment s2) {
                return Long.compare(s1.getSegment().getSize(), s2.getSegment().getSize());
            }
        });
    }

    List<InputSplit> splits = Lists.newArrayList();

    List<WindowedDataSegment> list = new ArrayList<>();
    long size = 0;

    // JobConf dummyConf = new JobConf();
    Job job = new Job(jobConf);
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
    Path[] paths = org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getInputPaths(jobContext);
    logger.info("dummyPath : " + paths);

    jobConf.set("druid.hive.dummyfilename", paths[0].toString());

    InputFormat fio = supplier.get();
    for (WindowedDataSegment segment : segments) {
        if (size + segment.getSegment().getSize() > maxSize && size > 0) {
            splits.add(toDataSourceSplit(list, fio, jobConf, paths[0]));
            list = Lists.newArrayList();
            size = 0;
        }

        list.add(segment);
        size += segment.getSegment().getSize();
    }

    if (list.size() > 0) {
        splits.add(toDataSourceSplit(list, fio, jobConf, paths[0]));
    }

    logger.info("Number of splits: " + splits.size());
    for (InputSplit split : splits) {
        logger.info(split.getClass().getName());
        for (String location : split.getLocations())
            logger.info(location);
    }
    return Iterables.toArray(splits, InputSplit.class);
}

From source file:com.google.android.apps.forscience.whistlepunk.metadata.TriggerListFragment.java

@Override
public void onResume() {
    super.onResume();
    getDataController().getSensorLayouts(mExperimentId,
            new LoggingConsumer<List<GoosciSensorLayout.SensorLayout>>(TAG, "get layout") {
                @Override
                public void success(List<GoosciSensorLayout.SensorLayout> value) {
                    for (GoosciSensorLayout.SensorLayout layout : value) {
                        if (TextUtils.equals(layout.sensorId, mSensorId)) {
                            mSensorLayout = layout;
                        }
                    }
                }
            });
    getDataController().getSensorTriggersForSensor(mSensorId,
            new LoggingConsumer<List<SensorTrigger>>(TAG, "get triggers for sensor") {
                @Override
                public void success(List<SensorTrigger> triggers) {
                    Comparator<SensorTrigger> cp;
                    if (mTriggerOrder != null) {
                        // If this is not the first load, use the saved order to define a new
                        // order, but insert new triggers at the top.
                        cp = new Comparator<SensorTrigger>() {
                            @Override
                            public int compare(SensorTrigger lhs, SensorTrigger rhs) {
                                int lhsIndex = mTriggerOrder.indexOf(lhs.getTriggerId());
                                int rhsIndex = mTriggerOrder.indexOf(rhs.getTriggerId());
                                if (lhsIndex == rhsIndex && lhsIndex == -1) {
                                    // If they are both not found, they are both new.
                                    return Long.compare(rhs.getLastUsed(), lhs.getLastUsed());
                                }
                                return Integer.compare(lhsIndex, rhsIndex);
                            }
                        };
                    } else {
                        // Only do this sort on the first load.
                        cp = new Comparator<SensorTrigger>() {
                            @Override
                            public int compare(SensorTrigger lhs, SensorTrigger rhs) {
                                boolean lhsIsActive = isTriggerActive(lhs);
                                boolean rhsIsActive = isTriggerActive(rhs);
                                if (lhsIsActive && !rhsIsActive) {
                                    return -1;
                                }
                                if (!lhsIsActive && rhsIsActive) {
                                    return 1;
                                }
                                return Long.compare(rhs.getLastUsed(), lhs.getLastUsed());
                            }
                        };
                    }
                    // Sort sensor triggers
                    Collections.sort(triggers, cp);
                    mTriggerAdapter.setSensorTriggers(triggers);
                }
            });
    WhistlePunkApplication.getUsageTracker(getActivity()).trackScreenView(TrackerConstants.SCREEN_TRIGGER_LIST);
}

From source file:org.talend.dataprep.transformation.actions.date.ChangeDatePattern.java

/**
 * Return the count of the most used pattern.
 *
 * @param column the column to work on.
 * @return the count of the most used pattern.
 */
private long getMostUsedPatternCount(ColumnMetadata column) {
    final List<PatternFrequency> patternFrequencies = column.getStatistics().getPatternFrequencies();
    if (patternFrequencies.isEmpty()) {
        return 1;
    }
    patternFrequencies.sort((p1, p2) -> Long.compare(p2.getOccurrences(), p1.getOccurrences()));
    return patternFrequencies.get(0).getOccurrences();
}