Example usage for java.util TreeMap tailMap

List of usage examples for java.util TreeMap tailMap

Introduction

On this page you can find usage examples for java.util TreeMap tailMap.

Prototype

public SortedMap<K, V> tailMap(K fromKey) 
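
The returned map is a view of the portion of this map whose keys are greater than or equal to fromKey; it is backed by the original map, so changes to either are reflected in the other. A minimal sketch:

    TreeMap<Integer, String> map = new TreeMap<Integer, String>();
    map.put(1, "a");
    map.put(2, "b");
    map.put(3, "c");
    System.out.println(map.tailMap(2)); // prints {2=b, 3=c}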

Usage

From source file:Main.java

public static void main(String[] args) {
    TreeMap<String, String> treeMap = new TreeMap<String, String>();
    treeMap.put("1", "One");
    treeMap.put("3", "Three");
    treeMap.put("2", "Two");
    treeMap.put("5", "Five");
    treeMap.put("4", "Four");

    SortedMap<String, String> sortedMap = treeMap.tailMap("2");
    System.out.println("Tail Map Contains : " + sortedMap);
}
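
Because tailMap("2") includes its argument, this prints: Tail Map Contains : {2=Two, 3=Three, 4=Four, 5=Five}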

From source file:Main.java

public static void main(String[] args) {

    TreeMap<Integer, String> treemap = new TreeMap<Integer, String>();

    // populating tree map
    treemap.put(2, "two");
    treemap.put(1, "one");
    treemap.put(3, "three");
    treemap.put(6, "six");
    treemap.put(5, "five");

    System.out.println("Getting tail map");
    SortedMap<Integer, String> treemapincl = treemap.tailMap(3);
    System.out.println("Tail map values: " + treemapincl);
}
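
Here tailMap(3) keeps the entries whose keys are 3 or greater, so this prints: Tail map values: {3=three, 5=five, 6=six}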

From source file:Main.java

public static void main(String[] args) {
    TreeMap<Integer, Product> db = new TreeMap<Integer, Product>();
    db.put(1000, new Product("D", 350));
    db.put(1011, new Product("p", 15.75));
    db.put(1102, new Product("M", 8.50));
    db.put(2023, new Product("A", 150));
    db.put(2034, new Product("T", 9.99));

    System.out.println(db.subMap(1000, 1999) + "\n");

    System.out.println(db.tailMap(1011) + "\n");

    System.out.println(db.headMap(2023));

    System.out.println("First key higher than 2034: " + db.higherKey(2034));
    System.out.println("First key lower than 2034: " + db.lowerKey(2034));
}
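
The Product class is not shown on this page; a minimal stand-in for running the example might look like this (hypothetical, for illustration only):

    class Product {
        final String name;
        final double price;

        Product(String name, double price) {
            this.name = name;
            this.price = price; // hypothetical fields; the original class is not shown
        }

        @Override
        public String toString() {
            return name + "=" + price;
        }
    }

Note the boundary behavior: subMap(1000, 1999) and headMap(2023) exclude their upper keys, while tailMap(1011) includes 1011.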

From source file:SortedMapDemo.java

public static void main(String[] args) {
    TreeMap<String, String> sortedMap = new TreeMap<String, String>();
    sortedMap.put("Adobe", "Mountain View, CA");
    sortedMap.put("IBM", "White Plains, NY");
    sortedMap.put("Learning Tree", "Los Angeles, CA");
    sortedMap.put("Microsoft", "Redmond, WA");
    sortedMap.put("Netscape", "Mountain View, CA");
    sortedMap.put("O'Reilly", "Sebastopol, CA");
    sortedMap.put("Sun", "Mountain View, CA");
    System.out.println(sortedMap);
    String low = sortedMap.firstKey(), high = sortedMap.lastKey();
    System.out.println(low);
    System.out.println(high);
    // Walk the sorted key set: take the 4th key as the new low and the 7th as the new high.
    Iterator<String> it = sortedMap.keySet().iterator();
    for (int i = 0; i <= 6; i++) {
        if (i == 3)
            low = it.next();
        else if (i == 6)
            high = it.next();
        else
            it.next();
    }
    System.out.println(low);
    System.out.println(high);
    System.out.println(sortedMap.subMap(low, high));
    System.out.println(sortedMap.headMap(high));
    System.out.println(sortedMap.tailMap(low));
}
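
Note the boundary conventions on the three views printed at the end: subMap(low, high) and headMap(high) exclude high, while tailMap(low) includes low.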

From source file:disAMS.AMRMClient.Impl.AMRMClientImpl.java

@Override
public synchronized List<? extends Collection<T>> getMatchingRequests(Priority priority, String resourceName,
        Resource capability) {
    Preconditions.checkArgument(capability != null, "The Resource to be requested should not be null ");
    Preconditions.checkArgument(priority != null,
            "The priority at which to request containers should not be null ");
    List<LinkedHashSet<T>> list = new LinkedList<LinkedHashSet<T>>();
    Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests = this.remoteRequestsTable.get(priority);
    if (remoteRequests == null) {
        return list;
    }
    TreeMap<Resource, ResourceRequestInfo> reqMap = remoteRequests.get(resourceName);
    if (reqMap == null) {
        return list;
    }

    ResourceRequestInfo resourceRequestInfo = reqMap.get(capability);
    if (resourceRequestInfo != null && !resourceRequestInfo.containerRequests.isEmpty()) {
        list.add(resourceRequestInfo.containerRequests);
        return list;
    }

    // no exact match. Container may be larger than what was requested.
    // get all resources <= capability. map is reverse sorted. 
    SortedMap<Resource, ResourceRequestInfo> tailMap = reqMap.tailMap(capability);
    for (Map.Entry<Resource, ResourceRequestInfo> entry : tailMap.entrySet()) {
        if (canFit(entry.getKey(), capability) && !entry.getValue().containerRequests.isEmpty()) {
            // match found that fits in the larger resource
            list.add(entry.getValue().containerRequests);
        }
    }

    // return any matches found (the list may be empty)
    return list;
}
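
The tailMap call works here because reqMap is ordered by a reverse comparator (the source comment notes the map is reverse sorted): in a descending map, tailMap(k) yields the keys that are less than or equal to k. The same trick on plain integers, as a minimal sketch:

    TreeMap<Integer, String> desc = new TreeMap<Integer, String>(Collections.reverseOrder());
    desc.put(4, "large");
    desc.put(2, "medium");
    desc.put(1, "small");
    // Iteration order is 4, 2, 1; tailMap(2) is {2=medium, 1=small}, i.e. everything <= 2.
    System.out.println(desc.tailMap(2));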

From source file:io.github.jeddict.jpa.spec.sync.JavaClassSyncHandler.java

private void syncHeaderJavaDoc(TypeDeclaration<?> type) {
    TreeMap<Integer, Comment> comments = new TreeMap<>();
    int packagePosition = 1;
    if (type.getParentNode().isPresent()) {
        Node parentNode = type.getParentNode().get();
        parentNode.getComment().ifPresent(comment -> comments.put(comment.getBegin().get().line, comment));
        for (Node node : parentNode.getChildNodes()) {
            if (node instanceof PackageDeclaration) {
                PackageDeclaration packageDeclaration = (PackageDeclaration) node;
                if (packageDeclaration.getBegin().isPresent()) {
                    packagePosition = packageDeclaration.getBegin().get().line;
                }
                if (packageDeclaration.getComment().isPresent()) {
                    Comment comment = packageDeclaration.getComment().get();
                    comments.put(comment.getBegin().get().line, comment);
                }
            } else if (node instanceof Comment) {
                Comment comment = (Comment) node;
                comments.put(comment.getBegin().get().line, comment);
            }
        }
    }
    type.getComment().ifPresent(comment -> comments.put(comment.getBegin().get().line, comment));
    comments.headMap(packagePosition).values().forEach(this::syncHeader);
    comments.tailMap(packagePosition).values().forEach(this::syncJavadoc);
}
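
Because headMap(packagePosition) is exclusive while tailMap(packagePosition) is inclusive, comments beginning strictly before the package declaration are synced as file headers, and comments at or after it (including the type's own comment) are synced as Javadoc.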

From source file:ffx.potential.nonbonded.VanDerWaalsForm.java

public VanDerWaalsForm(ForceField forceField) {

    /**
     * Set-up default rules.
     */
    vdwType = VDW_TYPE.BUFFERED_14_7;
    epsilonRule = EPSILON_RULE.HHG;
    radiusRule = RADIUS_RULE.CUBIC_MEAN;
    radiusSize = RADIUS_SIZE.DIAMETER;
    radiusType = RADIUS_TYPE.R_MIN;

    /**
     * Define functional form.
     */
    String value = forceField.getString(VDWTYPE, vdwType.toString());
    try {
        vdwType = VDW_TYPE.valueOf(toEnumForm(value));
    } catch (Exception e) {
        logger.info(format(" Unrecognized VDWTYPE %s; defaulting to %s.", value, vdwType));
    }

    switch (vdwType) {
    case BUFFERED_14_7:
        vdwPowers = new Buffered_14_7();
        break;
    case LENNARD_JONES:
        vdwPowers = new LJ_6_12();
        break;
    default:
        vdwPowers = new VDWPowers();
        break;
    }

    /**
     * Define epsilon combining rule.
     */
    value = forceField.getString(EPSILONRULE, epsilonRule.toString());
    try {
        epsilonRule = EPSILON_RULE.valueOf(toEnumForm(value));
    } catch (Exception e) {
        logger.info(format(" Unrecognized EPSILONRULE %s; defaulting to %s.", value, epsilonRule));
    }

    /**
     * Define radius combining rule.
     */
    value = forceField.getString(RADIUSRULE, radiusRule.toString());
    try {
        radiusRule = RADIUS_RULE.valueOf(toEnumForm(value));
    } catch (Exception e) {
        logger.info(format(" Unrecognized RADIUSRULE %s; defaulting to %s.", value, radiusRule));
    }

    /**
     * Define radius size.
     */
    value = forceField.getString(RADIUSSIZE, radiusSize.toString());
    try {
        radiusSize = RADIUS_SIZE.valueOf(toEnumForm(value));
    } catch (Exception e) {
        logger.info(format(" Unrecognized RADIUSSIZE %s; defaulting to %s.", value, radiusSize));
    }

    /**
     * Define radius type.
     */
    value = forceField.getString(RADIUSTYPE, radiusType.toString());
    try {
        radiusType = RADIUS_TYPE.valueOf(toEnumForm(value));
    } catch (Exception e) {
        logger.info(format(" Unrecognized RADIUSTYPE %s; defaulting to %s", value, radiusType));
    }

    /**
     * Configure van der Waals well shape parameters.
     */
    switch (vdwType) {
    case LENNARD_JONES:
        repulsivePower = 12;
        dispersivePower = 6;
        delta = 0.0;
        gamma = 0.0;
        break;
    case BUFFERED_14_7:
    default:
        repulsivePower = 14;
        dispersivePower = 7;
        delta = 0.07;
        gamma = 0.12;
        break;
    }

    repDispPower = repulsivePower - dispersivePower;
    dispersivePower1 = dispersivePower - 1;
    repDispPower1 = repDispPower - 1;
    delta1 = 1.0 + delta;
    t1n = pow(delta1, dispersivePower);
    gamma1 = 1.0 + gamma;

    scale12 = forceField.getDouble(ForceField.ForceFieldDouble.VDW_12_SCALE, 0.0);
    scale13 = forceField.getDouble(ForceField.ForceFieldDouble.VDW_13_SCALE, 0.0);
    scale14 = forceField.getDouble(ForceField.ForceFieldDouble.VDW_14_SCALE, 1.0);
    scale15 = forceField.getDouble(ForceField.ForceFieldDouble.VDW_15_SCALE, 1.0);

    /**
     * The convention in TINKER is a vdw-14-scale factor of 2.0 means to
     * scale by 0.5.
     */
    if (scale12 > 1.0) {
        scale12 = 1.0 / scale12;
    }
    if (scale13 > 1.0) {
        scale13 = 1.0 / scale13;
    }
    if (scale14 > 1.0) {
        scale14 = 1.0 / scale14;
    }
    if (scale15 != 1.0) {
        logger.severe(" Van Der Waals 1-5 masking rules are not supported.");
    }

    Map<String, VDWType> map = forceField.getVDWTypes();
    TreeMap<String, VDWType> vdwTypes = new TreeMap<>(map);
    maxClass = 0;
    for (VDWType currentType : vdwTypes.values()) {
        if (currentType.atomClass > maxClass) {
            maxClass = currentType.atomClass;
        }
    }
    radEps = new double[maxClass + 1][2 * (maxClass + 1)];

    /**
     * Scale factor to convert the vdW size to Rmin.
     */
    double radScale;
    switch (radiusSize) {
    case DIAMETER:
        radScale = 0.5;
        break;
    case RADIUS:
    default:
        radScale = 1.0;
        break;
    }
    switch (radiusType) {
    case SIGMA:
        // 2^(1/6): converts a sigma-based size to Rmin.
        radScale *= 1.122462048309372981;
        break;
    case R_MIN:
    default:
        break;
    }

    /**
     * Atom Class numbering starts at 1.
     */
    for (VDWType vdwi : vdwTypes.values()) {
        int i = vdwi.atomClass;
        double ri = radScale * vdwi.radius;
        double ri2 = ri * ri;
        double ri3 = ri * ri2;
        double e1 = vdwi.wellDepth;
        double se1 = sqrt(e1);
        for (VDWType vdwj : vdwTypes.tailMap(vdwi.getKey()).values()) {
            int j = vdwj.atomClass;
            double rj = radScale * vdwj.radius;
            double rj2 = rj * rj;
            double rj3 = rj * rj2;
            double e2 = vdwj.wellDepth;
            double se2 = sqrt(e2);
            double radmin;
            double eps;
            switch (radiusRule) {
            case ARITHMETIC:
                radmin = ri + rj;
                break;
            case GEOMETRIC:
                radmin = 2.0 * sqrt(ri) * sqrt(rj);
                break;
            default:
            case CUBIC_MEAN:
                radmin = 2.0 * (ri3 + rj3) / (ri2 + rj2);
            }
            switch (epsilonRule) {
            case GEOMETRIC:
                eps = se1 * se2;
                break;
            default:
            case HHG:
                eps = 4.0 * (e1 * e2) / ((se1 + se2) * (se1 + se2));
                break;
            }
            if (radmin > 0) {
                radEps[i][j * 2 + RADMIN] = 1.0 / radmin;
            } else {
                radEps[i][j * 2 + RADMIN] = 0.0;
            }

            radEps[i][j * 2 + EPS] = eps;
            if (radmin > 0) {
                radEps[j][i * 2 + RADMIN] = 1.0 / radmin;
            } else {
                radEps[j][i * 2 + RADMIN] = 0.0;
            }
            radEps[j][i * 2 + EPS] = eps;
        }
    }

}
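
The inner loop over vdwTypes.tailMap(vdwi.getKey()) visits each unordered pair of types exactly once, including the self-pair, because the tail view starts at the current key. The same pattern in miniature:

    TreeMap<String, Integer> types = new TreeMap<String, Integer>();
    types.put("A", 1);
    types.put("B", 2);
    types.put("C", 3);
    for (String k1 : types.keySet()) {
        // tailMap(k1) is inclusive, so each pair (k1, k2) with k1 <= k2 appears once.
        for (String k2 : types.tailMap(k1).keySet()) {
            System.out.println(k1 + "-" + k2); // A-A, A-B, A-C, B-B, B-C, C-C
        }
    }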

From source file:org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

/**
 * This method does two things:
 * 1. Makes sure the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills
 * the inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup whose segments have already been published, so that any newly
 * created tasks for the taskGroup start indexing from after the latest published offsets.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
    final int groupId = taskGroup.groupId;
    final List<Pair<String, TreeMap<Integer, Map<Integer, Long>>>> taskSequences = new ArrayList<>();
    final List<ListenableFuture<TreeMap<Integer, Map<Integer, Long>>>> futures = new ArrayList<>();
    final List<String> taskIds = new ArrayList<>();

    for (String taskId : taskGroup.taskIds()) {
        final ListenableFuture<TreeMap<Integer, Map<Integer, Long>>> checkpointsFuture = taskClient
                .getCheckpointsAsync(taskId, true);
        taskIds.add(taskId);
        futures.add(checkpointsFuture);
    }

    try {
        List<TreeMap<Integer, Map<Integer, Long>>> futuresResult = Futures.successfulAsList(futures)
                .get(futureTimeoutInSeconds, TimeUnit.SECONDS);

        for (int i = 0; i < futuresResult.size(); i++) {
            final TreeMap<Integer, Map<Integer, Long>> checkpoints = futuresResult.get(i);
            final String taskId = taskIds.get(i);
            if (checkpoints == null) {
                try {
                    // catch the exception in failed futures
                    futures.get(i).get();
                } catch (Exception e) {
                    log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
                    killTask(taskId);
                    taskGroup.tasks.remove(taskId);
                }
            } else if (checkpoints.isEmpty()) {
                log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
            } else {
                taskSequences.add(new Pair<>(taskId, checkpoints));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    final KafkaDataSourceMetadata latestDataSourceMetadata = (KafkaDataSourceMetadata) indexerMetadataStorageCoordinator
            .getDataSourceMetadata(dataSource);
    final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null
            && latestDataSourceMetadata.getKafkaPartitions() != null
            && ioConfig.getTopic().equals(latestDataSourceMetadata.getKafkaPartitions().getTopic());
    final Map<Integer, Long> latestOffsetsFromDb;
    if (hasValidOffsetsFromDb) {
        latestOffsetsFromDb = latestDataSourceMetadata.getKafkaPartitions().getPartitionOffsetMap();
    } else {
        latestOffsetsFromDb = null;
    }

    // order tasks of this taskGroup by the latest sequenceId
    taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));

    final Set<String> tasksToKill = new HashSet<>();
    final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
    int taskIndex = 0;

    while (taskIndex < taskSequences.size()) {
        TreeMap<Integer, Map<Integer, Long>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
        String taskId = taskSequences.get(taskIndex).lhs;
        if (earliestConsistentSequenceId.get() == -1) {
            // find the first replica task with earliest sequenceId consistent with datasource metadata in the metadata
            // store
            if (taskCheckpoints.entrySet().stream()
                    .anyMatch(sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream()
                            .allMatch(partitionOffset -> Longs.compare(partitionOffset.getValue(),
                                    latestOffsetsFromDb == null ? partitionOffset.getValue()
                                            : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(),
                                                    partitionOffset.getValue())) == 0)
                            && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey()))
                    || (pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() > 0
                            && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
                final SortedMap<Integer, Map<Integer, Long>> latestCheckpoints = new TreeMap<>(
                        taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
                log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
                taskGroup.sequenceOffsets.clear();
                taskGroup.sequenceOffsets.putAll(latestCheckpoints);
            } else {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], latestoffsets from DB [%s]", taskId,
                        taskCheckpoints, latestOffsetsFromDb);
                tasksToKill.add(taskId);
            }
        } else {
            // check consistency with taskGroup sequences
            if (taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey()) == null
                    || !(taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey())
                            .equals(taskGroup.sequenceOffsets.firstEntry().getValue()))
                    || taskCheckpoints.tailMap(taskGroup.sequenceOffsets.firstKey())
                            .size() != taskGroup.sequenceOffsets.size()) {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], taskgroup checkpoints [%s]", taskId,
                        taskCheckpoints, taskGroup.sequenceOffsets);
                tasksToKill.add(taskId);
            }
        }
        taskIndex++;
    }

    if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size()) || (taskGroup.tasks.size() == 0
            && pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() == 0)) {
        // Killing all tasks, or no tasks left in the group?
        // Clear the taskGroup state so that the latest offset information is fetched from the metadata store.
        log.warn("Clearing task group [%d] information as no valid tasks are left in the group", groupId);
        taskGroups.remove(groupId);
        partitionGroups.get(groupId).replaceAll((partition, offset) -> NOT_SET);
    }

    taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs))
            .forEach(sequenceCheckpoint -> {
                log.warn(
                        "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints[%s] or latest "
                                + "persisted offsets in metadata store [%s]",
                        sequenceCheckpoint.lhs, sequenceCheckpoint.rhs, taskGroup.sequenceOffsets,
                        latestOffsetsFromDb);
                killTask(sequenceCheckpoint.lhs);
                taskGroup.tasks.remove(sequenceCheckpoint.lhs);
            });
}
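
The tailMap(earliestConsistentSequenceId.get()) call keeps only the checkpoints at or after the first sequence id found to be consistent with the metadata store, and copying the view into a new TreeMap detaches it from the task's own map. Schematically (a sketch, not the Druid code):

    TreeMap<Integer, String> checkpoints = new TreeMap<Integer, String>();
    checkpoints.put(1, "published");
    checkpoints.put(2, "active");
    checkpoints.put(3, "active");
    // Keep sequence ids >= 2 and copy them out of the backing map.
    SortedMap<Integer, String> latest = new TreeMap<Integer, String>(checkpoints.tailMap(2));
    System.out.println(latest); // prints {2=active, 3=active}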

From source file:org.apache.james.mailbox.maildir.MaildirFolder.java

/**
 * Sorts the given map and returns a subset which is restricted by a lower and an upper limit.
 * @param source The source map
 * @param from The lower limit
 * @param to The upper limit; <code>-1</code> disables the upper limit.
 * @return The sorted subset
 */
private SortedMap<Long, MaildirMessageName> truncateMap(Map<Long, MaildirMessageName> source, long from,
        long to) {
    TreeMap<Long, MaildirMessageName> sortedMap;
    if (source instanceof TreeMap<?, ?>)
        sortedMap = (TreeMap<Long, MaildirMessageName>) source;
    else
        sortedMap = new TreeMap<Long, MaildirMessageName>(source);
    if (to != -1)
        return sortedMap.subMap(from, to + 1);
    return sortedMap.tailMap(from);
}
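
When the source already is a TreeMap, subMap and tailMap return views backed by it rather than copies, so the returned "subset" reflects later changes to the underlying map. For example:

    TreeMap<Long, String> messages = new TreeMap<Long, String>();
    messages.put(1L, "a");
    messages.put(2L, "b");
    SortedMap<Long, String> tail = messages.tailMap(2L);
    messages.put(3L, "c");
    System.out.println(tail); // prints {2=b, 3=c} -- the view tracks the backing map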

From source file:org.cloudata.core.client.TabletLocationCache.java

private void removeFromCache(TreeMap<Row.Key, TabletInfo> cache, Row.Key cacheRowKey, TabletInfo removeTablet) {
    if (cache.containsKey(cacheRowKey)) {
        cache.remove(cacheRowKey);
    }

    SortedMap<Row.Key, TabletInfo> tailMap = cache.tailMap(cacheRowKey);
    if (tailMap.isEmpty()) {
        return;
    }
    Row.Key tailFirst = tailMap.firstKey();

    TabletInfo tabletInfo = tailMap.get(tailFirst);

    if (tabletInfo.equals(removeTablet)) {
        cache.remove(tailFirst);
    }
}
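
Since TreeMap implements NavigableMap, the tailMap(...).firstKey() lookup above could also be written with ceilingEntry, which returns the first entry whose key is greater than or equal to the argument (a sketch, not the project's code):

    Map.Entry<Row.Key, TabletInfo> ceiling = cache.ceilingEntry(cacheRowKey);
    if (ceiling != null && ceiling.getValue().equals(removeTablet)) {
        cache.remove(ceiling.getKey());
    }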