Example usage for java.util.TreeMap size()

List of usage examples for java.util.TreeMap.size()

Introduction

This page collects example usages of java.util.TreeMap.size() from real projects.

Prototype

public int size()

Document

Returns the number of key-value mappings in this map.
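
A minimal, self-contained illustration of the method's behavior (the class name is just for this demo):

import java.util.TreeMap;

public class TreeMapSizeDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("a", 3); // re-mapping an existing key does not grow the map
        System.out.println(map.size()); // prints 2
        map.remove("b");
        System.out.println(map.size()); // prints 1
    }
}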

Usage

From source file:org.jszip.pseudo.io.PseudoFileSystem.java

public PseudoFile[] listChildren(PseudoFile dir, PseudoFileFilter filter) {
    TreeMap<String, Layer> names = new TreeMap<String, Layer>();
    final String path = dir.getAbsolutePath(this);
    for (int i = layers.length - 1; i >= 0; i--) {
        for (String name : layers[i].listChildren(path)) {
            names.put(name, layers[i]);
        }
    }
    List<PseudoFile> result = new ArrayList<PseudoFile>(names.size());
    for (Map.Entry<String, Layer> entry : names.entrySet()) {
        if (filter.accept(entry.getKey())) {
            result.add(entry.getValue().makeChild(this, dir, entry.getKey()));
        }
    }
    return result.toArray(new PseudoFile[result.size()]);
}

From source file:com.inmobi.databus.local.LocalStreamService.java

private void populateCheckpointPathForCollector(Map<String, FileStatus> checkpointPaths,
        TreeMap<String, FileStatus> collectorPaths, String checkpointKey) {
    // Last file in sorted ascending order to be checkpointed for this collector
    if (collectorPaths != null && collectorPaths.size() > 0) {
        Entry<String, FileStatus> entry = collectorPaths.lastEntry();
        checkpointPaths.put(checkpointKey, entry.getValue());
    }
}
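
The size() > 0 guard matters because TreeMap.lastEntry() returns null on an empty map (and the collectorPaths reference itself may be null here). A tiny illustration:

import java.util.TreeMap;

public class LastEntryDemo {
    public static void main(String[] args) {
        TreeMap<String, String> m = new TreeMap<>();
        System.out.println(m.lastEntry()); // null: the map is empty
        m.put("k", "v");
        System.out.println(m.lastEntry()); // k=v
    }
}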

From source file:com.github.dozermapper.core.functional_tests.MapTypeTest.java

@Test
public void testTreeMap() {
    TreeMap map = new TreeMap();
    map.put("a", "b");

    TreeMap result = mapper.map(map, TreeMap.class);

    assertNotNull(result);
    assertEquals(1, result.size());
}

From source file:org.cloudata.core.client.Row.java

public boolean deepEquals(Row row) {
    if (row == null || row.key == null) {
        return false;
    }
    if (!key.equals(row.key)) {
        return false;
    }

    if (cells.size() != row.cells.size()) {
        return false;
    }

    for (Map.Entry<String, TreeMap<Cell.Key, Cell>> entry : cells.entrySet()) {
        String columnName = entry.getKey();
        TreeMap<Cell.Key, Cell> columnCells = entry.getValue();

        TreeMap<Cell.Key, Cell> targetColumnCells = row.getCellMap(columnName);

        int columnCellsSize = columnCells == null ? 0 : columnCells.size();
        int targetColumnCellsSize = targetColumnCells == null ? 0 : targetColumnCells.size();
        if (columnCellsSize != targetColumnCellsSize) {
            return false;
        }

        if (columnCellsSize > 0) {
            for (Cell eachCell : columnCells.values()) {
                Cell targetCell = targetColumnCells.get(eachCell.getKey());
                if (!eachCell.equals(targetCell)) {
                    return false;
                }

                List<Cell.Value> values = eachCell.getValues();
                List<Cell.Value> targetValues = targetCell.getValues();

                int valueSize = values == null ? 0 : values.size();
                int targetValueSize = targetValues == null ? 0 : targetValues.size();
                if (valueSize != targetValueSize) {
                    return false;
                }

                for (int i = 0; i < valueSize; i++) {
                    Cell.Value value = values.get(i);
                    Cell.Value targetValue = targetValues.get(i);

                    if (!StringUtils.equalsBytes(value.getBytes(), targetValue.getBytes())) {
                        return false;
                    }

                    if (value.isDeleted() != targetValue.isDeleted()) {
                        return false;
                    }
                }
            }
        }
    }

    return true;
}

From source file:com.opengamma.analytics.financial.interestrate.CashFlowEquivalentCalculator.java

@Override
public AnnuityPaymentFixed visitGenericAnnuity(final Annuity<? extends Payment> annuity,
        final YieldCurveBundle curves) {
    Validate.notNull(curves);
    Validate.notNull(annuity);
    TreeMap<Double, Double> flow = new TreeMap<Double, Double>();
    Currency ccy = annuity.getCurrency();
    for (final Payment p : annuity.getPayments()) {
        AnnuityPaymentFixed cfe = visit(p, curves);
        for (int loopcf = 0; loopcf < cfe.getNumberOfPayments(); loopcf++) {
            addcf(flow, cfe.getNthPayment(loopcf).getPaymentTime(), cfe.getNthPayment(loopcf).getAmount());
        }
    }
    PaymentFixed[] agregatedCfe = new PaymentFixed[flow.size()];
    int loopcf = 0;
    for (double time : flow.keySet()) {
        agregatedCfe[loopcf++] = new PaymentFixed(ccy, time, flow.get(time), annuity.getDiscountCurve());
    }
    return new AnnuityPaymentFixed(agregatedCfe);
}
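
The addcf helper is not shown in this snippet. A minimal sketch of what it presumably does, inferred only from how it is called here (merge an amount into the flow map at a given payment time), might look like:

// Hypothetical sketch of the addcf helper, inferred from its call sites above;
// the real implementation in CashFlowEquivalentCalculator may differ.
private static void addcf(TreeMap<Double, Double> flow, double time, double amount) {
    Double existing = flow.get(time);
    // Merge with any cash flow already recorded at this payment time.
    flow.put(time, existing == null ? amount : existing + amount);
}

On Java 8+, flow.merge(time, amount, Double::sum) achieves the same in one call.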

From source file:com.opengamma.analytics.financial.provider.calculator.discounting.CashFlowEquivalentCalculator.java

@Override
public AnnuityPaymentFixed visitGenericAnnuity(final Annuity<? extends Payment> annuity,
        final MulticurveProviderInterface multicurves) {
    ArgumentChecker.notNull(annuity, "Annuity");
    ArgumentChecker.notNull(multicurves, "Multicurves provider");
    final TreeMap<Double, Double> flow = new TreeMap<>();
    final Currency ccy = annuity.getCurrency();
    for (final Payment p : annuity.getPayments()) {
        final AnnuityPaymentFixed cfe = p.accept(this, multicurves);
        for (int loopcf = 0; loopcf < cfe.getNumberOfPayments(); loopcf++) {
            addcf(flow, cfe.getNthPayment(loopcf).getPaymentTime(), cfe.getNthPayment(loopcf).getAmount());
        }
    }
    final PaymentFixed[] agregatedCfe = new PaymentFixed[flow.size()];
    int loopcf = 0;
    for (final double time : flow.keySet()) {
        agregatedCfe[loopcf++] = new PaymentFixed(ccy, time, flow.get(time));
    }
    return new AnnuityPaymentFixed(agregatedCfe);
}

From source file:org.loklak.geo.GeoNames.java

/**
 * Match a given sequence mix with geolocations. First, all locations matching sequences longer than one
 * word are collected. If that collection is not empty, the largest place (measured by population)
 * is returned. If no such location can be found, matching with single-word locations is attempted and
 * again the largest place is returned.
 * @param mix the sequence mix
 * @return the largest place matching the mix, with multi-word matches preferred
 */
private GeoMatch geomatch(LinkedHashMap<Integer, String> mix, final boolean preferLargePopulation) {
    TreeMap<Long, GeoMatch> cand = new TreeMap<>();
    int hitcount = 0;
    for (Map.Entry<Integer, String> entry : mix.entrySet()) {
        // If we already have matches for multi-word place names and the current token is a single
        // word, return the largest such place (measured by population) without considering it.
        if (cand.size() > 0 && entry.getValue().indexOf(' ') < 0)
            return preferNonStopwordLocation(cand.values(), preferLargePopulation);
        List<Integer> locs = this.hash2ids.get(entry.getKey());
        if (locs == null || locs.size() == 0)
            continue;
        for (Integer i : locs) {
            GeoLocation loc = this.id2loc.get(i);
            if (loc != null) {
                for (String name : loc.getNames()) {
                    if (normalize(entry.getValue()).equals(normalize(name))) {
                        cand.put(hitcount++ - loc.getPopulation(), new GeoMatch(entry.getValue(), loc));
                        break;
                    }
                }
            }
        }
    }
    // finally return the largest place (if any found)
    return cand.size() > 0 ? preferNonStopwordLocation(cand.values(), preferLargePopulation) : null;
}
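
The key construction hitcount++ - loc.getPopulation() exploits TreeMap's ascending key order: subtracting the population makes the most populous places sort first, while the small incrementing hit counter separates places with equal population so they do not overwrite each other (at the cost of a tiny perturbation of the ordering). A minimal sketch of the same ordering trick (names and numbers are made up for the demo):

import java.util.TreeMap;

public class LargestFirstDemo {
    public static void main(String[] args) {
        TreeMap<Long, String> cand = new TreeMap<>();
        long hitcount = 0;
        long[] populations = {100_000L, 5_000_000L, 42L};
        String[] names = {"Midtown", "Metropolis", "Hamlet"};
        for (int i = 0; i < names.length; i++) {
            // Smaller key = larger population, so iteration yields the largest first.
            cand.put(hitcount++ - populations[i], names[i]);
        }
        System.out.println(cand.values()); // [Metropolis, Midtown, Hamlet]
    }
}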

From source file:com.opengamma.analytics.financial.interestrate.CashFlowEquivalentCalculator.java

@Override
public AnnuityPaymentFixed visitBondFixedSecurity(final BondFixedSecurity bond, final YieldCurveBundle curves) {
    Validate.notNull(curves);
    Validate.notNull(bond);
    Currency ccy = bond.getCurrency();
    TreeMap<Double, Double> flow = new TreeMap<Double, Double>();
    AnnuityPaymentFixed cfeNom = visit(bond.getNominal(), curves);
    AnnuityPaymentFixed cfeCpn = visit(bond.getCoupon(), curves);
    for (final PaymentFixed p : cfeNom.getPayments()) {
        flow.put(p.getPaymentTime(), p.getAmount());
    }
    for (final PaymentFixed p : cfeCpn.getPayments()) {
        addcf(flow, p.getPaymentTime(), p.getAmount());
    }
    PaymentFixed[] agregatedCfe = new PaymentFixed[flow.size()];
    int loopcf = 0;
    for (double time : flow.keySet()) {
        agregatedCfe[loopcf++] = new PaymentFixed(ccy, time, flow.get(time), cfeCpn.getDiscountCurve());
    }
    return new AnnuityPaymentFixed(agregatedCfe);
}

From source file:org.apache.oozie.service.ZKXLogStreamingService.java

/**
 * Contacts each of the other Oozie servers, gets their logs for the job, collates them, and sends them to the user via the
 * Writer. It takes care not to read all of the log messages into memory at once, so the heap is not exhausted. If there
 * is a problem talking to one of the other servers, it ignores that server and prepends a message about it to the Writer.
 * It reads this server's own logs directly rather than through the REST API, which is more efficient.
 *
 * @param logStreamer the XLogStreamer
 * @param startTime the job start time
 * @param endTime the job end time
 * @param writer the writer
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void collateLogs(XLogStreamer logStreamer, Date startTime, Date endTime, Writer writer)
        throws IOException {
    List<String> badOozies = new ArrayList<String>();
    List<ServiceInstance<Map>> oozies = null;
    try {
        oozies = zk.getAllMetaData();
    } catch (Exception ex) {
        throw new IOException("Issue communicating with ZooKeeper: " + ex.getMessage(), ex);
    }
    List<TimestampedMessageParser> parsers = new ArrayList<TimestampedMessageParser>(oozies.size());
    try {
        // Create a BufferedReader for getting the logs of each server and put them in a TimestampedMessageParser
        for (ServiceInstance<Map> oozie : oozies) {
            Map<String, String> oozieMeta = oozie.getPayload();
            String otherId = oozieMeta.get(ZKUtils.ZKMetadataKeys.OOZIE_ID);
            // If it's this server, we can just get them directly
            if (otherId.equals(zk.getZKId())) {
                BufferedReader reader = logStreamer.makeReader(startTime, endTime);
                parsers.add(new TimestampedMessageParser(reader, logStreamer.getXLogFilter()));
            }
            // If it's another server, we'll have to use the REST API
            else {
                String otherUrl = oozieMeta.get(ZKUtils.ZKMetadataKeys.OOZIE_URL);
                String jobId = logStreamer.getXLogFilter().getFilterParams().get(DagXLogInfoService.JOB);
                try {
                    // It's important that we specify ALL_SERVERS_PARAM=false in the GET request to prevent the other Oozie
                    // Server from trying aggregate logs from the other Oozie servers (and creating an infinite recursion)
                    final String url = otherUrl + "/v" + OozieClient.WS_PROTOCOL_VERSION + "/"
                            + RestConstants.JOB + "/" + jobId + "?" + RestConstants.JOB_SHOW_PARAM + "="
                            + logStreamer.getLogType() + "&" + RestConstants.ALL_SERVER_REQUEST + "=false"
                            + AuthUrlClient.getQueryParamString(logStreamer.getRequestParam());
                    // remove doAs from url to avoid failure while fetching
                    // logs in case of HA mode
                    String key = "doAs";
                    String[] value = null;
                    if (logStreamer.getRequestParam() != null) {
                        value = logStreamer.getRequestParam().get(key);
                    }
                    String urlWithoutdoAs = null;
                    if (value != null && value.length > 0 && value[0] != null && value[0].length() > 0) {
                        urlWithoutdoAs = url.replace("&" + key + "=" + URLEncoder.encode(value[0], "UTF-8"),
                                "");
                    } else {
                        urlWithoutdoAs = url;
                    }
                    BufferedReader reader = AuthUrlClient.callServer(urlWithoutdoAs);
                    parsers.add(new SimpleTimestampedMessageParser(reader, logStreamer.getXLogFilter()));
                } catch (IOException ioe) {
                    log.warn(
                            "Failed to retrieve logs for job [" + jobId + "] from Oozie server with ID ["
                                    + otherId + "] at [" + otherUrl + "]; log information may be incomplete",
                            ioe);
                    badOozies.add(otherId);
                }
            }
        }

        // If the filter truncated any log messages, write a note about that first.
        if (!StringUtils.isEmpty(logStreamer.getXLogFilter().getTruncatedMessage())) {
            writer.write(logStreamer.getXLogFilter().getTruncatedMessage());
        }

        // If the log param debug is set, write the filter's debug message (start and end dates) to the output.
        if (logStreamer.getXLogFilter().isDebugMode()) {
            writer.write(logStreamer.getXLogFilter().getDebugMessage());
        }
        // Add a message about any servers we couldn't contact
        if (!badOozies.isEmpty()) {
            writer.write(
                    "Unable to contact the following Oozie Servers for logs (log information may be incomplete):\n");
            for (String badOozie : badOozies) {
                writer.write("     ");
                writer.write(badOozie);
                writer.write("\n");
            }
            writer.write("\n");
            writer.flush();
        }

        // If it's just the one server (this server), then we don't need to do any more processing and can just copy it directly
        if (parsers.size() == 1) {
            TimestampedMessageParser parser = parsers.get(0);
            parser.processRemaining(writer, logStreamer);
        } else {
            // Now that we have a Reader for each server to get the logs from that server, we have to collate them.  Within each
            // server, the logs should already be in the correct order, so we can take advantage of that.  We'll use the
            // BufferedReaders to read the messages from the logs of each server and put them in order without having to bring
            // every message into memory at the same time.
            TreeMap<String, TimestampedMessageParser> timestampMap = new TreeMap<String, TimestampedMessageParser>();
            // populate timestampMap with initial values
            for (TimestampedMessageParser parser : parsers) {
                if (parser.increment()) {
                    timestampMap.put(parser.getLastTimestamp(), parser);
                }
            }
            while (timestampMap.size() > 1) {
                // pollFirstEntry() returns the entry with the earliest timestamp and removes it from the map
                TimestampedMessageParser earliestParser = timestampMap.pollFirstEntry().getValue();
                // Write the message from that parser at that timestamp
                writer.write(earliestParser.getLastMessage());
                if (logStreamer.shouldFlushOutput(earliestParser.getLastMessage().length())) {
                    writer.flush();
                }
                // Increment that parser to read the next message
                if (earliestParser.increment()) {
                    // If it still has messages left, put it back in the map with the new last timestamp for it
                    timestampMap.put(earliestParser.getLastTimestamp(), earliestParser);
                }
            }
            // If there's only one parser left in the map, then we can simply copy the rest of its lines directly to be faster
            if (timestampMap.size() == 1) {
                TimestampedMessageParser parser = timestampMap.values().iterator().next();
                writer.write(parser.getLastMessage()); // don't forget the last message read by the parser
                parser.processRemaining(writer, logStreamer);
            }
        }
    } finally {
        for (TimestampedMessageParser parser : parsers) {
            parser.closeReader();
        }
    }
}
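
The merge loop above is an instance of a general k-way merge: keep one cursor per sorted source in a TreeMap keyed by the cursor's current element, repeatedly poll the first (smallest) entry, emit it, advance that cursor, and reinsert it. A stripped-down sketch of the pattern with plain string iterators standing in for the log parsers (hypothetical names, not Oozie API; like the original, it assumes keys are unique across sources, since a put with a duplicate key would silently drop a cursor):

import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class KWayMerge {
    public static void merge(List<Iterator<String>> sources, StringBuilder out) {
        // One entry per source: the key is the source's current (smallest unread) element.
        TreeMap<String, Iterator<String>> heads = new TreeMap<>();
        for (Iterator<String> source : sources) {
            if (source.hasNext()) {
                heads.put(source.next(), source);
            }
        }
        while (!heads.isEmpty()) {
            // The first entry holds the globally smallest pending element; poll removes it.
            Map.Entry<String, Iterator<String>> first = heads.pollFirstEntry();
            out.append(first.getKey()).append('\n');
            Iterator<String> source = first.getValue();
            if (source.hasNext()) {
                heads.put(source.next(), source); // reinsert with this source's next element
            }
        }
    }
}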

From source file:com.opengamma.analytics.financial.interestrate.CashFlowEquivalentCalculator.java

@Override
public AnnuityPaymentFixed visitSwap(final Swap<?, ?> swap, final YieldCurveBundle curves) {
    Validate.notNull(curves);
    Validate.notNull(swap);
    Currency ccy = swap.getFirstLeg().getCurrency();
    Validate.isTrue(ccy.equals(swap.getSecondLeg().getCurrency()),
            "Cash flow equivalent available only for single currency swaps.");
    TreeMap<Double, Double> flow = new TreeMap<Double, Double>();
    AnnuityPaymentFixed cfeLeg1 = visit(swap.getFirstLeg(), curves);
    AnnuityPaymentFixed cfeLeg2 = visit(swap.getSecondLeg(), curves);
    for (final PaymentFixed p : cfeLeg1.getPayments()) {
        flow.put(p.getPaymentTime(), p.getAmount());
    }
    for (final PaymentFixed p : cfeLeg2.getPayments()) {
        addcf(flow, p.getPaymentTime(), p.getAmount());
    }
    PaymentFixed[] agregatedCfe = new PaymentFixed[flow.size()];
    int loopcf = 0;
    for (double time : flow.keySet()) {
        agregatedCfe[loopcf++] = new PaymentFixed(ccy, time, flow.get(time), cfeLeg1.getDiscountCurve());
    }
    return new AnnuityPaymentFixed(agregatedCfe);
}