Example usage for java.util SortedMap putAll

Introduction

This page lists example usages of java.util.SortedMap#putAll, collected from open-source projects.

Prototype

void putAll(Map<? extends K, ? extends V> m);

Document

Copies all of the mappings from the specified map to this map (optional operation).
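
For orientation, here is a minimal, self-contained sketch of putAll (class and variable names are illustrative). Entries copied from the source map are re-inserted according to the destination map's ordering, and mappings for keys already present are overwritten:

import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class PutAllDemo {
    public static void main(String[] args) {
        SortedMap<String, Integer> dest = new TreeMap<>();
        dest.put("b", 2);
        dest.put("d", 4);

        Map<String, Integer> src = new TreeMap<>();
        src.put("a", 1);
        src.put("b", 20); // overwrites the existing mapping for "b"
        src.put("c", 3);

        dest.putAll(src);
        System.out.println(dest); // prints {a=1, b=20, c=3, d=4}
    }
}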

Usage

From source file:org.polymap.kaps.ui.form.VertragsdatenAgrarBodenwertFormEditorPage.java

private SortedMap<String, Object> searchZonen() {
    // #343, I don't know why this can be null
    SortedMap<String, Object> ret = new TreeMap<String, Object>();
    if (!vb.vertrag().get().richtwertZonenAgrar().isEmpty()) {
        Set<GemeindeComposite> gemeinden = new HashSet<GemeindeComposite>();
        for (RichtwertzoneComposite zone : vb.vertrag().get().richtwertZonenAgrar()) {
            gemeinden.add(zone.gemeinde().get());
        }
        for (GemeindeComposite gemeinde : gemeinden) {
            ret.putAll(RichtwertzoneProvider.findFor(gemeinde, vb.vertrag().get().vertragsDatum().get()));
        }
    }
    return ret;
}

From source file:org.apache.james.mailbox.maildir.MaildirFolder.java

/**
 * Reads all uids between the two boundaries from the folder and returns them as
 * a sorted map together with their corresponding {@link MaildirMessageName}s.
 *
 * @param session The mailbox session
 * @param from The lower uid limit
 * @param to The upper uid limit. <code>-1</code> disables the upper limit
 * @return a {@link Map} with all uids in the given range and associated {@link MaildirMessageName}s
 * @throws MailboxException if there is a problem with the uid list file
 */
public SortedMap<Long, MaildirMessageName> getUidMap(final MailboxSession session, final long from,
        final long to) throws MailboxException {
    return locker.executeWithLock(session, path, new LockAwareExecution<SortedMap<Long, MaildirMessageName>>() {

        @Override
        public SortedMap<Long, MaildirMessageName> execute() throws MailboxException {
            final SortedMap<Long, MaildirMessageName> uidMap = new TreeMap<Long, MaildirMessageName>();

            File uidList = uidFile;

            if (uidList.isFile()) {
                if (isModified()) {
                    try {
                        uidMap.putAll(truncateMap(updateUidFile(), from, to));
                    } catch (MailboxException e) {
                        // weird case if someone deleted the uidlist after
                        // checking its
                        // existence and before trying to update it.
                        uidMap.putAll(truncateMap(createUidFile(), from, to));
                    }
                } else {
                    // the uidList is up to date
                    uidMap.putAll(readUidFile(session, from, to));
                }
            } else {
                // the uidList does not exist
                uidMap.putAll(truncateMap(createUidFile(), from, to));
            }
            return uidMap;
        }
    }, true);
}
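
The truncateMap, updateUidFile, createUidFile, and readUidFile helpers belong to MaildirFolder but are not shown in this excerpt. A plausible sketch of truncateMap (the signature is inferred from the calls above; the body is an assumption, not the actual James implementation) restricts a uid map to the requested range using SortedMap's subMap and tailMap views:

// Hypothetical sketch of the truncateMap helper used above.
private SortedMap<Long, MaildirMessageName> truncateMap(SortedMap<Long, MaildirMessageName> map, long from,
        long to) {
    if (to >= 0) {
        // subMap's upper bound is exclusive, so add 1 to keep 'to' itself
        return map.subMap(from, to + 1);
    }
    // a 'to' of -1 disables the upper limit
    return map.tailMap(from);
}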

From source file:org.torproject.ernie.web.ExoneraTorServlet.java

public void doGet(HttpServletRequest request, HttpServletResponse response)
        throws IOException, ServletException {

    /* Start writing response. */
    PrintWriter out = response.getWriter();
    writeHeader(out);

    /* Look up first and last consensus in the database. */
    long firstValidAfter = -1L, lastValidAfter = -1L;
    try {
        Connection conn = this.ds.getConnection();
        Statement statement = conn.createStatement();
        String query = "SELECT MIN(validafter) AS first, " + "MAX(validafter) AS last FROM consensus";
        ResultSet rs = statement.executeQuery(query);
        if (rs.next()) {
            firstValidAfter = rs.getTimestamp(1).getTime();
            lastValidAfter = rs.getTimestamp(2).getTime();
        }
        rs.close();
        statement.close();
        conn.close();
    } catch (SQLException e) {
        /* Looks like we don't have any consensuses. */
    }
    if (firstValidAfter < 0L || lastValidAfter < 0L) {
        out.println("<p><font color=\"red\"><b>Warning: </b></font>This "
                + "server doesn't have any relay lists available. If this " + "problem persists, please "
                + "<a href=\"mailto:tor-assistants@freehaven.net\">let us " + "know</a>!</p>\n");
        writeFooter(out);
        return;
    }

    out.println("<a name=\"relay\"></a><h3>Was there a Tor relay running " + "on this IP address?</h3>");

    /* Parse IP parameter. */
    Pattern ipAddressPattern = Pattern
            .compile("^([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." + "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\."
                    + "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." + "([01]?\\d\\d?|2[0-4]\\d|25[0-5])$");
    String ipParameter = request.getParameter("ip");
    String relayIP = "", ipWarning = "";
    if (ipParameter != null && ipParameter.length() > 0) {
        Matcher ipParameterMatcher = ipAddressPattern.matcher(ipParameter);
        if (ipParameterMatcher.matches()) {
            String[] ipParts = ipParameter.split("\\.");
            relayIP = Integer.parseInt(ipParts[0]) + "." + Integer.parseInt(ipParts[1]) + "."
                    + Integer.parseInt(ipParts[2]) + "." + Integer.parseInt(ipParts[3]);
        } else {
            ipWarning = "\""
                    + (ipParameter.length() > 20 ? ipParameter.substring(0, 20) + "[...]" : ipParameter)
                    + "\" is not a valid IP address.";
        }
    }

    /* Parse timestamp parameter. */
    String timestampParameter = request.getParameter("timestamp");
    long timestamp = 0L;
    String timestampStr = "", timestampWarning = "";
    SimpleDateFormat shortDateTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
    shortDateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    if (timestampParameter != null && timestampParameter.length() > 0) {
        try {
            timestamp = shortDateTimeFormat.parse(timestampParameter).getTime();
            timestampStr = shortDateTimeFormat.format(timestamp);
            if (timestamp < firstValidAfter || timestamp > lastValidAfter) {
                timestampWarning = "Please pick a value between \""
                        + shortDateTimeFormat.format(firstValidAfter) + "\" and \""
                        + shortDateTimeFormat.format(lastValidAfter) + "\".";
            }
        } catch (ParseException e) {
            /* We have no way to handle this exception, other than leaving
               timestampStr at "". */
            timestampWarning = "\""
                    + (timestampParameter.length() > 20 ? timestampParameter.substring(0, 20) + "[...]"
                            : timestampParameter)
                    + "\" is not a valid timestamp.";
        }
    }

    /* If either IP address or timestamp is provided, the other one must
     * be provided, too. */
    if (relayIP.length() < 1 && timestampStr.length() > 0 && ipWarning.length() < 1) {
        ipWarning = "Please provide an IP address.";
    }
    if (relayIP.length() > 0 && timestampStr.length() < 1 && timestampWarning.length() < 1) {
        timestampWarning = "Please provide a timestamp.";
    }

    /* Parse target IP parameter. */
    String targetIP = "", targetPort = "", target = "";
    String[] targetIPParts = null;
    String targetAddrParameter = request.getParameter("targetaddr");
    String targetAddrWarning = "";
    if (targetAddrParameter != null && targetAddrParameter.length() > 0) {
        Matcher targetAddrParameterMatcher = ipAddressPattern.matcher(targetAddrParameter);
        if (targetAddrParameterMatcher.matches()) {
            String[] targetAddrParts = targetAddrParameter.split("\\.");
            targetIP = Integer.parseInt(targetAddrParts[0]) + "." + Integer.parseInt(targetAddrParts[1]) + "."
                    + Integer.parseInt(targetAddrParts[2]) + "." + Integer.parseInt(targetAddrParts[3]);
            target = targetIP;
            targetIPParts = targetIP.split("\\.");
        } else {
            targetAddrWarning = "\""
                    + (targetAddrParameter.length() > 20 ? timestampParameter.substring(0, 20) + "[...]"
                            : timestampParameter)
                    + "\" is not a valid IP address.";
        }
    }

    /* Parse target port parameter. */
    String targetPortParameter = request.getParameter("targetport");
    String targetPortWarning = "";
    if (targetPortParameter != null && targetPortParameter.length() > 0) {
        Pattern targetPortPattern = Pattern.compile("\\d+");
        if (targetPortParameter.length() < 5 && targetPortPattern.matcher(targetPortParameter).matches()
                && !targetPortParameter.equals("0") && Integer.parseInt(targetPortParameter) < 65536) {
            targetPort = targetPortParameter;
            if (target.length() > 0) {
                target += ":" + targetPort;
            } else {
                target = targetPort;
            }
        } else {
            targetPortWarning = "\""
                    + (targetPortParameter.length() > 8 ? targetPortParameter.substring(0, 8) + "[...]"
                            : targetPortParameter)
                    + "\" is not a valid TCP port.";
        }
    }

    /* If target port is provided, a target address must be provided,
     * too. */
    if (targetPort.length() > 0 && targetIP.length() < 1 && targetAddrWarning.length() < 1) {
        targetAddrWarning = "Please provide an IP address.";
    }

    /* Write form with IP address and timestamp. */
    out.println("        <form action=\"exonerator.html#relay\">\n"
            + "          <input type=\"hidden\" name=\"targetaddr\" "
            + (targetIP.length() > 0 ? " value=\"" + targetIP + "\"" : "") + ">\n"
            + "          <input type=\"hidden\" name=\"targetPort\""
            + (targetPort.length() > 0 ? " value=\"" + targetPort + "\"" : "") + ">\n" + "          <table>\n"
            + "            <tr>\n" + "              <td align=\"right\">IP address in question:" + "</td>\n"
            + "              <td><input type=\"text\" name=\"ip\""
            + (relayIP.length() > 0 ? " value=\"" + relayIP + "\"" : "") + ">"
            + (ipWarning.length() > 0 ? "<br><font color=\"red\">" + ipWarning + "</font>" : "") + "</td>\n"
            + "              <td><i>(Ex.: 1.2.3.4)</i></td>\n" + "            </tr>\n" + "            <tr>\n"
            + "              <td align=\"right\">Timestamp, in UTC:</td>\n"
            + "              <td><input type=\"text\" name=\"timestamp\""
            + (timestampStr.length() > 0 ? " value=\"" + timestampStr + "\"" : "") + ">"
            + (timestampWarning.length() > 0 ? "<br><font color=\"red\">" + timestampWarning + "</font>" : "")
            + "</td>\n" + "              <td><i>(Ex.: 2010-01-01 12:00)</i></td>\n" + "            </tr>\n"
            + "            <tr>\n" + "              <td></td>\n" + "              <td>\n"
            + "                <input type=\"submit\">\n" + "                <input type=\"reset\">\n"
            + "              </td>\n" + "              <td></td>\n" + "            </tr>\n"
            + "          </table>\n" + "        </form>\n");

    if (relayIP.length() < 1 || timestampStr.length() < 1) {
        writeFooter(out);
        return;
    }

    /* Look up relevant consensuses. */
    long timestampTooOld = timestamp - 15L * 60L * 60L * 1000L;
    long timestampFrom = timestamp - 3L * 60L * 60L * 1000L;
    long timestampTooNew = timestamp + 12L * 60L * 60L * 1000L;
    out.printf(
            "<p>Looking up IP address %s in the relay lists published " + "between %s and %s. "
                    + "Clients could have used any of these relay lists to "
                    + "select relays for their paths and build circuits using them. "
                    + "You may follow the links to relay lists and relay descriptors "
                    + "to grep for the lines printed below and confirm that results " + "are correct.<br>",
            relayIP, shortDateTimeFormat.format(timestampFrom), timestampStr);
    SimpleDateFormat validAfterTimeFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    validAfterTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    String fromValidAfter = validAfterTimeFormat.format(timestampTooOld);
    String toValidAfter = validAfterTimeFormat.format(timestampTooNew);
    SortedMap<Long, String> tooOldConsensuses = new TreeMap<Long, String>();
    SortedMap<Long, String> relevantConsensuses = new TreeMap<Long, String>();
    SortedMap<Long, String> tooNewConsensuses = new TreeMap<Long, String>();
    try {
        Connection conn = this.ds.getConnection();
        Statement statement = conn.createStatement();
        String query = "SELECT validafter, rawdesc FROM consensus " + "WHERE validafter >= '" + fromValidAfter
                + "' AND validafter <= '" + toValidAfter + "'";
        ResultSet rs = statement.executeQuery(query);
        while (rs.next()) {
            long consensusTime = rs.getTimestamp(1).getTime();
            String rawConsensusString = new String(rs.getBytes(2), "US-ASCII");
            if (consensusTime < timestampFrom) {
                tooOldConsensuses.put(consensusTime, rawConsensusString);
            } else if (consensusTime > timestamp) {
                tooNewConsensuses.put(consensusTime, rawConsensusString);
            } else {
                relevantConsensuses.put(consensusTime, rawConsensusString);
            }
        }
        rs.close();
        statement.close();
        conn.close();
    } catch (SQLException e) {
        /* Looks like we don't have any consensuses in the requested
           interval. */
    }
    SortedMap<Long, String> allConsensuses = new TreeMap<Long, String>();
    allConsensuses.putAll(tooOldConsensuses);
    allConsensuses.putAll(relevantConsensuses);
    allConsensuses.putAll(tooNewConsensuses);
    if (allConsensuses.isEmpty()) {
        out.println("        <p>No relay lists found!</p>\n" + "        <p>Result is INDECISIVE!</p>\n"
                + "        <p>We cannot make any statement whether there was "
                + "a Tor relay running on IP address " + relayIP + " at " + timestampStr + "! We "
                + "did not find any relevant relay lists preceding the given "
                + "time. If you think this is an error on our side, please "
                + "<a href=\"mailto:tor-assistants@freehaven.net\">contact " + "us</a>!</p>\n");
        writeFooter(out);
        return;
    }

    /* Parse consensuses to find descriptors belonging to the IP
       address. */
    SortedSet<Long> positiveConsensusesNoTarget = new TreeSet<Long>();
    Set<String> addressesInSameNetwork = new HashSet<String>();
    SortedMap<String, Set<Long>> relevantDescriptors = new TreeMap<String, Set<Long>>();
    SimpleDateFormat validAfterUrlFormat = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
    validAfterUrlFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    for (Map.Entry<Long, String> e : allConsensuses.entrySet()) {
        long consensus = e.getKey();
        if (relevantConsensuses.containsKey(consensus)) {
            long validAfterTime = -1L;
            String validAfterDatetime = validAfterTimeFormat.format(consensus);
            String validAfterString = validAfterUrlFormat.format(consensus);
            out.println("        <br><tt>valid-after <b>" + "<a href=\"consensus?valid-after="
                    + validAfterString + "\" target=\"_blank\">" + validAfterDatetime + "</b></a></tt><br>");
        }
        String rawConsensusString = e.getValue();
        BufferedReader br = new BufferedReader(new StringReader(rawConsensusString));
        String line = null;
        while ((line = br.readLine()) != null) {
            if (!line.startsWith("r ")) {
                continue;
            }
            String[] parts = line.split(" ");
            String address = parts[6];
            if (address.equals(relayIP)) {
                String hex = String.format("%040x", new BigInteger(1, Base64.decodeBase64(parts[3] + "==")));
                if (!relevantDescriptors.containsKey(hex)) {
                    relevantDescriptors.put(hex, new HashSet<Long>());
                }
                relevantDescriptors.get(hex).add(consensus);
                positiveConsensusesNoTarget.add(consensus);
                if (relevantConsensuses.containsKey(consensus)) {
                    out.println("    <tt>r " + parts[1] + " " + parts[2] + " " + "<a href=\"serverdesc?desc-id="
                            + hex + "\" " + "target=\"_blank\">" + parts[3] + "</a> " + parts[4] + " "
                            + parts[5] + " <b>" + parts[6] + "</b> " + parts[7] + " " + parts[8] + "</tt><br>");
                }
            } else {
                // include the trailing dot so "1.2.3" does not also match "1.2.30.x"
                if (relayIP.startsWith(address.substring(0, address.lastIndexOf(".") + 1))) {
                    addressesInSameNetwork.add(address);
                }
            }
        }
        br.close();
    }
    if (relevantDescriptors.isEmpty()) {
        out.printf(
                "        <p>None found!</p>\n" + "        <p>Result is NEGATIVE with moderate certainty!</p>\n"
                        + "        <p>We did not find IP " + "address " + relayIP
                        + " in any of the relay lists that were "
                        + "published between %s and %s.\n\nA possible "
                        + "reason for false negatives is that the relay is using a "
                        + "different IP address when generating a descriptor than for "
                        + "exiting to the Internet. We hope to provide better checks "
                        + "for this case in the future.</p>\n",
                shortDateTimeFormat.format(timestampTooOld), shortDateTimeFormat.format(timestampTooNew));
        if (!addressesInSameNetwork.isEmpty()) {
            out.println("        <p>The following other IP addresses of Tor "
                    + "relays were found in the mentioned relay lists that "
                    + "are in the same /24 network and that could be related to " + "IP address " + relayIP
                    + ":</p>\n");
            for (String s : addressesInSameNetwork) {
                out.println("        <p>" + s + "</p>\n");
            }
        }
        writeFooter(out);
        return;
    }

    /* Print out result. */
    Set<Long> matches = positiveConsensusesNoTarget;
    if (matches.contains(relevantConsensuses.lastKey())) {
        out.println("        <p>Result is POSITIVE with high certainty!" + "</p>\n"
                + "        <p>We found one or more relays on IP address " + relayIP
                + " in the most recent relay list preceding " + timestampStr
                + " that clients were likely to know.</p>\n");
    } else {
        boolean inOtherRelevantConsensus = false, inTooOldConsensuses = false, inTooNewConsensuses = false;
        for (long match : matches) {
            if (relevantConsensuses.containsKey(match)) {
                inOtherRelevantConsensus = true;
            } else if (tooOldConsensuses.containsKey(match)) {
                inTooOldConsensuses = true;
            } else if (tooNewConsensuses.containsKey(match)) {
                inTooNewConsensuses = true;
            }
        }
        if (inOtherRelevantConsensus) {
            out.println("        <p>Result is POSITIVE " + "with moderate certainty!</p>\n");
            out.println("<p>We found one or more relays on IP address " + relayIP
                    + ", but not in the relay list immediately " + "preceding " + timestampStr
                    + ". A possible reason for the "
                    + "relay being missing in the last relay list preceding the "
                    + "given time might be that some of the directory "
                    + "authorities had difficulties connecting to the relay. "
                    + "However, clients might still have used the relay.</p>\n");
        } else {
            out.println("        <p>Result is NEGATIVE " + "with high certainty!</p>\n");
            out.println("        <p>We did not find any relay on IP address " + relayIP
                    + " in the relay lists 3 hours preceding " + timestampStr + ".</p>\n");
            if (inTooOldConsensuses || inTooNewConsensuses) {
                if (inTooOldConsensuses && !inTooNewConsensuses) {
                    out.println("        <p>Note that we found a matching relay "
                            + "in relay lists that were published between 5 and 3 " + "hours before "
                            + timestampStr + ".</p>\n");
                } else if (!inTooOldConsensuses && inTooNewConsensuses) {
                    out.println("        <p>Note that we found a matching relay "
                            + "in relay lists that were published up to 2 hours " + "after " + timestampStr
                            + ".</p>\n");
                } else {
                    out.println("        <p>Note that we found a matching relay "
                            + "in relay lists that were published between 5 and 3 "
                            + "hours before and in relay lists that were published " + "up to 2 hours after "
                            + timestampStr + ".</p>\n");
                }
                out.println("<p>Make sure that the timestamp you provided is "
                        + "in the correct timezone: UTC (or GMT).</p>");
            }
            writeFooter(out);
            return;
        }
    }

    /* Second part: target */
    out.println("<br><a name=\"exit\"></a><h3>Was this relay configured "
            + "to permit exiting to a given target?</h3>");

    out.println("        <form action=\"exonerator.html#exit\">\n"
            + "              <input type=\"hidden\" name=\"timestamp\"\n" + "                         value=\""
            + timestampStr + "\">\n" + "              <input type=\"hidden\" name=\"ip\" " + "value=\""
            + relayIP + "\">\n" + "          <table>\n" + "            <tr>\n"
            + "              <td align=\"right\">Target address:</td>\n"
            + "              <td><input type=\"text\" name=\"targetaddr\""
            + (targetIP.length() > 0 ? " value=\"" + targetIP + "\"" : "") + "\">"
            + (targetAddrWarning.length() > 0 ? "<br><font color=\"red\">" + targetAddrWarning + "</font>" : "")
            + "</td>\n" + "              <td><i>(Ex.: 4.3.2.1)</i></td>\n" + "            </tr>\n"
            + "            <tr>\n" + "              <td align=\"right\">Target port:</td>\n"
            + "              <td><input type=\"text\" name=\"targetport\""
            + (targetPort.length() > 0 ? " value=\"" + targetPort + "\"" : "") + ">"
            + (targetPortWarning.length() > 0 ? "<br><font color=\"red\">" + targetPortWarning + "</font>" : "")
            + "</td>\n" + "              <td><i>(Ex.: 80)</i></td>\n" + "            </tr>\n"
            + "            <tr>\n" + "              <td></td>\n" + "              <td>\n"
            + "                <input type=\"submit\">\n" + "                <input type=\"reset\">\n"
            + "              </td>\n" + "              <td></td>\n" + "            </tr>\n"
            + "          </table>\n" + "        </form>\n");

    if (targetIP.length() < 1) {
        writeFooter(out);
        return;
    }

    /* Parse router descriptors to check exit policies. */
    out.println("<p>Searching the relay descriptors published by the " + "relay on IP address " + relayIP
            + " to find out whether this " + "relay permitted exiting to " + target + ". You may follow the "
            + "links above to the relay descriptors and grep them for the "
            + "lines printed below to confirm that results are correct.</p>");
    SortedSet<Long> positiveConsensuses = new TreeSet<Long>();
    Set<String> missingDescriptors = new HashSet<String>();
    Set<String> descriptors = relevantDescriptors.keySet();
    for (String descriptor : descriptors) {
        missingDescriptors.add(descriptor); // assume missing until we find its raw descriptor
        byte[] rawDescriptor = null;
        try {
            Connection conn = this.ds.getConnection();
            Statement statement = conn.createStatement();
            String query = "SELECT rawdesc FROM descriptor " + "WHERE descriptor = '" + descriptor + "'";
            ResultSet rs = statement.executeQuery(query);
            if (rs.next()) {
                rawDescriptor = rs.getBytes(1);
            }
            rs.close();
            statement.close();
            conn.close();
        } catch (SQLException e) {
            /* Consider this descriptor as 'missing'. */
            continue;
        }
        if (rawDescriptor != null && rawDescriptor.length > 0) {
            missingDescriptors.remove(descriptor);
            String rawDescriptorString = new String(rawDescriptor, "US-ASCII");
            try {
                BufferedReader br = new BufferedReader(new StringReader(rawDescriptorString));
                String line = null, routerLine = null, publishedLine = null;
                StringBuilder acceptRejectLines = new StringBuilder();
                boolean foundMatch = false;
                while ((line = br.readLine()) != null) {
                    if (line.startsWith("router ")) {
                        routerLine = line;
                    } else if (line.startsWith("published ")) {
                        publishedLine = line;
                    } else if (line.startsWith("reject ") || line.startsWith("accept ")) {
                        if (foundMatch) {
                            out.println("<tt> " + line + "</tt><br>");
                            continue;
                        }
                        boolean ruleAccept = line.split(" ")[0].equals("accept");
                        String ruleAddress = line.split(" ")[1].split(":")[0];
                        if (!ruleAddress.equals("*")) {
                            if (!ruleAddress.contains("/") && !ruleAddress.equals(targetIP)) {
                                /* IP address does not match. */
                                acceptRejectLines.append("<tt> " + line + "</tt><br>\n");
                                continue;
                            }
                            String[] ruleIPParts = ruleAddress.split("/")[0].split("\\.");
                            int ruleNetwork = ruleAddress.contains("/")
                                    ? Integer.parseInt(ruleAddress.split("/")[1])
                                    : 32;
                            for (int i = 0; i < 4; i++) {
                                if (ruleNetwork == 0) {
                                    break;
                                } else if (ruleNetwork >= 8) {
                                    if (ruleIPParts[i].equals(targetIPParts[i])) {
                                        ruleNetwork -= 8;
                                    } else {
                                        break;
                                    }
                                } else {
                                    int mask = 255 ^ 255 >>> ruleNetwork;
                                    if ((Integer.parseInt(ruleIPParts[i])
                                            & mask) == (Integer.parseInt(targetIPParts[i]) & mask)) {
                                        ruleNetwork = 0;
                                    }
                                    break;
                                }
                            }
                            if (ruleNetwork > 0) {
                                /* IP address does not match. */
                                acceptRejectLines.append("<tt> " + line + "</tt><br>\n");
                                continue;
                            }
                        }
                        String rulePort = line.split(" ")[1].split(":")[1];
                        if (targetPort.length() < 1 && !ruleAccept && !rulePort.equals("*")) {
                            /* With no port given, we only consider reject :* rules as
                               matching. */
                            acceptRejectLines.append("<tt> " + line + "</tt><br>\n");
                            continue;
                        }
                        if (targetPort.length() > 0 && !rulePort.equals("*") && rulePort.contains("-")) {
                            int fromPort = Integer.parseInt(rulePort.split("-")[0]);
                            int toPort = Integer.parseInt(rulePort.split("-")[1]);
                            int targetPortInt = Integer.parseInt(targetPort);
                            if (targetPortInt < fromPort || targetPortInt > toPort) {
                                /* Port not contained in interval. */
                                continue;
                            }
                        }
                        if (targetPort.length() > 0) {
                            if (!rulePort.equals("*") && !rulePort.contains("-")
                                    && !targetPort.equals(rulePort)) {
                                /* Ports do not match. */
                                acceptRejectLines.append("<tt> " + line + "</tt><br>\n");
                                continue;
                            }
                        }
                        boolean relevantMatch = false;
                        for (long match : relevantDescriptors.get(descriptor)) {
                            if (relevantConsensuses.containsKey(match)) {
                                relevantMatch = true;
                            }
                        }
                        if (relevantMatch) {
                            String[] routerParts = routerLine.split(" ");
                            out.println("<br><tt>" + routerParts[0] + " " + routerParts[1] + " <b>"
                                    + routerParts[2] + "</b> " + routerParts[3] + " " + routerParts[4] + " "
                                    + routerParts[5] + "</tt><br>");
                            String[] publishedParts = publishedLine.split(" ");
                            out.println("<tt>" + publishedParts[0] + " <b>" + publishedParts[1] + " "
                                    + publishedParts[2] + "</b></tt><br>");
                            out.println(acceptRejectLines.toString());
                            out.println("<tt><b>" + line + "</b></tt><br>");
                            foundMatch = true;
                        }
                        if (ruleAccept) {
                            positiveConsensuses.addAll(relevantDescriptors.get(descriptor));
                        }
                    }
                }
                br.close();
            } catch (IOException e) {
                /* Could not read descriptor string. */
                continue;
            }
        }
    }

    /* Print out result. */
    matches = positiveConsensuses;
    if (matches.contains(relevantConsensuses.lastKey())) {
        out.println("        <p>Result is POSITIVE with high certainty!</p>" + "\n"
                + "        <p>We found one or more relays on IP address " + relayIP + " permitting exit to "
                + target + " in the most recent relay list preceding " + timestampStr
                + " that clients were likely to know.</p>\n");
        writeFooter(out);
        return;
    }
    boolean resultIndecisive = target.length() > 0 && !missingDescriptors.isEmpty();
    if (resultIndecisive) {
        out.println("        <p>Result is INDECISIVE!</p>\n"
                + "        <p>At least one referenced descriptor could not be "
                + "found. This is a rare case, but one that (apparently) "
                + "happens. We cannot make any good statement about exit "
                + "relays without these descriptors. The following descriptors " + "are missing:</p>");
        for (String desc : missingDescriptors) {
            out.println("        <p>" + desc + "</p>\n");
        }
    }
    boolean inOtherRelevantConsensus = false, inTooOldConsensuses = false, inTooNewConsensuses = false;
    for (long match : matches) {
        if (relevantConsensuses.containsKey(match)) {
            inOtherRelevantConsensus = true;
        } else if (tooOldConsensuses.containsKey(match)) {
            inTooOldConsensuses = true;
        } else if (tooNewConsensuses.containsKey(match)) {
            inTooNewConsensuses = true;
        }
    }
    if (inOtherRelevantConsensus) {
        if (!resultIndecisive) {
            out.println("        <p>Result is POSITIVE " + "with moderate certainty!</p>\n");
        }
        out.println("<p>We found one or more relays on IP address " + relayIP + " permitting exit to " + target
                + ", but not in " + "the relay list immediately preceding " + timestampStr
                + ". A possible reason for the relay being missing in the last "
                + "relay list preceding the given time might be that some of "
                + "the directory authorities had difficulties connecting to "
                + "the relay. However, clients might still have used the " + "relay.</p>\n");
    } else {
        if (!resultIndecisive) {
            out.println("        <p>Result is NEGATIVE " + "with high certainty!</p>\n");
        }
        out.println("        <p>We did not find any relay on IP address " + relayIP + " permitting exit to "
                + target + " in the relay list 3 hours preceding " + timestampStr + ".</p>\n");
        if (inTooOldConsensuses || inTooNewConsensuses) {
            if (inTooOldConsensuses && !inTooNewConsensuses) {
                out.println("        <p>Note that we found a matching relay in "
                        + "relay lists that were published between 5 and 3 " + "hours before " + timestampStr
                        + ".</p>\n");
            } else if (!inTooOldConsensuses && inTooNewConsensuses) {
                out.println("        <p>Note that we found a matching relay in "
                        + "relay lists that were published up to 2 hours after " + timestampStr + ".</p>\n");
            } else {
                out.println("        <p>Note that we found a matching relay in "
                        + "relay lists that were published between 5 and 3 "
                        + "hours before and in relay lists that were published up " + "to 2 hours after "
                        + timestampStr + ".</p>\n");
            }
            out.println("<p>Make sure that the timestamp you provided is "
                    + "in the correct timezone: UTC (or GMT).</p>");
        }
    }
    if (target.length() > 0) {
        if (positiveConsensuses.isEmpty() && !positiveConsensusesNoTarget.isEmpty()) {
            out.println("        <p>Note that although the found relay(s) did " + "not permit exiting to "
                    + target + ", there have been one " + "or more relays running at the given time.</p>");
        }
    }

    /* Finish writing response. */
    writeFooter(out);
}

From source file:com.opengamma.integration.copier.portfolio.rowparser.JodaBeanRowParser.java

/**
 * Extract a map of column (field) names and types from the properties of the specified direct bean class.
 * Appropriate member classes (such as swap legs) are recursively traversed and their columns also extracted 
 * and added to the map.
 * @param clazz   The bean type from which to extract properties
 * @param prefix  The class membership path traced from the top-level bean class to the current class
 * @return        A map of the column names and their types
 */
private SortedMap<String, Class<?>> recursiveGetColumnMap(Class<?> clazz, String prefix) {

    // Scan through and capture the list of relevant properties and their types
    SortedMap<String, Class<?>> columns = new TreeMap<String, Class<?>>();

    for (MetaProperty<?> metaProperty : JodaBeanUtils.metaBean(clazz).metaPropertyIterable()) {

        // Skip any undesired properties, process the rest
        if (!ignoreMetaProperty(metaProperty)) {

            // Add a column for the property (used either for the actual value
            // or for the class name in the case of a non-convertible bean)
            columns.put(prefix + metaProperty.name(), metaProperty.propertyType());

            // If this is a bean without a converter recursively extract all 
            // columns for the metabean and all its subclasses
            if (isBean(metaProperty.propertyType()) && !isConvertible(metaProperty.propertyType())) {

                // This is the bean (might be an abstract class/subclassed)
                Class<? extends Bean> beanClass = metaProperty.propertyType().asSubclass(Bean.class);

                // Recursively extract this bean's properties
                columns.putAll(recursiveGetColumnMap(beanClass, prefix + metaProperty.name() + ":"));

                // Identify ALL subclasses of this bean and extract all their properties
                for (Class<?> subClass : getSubClasses(beanClass)) {
                    columns.putAll(recursiveGetColumnMap(subClass, prefix + metaProperty.name() + ":"));
                }
            }
        }
    }
    return columns;
}

From source file:com.google.cloud.dns.testing.LocalDnsHelper.java

/**
 * Applies changes to a zone. Repeatedly retries until it succeeds. Thread safe and deadlock safe.
 */
private void applyExistingChange(String projectId, String zoneName, String changeId) {
    Change change = findChange(projectId, zoneName, changeId);
    if (change == null) {
        return; // no such change exists, nothing to do
    }
    ZoneContainer wrapper = findZone(projectId, zoneName);
    if (wrapper == null) {
        return; // no such zone exists; it might have been deleted by another thread
    }
    AtomicReference<ImmutableSortedMap<String, ResourceRecordSet>> dnsRecords = wrapper.dnsRecords();
    while (true) {
        // managed zone must have a set of records which is not null
        ImmutableSortedMap<String, ResourceRecordSet> original = dnsRecords.get();
        // the copy will be populated when handling deletions
        SortedMap<String, ResourceRecordSet> copy = new TreeMap<>();
        // apply deletions first
        List<ResourceRecordSet> deletions = change.getDeletions();
        if (deletions != null) {
            for (Map.Entry<String, ResourceRecordSet> entry : original.entrySet()) {
                if (!deletions.contains(entry.getValue())) {
                    copy.put(entry.getKey(), entry.getValue());
                }
            }
        } else {
            copy.putAll(original);
        }
        // apply additions
        List<ResourceRecordSet> additions = change.getAdditions();
        if (additions != null) {
            for (ResourceRecordSet addition : additions) {
                ResourceRecordSet rrset = new ResourceRecordSet();
                rrset.setName(addition.getName());
                rrset.setRrdatas(ImmutableList.copyOf(addition.getRrdatas()));
                rrset.setTtl(addition.getTtl());
                rrset.setType(addition.getType());
                String id = getUniqueId(copy.keySet());
                copy.put(id, rrset);
            }
        }
        boolean success = dnsRecords.compareAndSet(original, ImmutableSortedMap.copyOf(copy));
        if (success) {
            break; // success if no other thread modified the value in the meantime
        }
    }
    change.setStatus("done");
}
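
The loop above is a lock-free copy-and-swap update: read the current immutable map, build a modified copy (seeded via putAll), and publish it with compareAndSet, retrying if another thread won the race. A stripped-down sketch of the same pattern (all names here are illustrative, not taken from the source above):

import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;

class CopyOnSwapRecords {
    private final AtomicReference<SortedMap<String, String>> records =
            new AtomicReference<>(new TreeMap<String, String>());

    void putRecord(String key, String value) {
        while (true) {
            SortedMap<String, String> original = records.get();
            // never mutate the published map; work on a copy
            SortedMap<String, String> copy = new TreeMap<>();
            copy.putAll(original);
            copy.put(key, value);
            // publish only if no other thread replaced the map in the meantime
            if (records.compareAndSet(original, copy)) {
                return;
            }
        }
    }
}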

From source file:org.apache.nifi.provenance.MiNiFiPersistentProvenanceRepository.java

/**
 * Purges old events from the repository
 *
 * @throws IOException if unable to purge old events due to an I/O problem
 */
synchronized void purgeOldEvents() throws IOException {
    while (!recoveryFinished.get()) {
        try {
            Thread.sleep(100L);
        } catch (final InterruptedException ie) {
        }
    }

    final List<File> toPurge = new ArrayList<>();
    final long timeCutoff = System.currentTimeMillis() - configuration.getMaxRecordLife(TimeUnit.MILLISECONDS);

    final List<File> sortedByBasename = getLogFiles();
    long bytesUsed = getSize(sortedByBasename, timeCutoff);

    for (final Path path : idToPathMap.get().values()) {
        final File file = path.toFile();
        final long lastModified = file.lastModified();
        if (lastModified > 0L && lastModified < timeCutoff) {
            toPurge.add(file);
        }
    }

    // This comparator sorts the data based on the "basename" of the files, i.e., the numeric portion.
    // We do this because the numeric portion represents the ID of the first event in the log file.
    // As a result, we are sorting based on time, since the ID is monotonically increasing. By doing
    // this, we are able to avoid continually hitting disk to check timestamps.
    final Comparator<File> sortByBasenameComparator = new Comparator<File>() {
        @Override
        public int compare(final File o1, final File o2) {
            final String baseName1 = StringUtils.substringBefore(o1.getName(), ".");
            final String baseName2 = StringUtils.substringBefore(o2.getName(), ".");

            Long id1 = null;
            Long id2 = null;
            try {
                id1 = Long.parseLong(baseName1);
            } catch (final NumberFormatException nfe) {
                id1 = null;
            }

            try {
                id2 = Long.parseLong(baseName2);
            } catch (final NumberFormatException nfe) {
                id2 = null;
            }

            if (id1 == null && id2 == null) {
                return 0;
            }
            if (id1 == null) {
                return 1;
            }
            if (id2 == null) {
                return -1;
            }

            return Long.compare(id1, id2);
        }
    };

    // If we have too much data (at least 90% of our max capacity), start aging it off
    if (bytesUsed > configuration.getMaxStorageCapacity() * 0.9) {
        Collections.sort(sortedByBasename, sortByBasenameComparator);

        for (final File file : sortedByBasename) {
            toPurge.add(file);
            bytesUsed -= file.length();
            if (bytesUsed < configuration.getMaxStorageCapacity()) {
                // we've shrunk the repo size down enough to stop
                break;
            }
        }
    }

    // Sort all of the files that we want to purge such that the oldest events are aged off first
    Collections.sort(toPurge, sortByBasenameComparator);
    logger.debug("Purging old event files: {}", toPurge);

    // Remove any duplicates that we may have.
    final Set<File> uniqueFilesToPurge = new LinkedHashSet<>(toPurge);

    // Age off the data.
    final Set<String> removed = new LinkedHashSet<>();
    for (File file : uniqueFilesToPurge) {
        final String baseName = StringUtils.substringBefore(file.getName(), ".");
        ExpirationAction currentAction = null;
        try {
            for (final ExpirationAction action : expirationActions) {
                currentAction = action;
                if (!action.hasBeenPerformed(file)) {
                    final File fileBeforeAction = file;
                    final StopWatch stopWatch = new StopWatch(true);
                    file = action.execute(file);
                    stopWatch.stop();
                    logger.info("Successfully performed Expiration Action {} on Provenance Event file {} in {}",
                            action, fileBeforeAction, stopWatch.getDuration());
                }
            }

            removed.add(baseName);
        } catch (final FileNotFoundException fnf) {
            logger.warn(
                    "Failed to perform Expiration Action {} on Provenance Event file {} because the file no longer exists; will not "
                            + "perform additional Expiration Actions on this file",
                    currentAction, file);
            removed.add(baseName);
        } catch (final Throwable t) {
            logger.warn(
                    "Failed to perform Expiration Action {} on Provenance Event file {} due to {}; will not perform additional "
                            + "Expiration Actions on this file at this time",
                    currentAction, file, t.toString());
            logger.warn("", t);
            eventReporter.reportEvent(Severity.WARNING, EVENT_CATEGORY,
                    "Failed to perform Expiration Action " + currentAction + " on Provenance Event file " + file
                            + " due to " + t.toString() + "; will not perform additional Expiration Actions "
                            + "on this file at this time");
        }
    }

    // Update the Map ID to Path map to not include the removed file
    // We cannot obtain the write lock here because there may be a need for the lock in the rollover method,
    // if we have 'backpressure applied'. This would result in a deadlock because the rollover method would be
    // waiting for purgeOldEvents, and purgeOldEvents would be waiting for the write lock held by rollover.
    boolean updated = false;
    while (!updated) {
        final SortedMap<Long, Path> existingPathMap = idToPathMap.get();
        final SortedMap<Long, Path> newPathMap = new TreeMap<>(new PathMapComparator());
        newPathMap.putAll(existingPathMap);

        final Iterator<Map.Entry<Long, Path>> itr = newPathMap.entrySet().iterator();
        while (itr.hasNext()) {
            final Map.Entry<Long, Path> entry = itr.next();
            final String filename = entry.getValue().toFile().getName();
            final String baseName = StringUtils.substringBefore(filename, ".");

            if (removed.contains(baseName)) {
                itr.remove();
            }
        }

        updated = idToPathMap.compareAndSet(existingPathMap, newPathMap);
        logger.debug("After expiration, path map: {}", newPathMap);
    }
}

From source file:org.apache.nifi.provenance.MiNiFiPersistentProvenanceRepository.java

/**
 * <p>
 * MUST be called with the write lock held.
 * </p>
 * <p>
 * Rolls over the data in the journal files, merging them into a single Provenance Event Log File, and
 * compressing as needed.
 *
 * @param force if true, will force a rollover regardless of whether or not data has been written
 * @throws IOException if unable to complete rollover
 */
private void rollover(final boolean force) throws IOException {
    if (!configuration.isAllowRollover()) {
        return;
    }

    // If this is the first time we're creating the out stream, or if we
    // have written something to the stream, then roll over
    if (force || recordsWrittenSinceRollover.get() > 0L || dirtyWriterCount.get() > 0) {
        final List<File> journalsToMerge = new ArrayList<>();
        for (final RecordWriter writer : writers) {
            if (!writer.isClosed()) {
                final File writerFile = writer.getFile();
                journalsToMerge.add(writerFile);
                try {
                    writer.close();
                } catch (final IOException ioe) {
                    logger.warn("Failed to close {} due to {}", writer, ioe.toString());
                    if (logger.isDebugEnabled()) {
                        logger.warn("", ioe);
                    }
                }
            }
        }

        if (logger.isDebugEnabled()) {
            if (journalsToMerge.isEmpty()) {
                logger.debug("No journals to merge; all RecordWriters were already closed");
            } else {
                logger.debug("Going to merge {} files for journals starting with ID {}", journalsToMerge.size(),
                        StringUtils.substringBefore(journalsToMerge.get(0).getName(), "."));
            }
        }

        // Choose a storage directory to store the merged file in.
        final long storageDirIdx = storageDirectoryIndex.getAndIncrement();
        final List<File> storageDirs = configuration.getStorageDirectories();
        final File storageDir = storageDirs.get((int) (storageDirIdx % storageDirs.size()));

        Future<?> future = null;
        if (!journalsToMerge.isEmpty()) {
            // Run the rollover logic in a background thread.
            final AtomicReference<Future<?>> futureReference = new AtomicReference<>();
            final int recordsWritten = recordsWrittenSinceRollover.getAndSet(0);
            final Runnable rolloverRunnable = new Runnable() {
                @Override
                public void run() {
                    try {
                        final File fileRolledOver;

                        try {
                            fileRolledOver = mergeJournals(journalsToMerge,
                                    getMergeFile(journalsToMerge, storageDir), eventReporter);
                        } catch (final IOException ioe) {
                            logger.error(
                                    "Failed to merge Journal Files {} into a Provenance Log File due to {}",
                                    journalsToMerge, ioe.toString());
                            logger.error("", ioe);
                            return;
                        }

                        if (fileRolledOver == null) {
                            logger.debug(
                                    "Couldn't merge journals. Will try again in 10 seconds. journalsToMerge: {}, storageDir: {}",
                                    journalsToMerge, storageDir);
                            return;
                        }
                        final File file = fileRolledOver;

                        // update our map of id to Path
                        // We need to make sure that another thread doesn't also update the map at the same time. We cannot
                        // use the write lock when purging old events, and we want to use the same approach here.
                        boolean updated = false;
                        final Long fileFirstEventId = Long
                                .valueOf(StringUtils.substringBefore(fileRolledOver.getName(), "."));
                        while (!updated) {
                            final SortedMap<Long, Path> existingPathMap = idToPathMap.get();
                            final SortedMap<Long, Path> newIdToPathMap = new TreeMap<>(new PathMapComparator());
                            newIdToPathMap.putAll(existingPathMap);
                            newIdToPathMap.put(fileFirstEventId, file.toPath());
                            updated = idToPathMap.compareAndSet(existingPathMap, newIdToPathMap);
                        }

                        logger.info("Successfully Rolled over Provenance Event file containing {} records",
                                recordsWritten);
                        rolloverCompletions.getAndIncrement();

                        // We have finished successfully. Cancel the future so that we don't run anymore
                        Future<?> future;
                        while ((future = futureReference.get()) == null) {
                            try {
                                Thread.sleep(10L);
                            } catch (final InterruptedException ie) {
                            }
                        }

                        future.cancel(false);
                    } catch (final Throwable t) {
                        logger.error("Failed to rollover Provenance repository due to {}", t.toString());
                        logger.error("", t);
                    }
                }
            };

            // We are going to schedule the future to run immediately and then repeat every 10 seconds. This allows us to keep retrying if we
            // fail for some reason. When we succeed, the Runnable will cancel itself.
            future = rolloverExecutor.scheduleWithFixedDelay(rolloverRunnable, 0, 10, TimeUnit.SECONDS);
            futureReference.set(future);
        }

        streamStartTime.set(System.currentTimeMillis());
        bytesWrittenSinceRollover.set(0);

        // We don't want to create new 'writers' until the number of unmerged journals falls below our threshold. So we wait
        // here before we repopulate the 'writers' member variable and release the lock.
        int journalFileCount = getJournalCount();
        long repoSize = getSize(getLogFiles(), 0L);
        final int journalCountThreshold = configuration.getJournalCount() * 5;
        final long sizeThreshold = (long) (configuration.getMaxStorageCapacity() * 1.1D); // do not go over 110% of max capacity

        // check if we need to apply backpressure.
        // If we have too many journal files, or if the repo becomes too large, backpressure is necessary. Without it,
        // if the rate at which provenance events are registered exceeds the rate at which we can compress/merge them,
        // then eventually we will end up with all of the data stored in the 'journals' directory. This
        // would mean that the data would never even be accessible. In order to prevent this, if we exceed 110% of the configured
        // max capacity for the repo, or if we have 5 sets of journal files waiting to be merged, we will block here until
        // that is no longer the case.
        if (journalFileCount > journalCountThreshold || repoSize > sizeThreshold) {
            logger.warn("The rate of the dataflow is exceeding the provenance recording rate. "
                    + "Slowing down flow to accommodate. Currently, there are {} journal files ({} bytes) and "
                    + "threshold for blocking is {} ({} bytes)", journalFileCount, repoSize,
                    journalCountThreshold, sizeThreshold);
            eventReporter.reportEvent(Severity.WARNING, "Provenance Repository", "The rate of the dataflow is "
                    + "exceeding the provenance recording rate. Slowing down flow to accommodate");

            while (journalFileCount > journalCountThreshold || repoSize > sizeThreshold) {
                // if a shutdown happens while we are in this loop, kill the rollover thread and break
                if (this.closed.get()) {
                    if (future != null) {
                        future.cancel(true);
                    }

                    break;
                }

                if (repoSize > sizeThreshold) {
                    logger.debug(
                            "Provenance Repository has exceeded its size threshold; will trigger purging of oldest events");
                    purgeOldEvents();

                    journalFileCount = getJournalCount();
                    repoSize = getSize(getLogFiles(), 0L);
                    continue;
                } else {
                    // if we are constrained by the number of journal files rather than the size of the repo,
                    // then we will just sleep a bit because another thread is already actively merging the journals,
                    // due to the runnable that we scheduled above
                    try {
                        Thread.sleep(100L);
                    } catch (final InterruptedException ie) {
                    }
                }

                logger.debug(
                        "Provenance Repository is still behind. Keeping flow slowed down "
                                + "to accommodate. Currently, there are {} journal files ({} bytes) and "
                                + "threshold for blocking is {} ({} bytes)",
                        journalFileCount, repoSize, journalCountThreshold, sizeThreshold);

                journalFileCount = getJournalCount();
                repoSize = getSize(getLogFiles(), 0L);
            }

            logger.info(
                    "Provenance Repository has now caught up with rolling over journal files. Current number of "
                            + "journal files to be rolled over is {}",
                    journalFileCount);
        }

        // we've finished rolling over successfully. Create new writers and reset state.
        writers = createWriters(configuration, idGenerator.get());
        dirtyWriterCount.set(0);
        streamStartTime.set(System.currentTimeMillis());
        recordsWrittenSinceRollover.getAndSet(0);
    }
}

From source file:org.apache.accumulo.tserver.TabletServer.java

public static Pair<Text, KeyExtent> verifyTabletInformation(AccumuloServerContext context, KeyExtent extent,
        TServerInstance instance, SortedMap<Key, Value> tabletsKeyValues, String clientAddress, ZooLock lock)
        throws AccumuloSecurityException, DistributedStoreException, AccumuloException {

    log.debug("verifying extent " + extent);
    if (extent.isRootTablet()) {
        return verifyRootTablet(extent, instance);
    }
    String tableToVerify = MetadataTable.ID;
    if (extent.isMeta())
        tableToVerify = RootTable.ID;

    List<ColumnFQ> columnsToFetch = Arrays
            .asList(new ColumnFQ[] { TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN,
                    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN,
                    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN,
                    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
                    TabletsSection.ServerColumnFamily.TIME_COLUMN });

    ScannerImpl scanner = new ScannerImpl(context, tableToVerify, Authorizations.EMPTY);
    scanner.setRange(extent.toMetadataRange());

    TreeMap<Key, Value> tkv = new TreeMap<Key, Value>();
    for (Entry<Key, Value> entry : scanner)
        tkv.put(entry.getKey(), entry.getValue());

    // only populate map after success
    if (tabletsKeyValues == null) {
        tabletsKeyValues = tkv;
    } else {
        tabletsKeyValues.clear();
        tabletsKeyValues.putAll(tkv);
    }

    Text metadataEntry = extent.getMetadataEntry();

    Value dir = checkTabletMetadata(extent, instance, tabletsKeyValues, metadataEntry);
    if (dir == null)
        return null;

    Value oldPrevEndRow = null;
    for (Entry<Key, Value> entry : tabletsKeyValues.entrySet()) {
        if (TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
            oldPrevEndRow = entry.getValue();
        }
    }

    if (oldPrevEndRow != null) {
        SortedMap<Text, SortedMap<ColumnFQ, Value>> tabletEntries;
        tabletEntries = MetadataTableUtil.getTabletEntries(tabletsKeyValues, columnsToFetch);

        KeyExtent fke;
        try {
            fke = MasterMetadataUtil.fixSplit(context, metadataEntry, tabletEntries.get(metadataEntry),
                    instance, lock);
        } catch (IOException e) {
            log.error("Error fixing split " + metadataEntry);
            throw new AccumuloException(e.toString());
        }

        if (!fke.equals(extent)) {
            return new Pair<Text, KeyExtent>(null, fke);
        }

        // reread and reverify metadata entries now that metadata entries were fixed
        tabletsKeyValues.clear();
        return verifyTabletInformation(context, fke, instance, tabletsKeyValues, clientAddress, lock);
    }

    return new Pair<Text, KeyExtent>(new Text(dir.get()), null);
}

From source file:de.huxhorn.lilith.swing.MainFrame.java

public SortedMap<EventSource<AccessEvent>, ViewContainer<AccessEvent>> getSortedAccessViews() {
    EventSourceComparator<AccessEvent> accessComparator = new EventSourceComparator<AccessEvent>();
    SortedMap<EventSource<AccessEvent>, ViewContainer<AccessEvent>> sortedAccessViews;
    sortedAccessViews = new TreeMap<EventSource<AccessEvent>, ViewContainer<AccessEvent>>(accessComparator);
    if (accessEventViewManager != null) {
        sortedAccessViews.putAll(accessEventViewManager.getViews());
    }
    return sortedAccessViews;
}

From source file:de.huxhorn.lilith.swing.MainFrame.java

public SortedMap<EventSource<LoggingEvent>, ViewContainer<LoggingEvent>> getSortedLoggingViews() {
    EventSourceComparator<LoggingEvent> loggingComparator = new EventSourceComparator<LoggingEvent>();
    SortedMap<EventSource<LoggingEvent>, ViewContainer<LoggingEvent>> sortedLoggingViews;
    sortedLoggingViews = new TreeMap<EventSource<LoggingEvent>, ViewContainer<LoggingEvent>>(loggingComparator);
    if (loggingEventViewManager != null) {
        sortedLoggingViews.putAll(loggingEventViewManager.getViews());
    }
    return sortedLoggingViews;
}
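
Both getters above show a common idiom: copying an arbitrary Map into a TreeMap built with a custom Comparator re-sorts the entries under that ordering. A minimal, self-contained sketch (names are illustrative):

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class ResortDemo {
    public static void main(String[] args) {
        Map<String, Integer> unsorted = new HashMap<>();
        unsorted.put("banana", 2);
        unsorted.put("apple", 1);
        unsorted.put("fig", 3);

        // order by key length, then alphabetically for equal lengths
        Comparator<String> byLengthThenAlpha = Comparator.comparingInt(String::length)
                .thenComparing(Comparator.<String>naturalOrder());
        SortedMap<String, Integer> sorted = new TreeMap<>(byLengthThenAlpha);
        sorted.putAll(unsorted);
        System.out.println(sorted); // prints {fig=3, apple=1, banana=2}
    }
}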