Example usage for java.util SortedSet iterator

List of usage examples for java.util SortedSet iterator

Introduction

On this page you can find example usage of the java.util.SortedSet iterator() method.

Prototype

Iterator<E> iterator();

Document

Returns an iterator over the elements in this set.
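
Before the real-world examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing that the iterator returned by a SortedSet walks the elements in ascending order, as defined by the set's comparator or the elements' natural ordering:

import java.util.Iterator;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetIteratorDemo {
    public static void main(String[] args) {
        // TreeSet is the standard SortedSet implementation; without an
        // explicit Comparator it uses the elements' natural ordering.
        SortedSet<String> names = new TreeSet<String>();
        names.add("mango");
        names.add("apple");
        names.add("banana");

        // iterator() yields the elements in ascending order.
        Iterator<String> it = names.iterator();
        while (it.hasNext()) {
            System.out.println(it.next()); // apple, banana, mango
        }
    }
}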

Usage

From source file:org.wymiwyg.r3xserializer.Serializer.java

/**
 * @param out
 * @param subject
 * @param forceShow
 * @throws LanguageUnavailableException
 */
private void writeSubject(PrintWriter out, Model model, Resource subject, URI baseURI, Locale[] locales,
        UnavailableLocalisationHandler localisationHandler, boolean forceShow)
        throws LanguageUnavailableException {
    if (subject.isAnon()) {
        out.print("  <rdf:Description rdf:nodeID=\"");
        out.print(escapedId(subject.getId().toString()));
    } else {
        out.print("  <rdf:Description rdf:about=\"");
        out.print(XMLEncoder.encode(relativize(baseURI, subject.getURI())));
    }
    out.println("\">");
    StmtIterator stmtIter = subject.listProperties();
    SortedSet<Statement> sortedProperties = new TreeSet<Statement>(new Comparator<Statement>() {

        public int compare(Statement stmt0, Statement stmt1) {
            if (stmt0.equals(stmt1)) {
                return 0;
            }
            Property predicate0 = stmt0.getPredicate();
            Property predicate1 = stmt1.getPredicate();
            int ordinal0 = predicate0.getOrdinal();
            int ordinal1 = predicate1.getOrdinal();
            if (ordinal0 != ordinal1) {
                return ordinal0 > ordinal1 ? 1 : -1;
            }
            int stringCompare = predicate0.getURI().compareTo(predicate1.getURI());
            if (stringCompare != 0) {
                return stringCompare;
            } else {
                // different objects or just different languages
                return stmt0.toString().compareTo(stmt1.toString());
            }
        }

    });
    while (stmtIter.hasNext()) {
        sortedProperties.add(stmtIter.nextStatement());
    }
    Set<Property> processedLiteralPropertyTypes = new HashSet<Property>();
    Iterator<Statement> sortedIterator = sortedProperties.iterator();
    while (sortedIterator.hasNext()) {
        Statement current = sortedIterator.next();
        RDFNode object = current.getObject();
        if (object instanceof Literal) {
            Property predicate = current.getPredicate();
            if (!processedLiteralPropertyTypes.contains(predicate)) {
                processLanguageProperty(out, model, baseURI, subject, predicate, locales, localisationHandler,
                        forceShow);
                processedLiteralPropertyTypes.add(predicate);
            }
        } else {
            writeProperty(out, model, current, baseURI);
        }
    }
    out.println("  </rdf:Description>");

}

From source file:com.cloudera.whirr.cm.CmServerClusterInstance.java

@SuppressWarnings("unchecked")
public static Map<String, Map<String, Map<String, String>>> getClusterConfiguration(
        final Configuration configuration, SortedSet<String> mounts) throws IOException {

    Map<String, Map<String, Map<String, String>>> clusterConfiguration = new HashMap<String, Map<String, Map<String, String>>>();

    Iterator<String> keys = configuration.getKeys();
    while (keys.hasNext()) {
        String key = keys.next();
        if (key.startsWith(CONFIG_WHIRR_CM_CONFIG_PREFIX)) {
            String[] keyTokens = getClusterConfigurationKeyTokens(clusterConfiguration, key,
                    CONFIG_WHIRR_CM_CONFIG_PREFIX);
            clusterConfiguration.get(keyTokens[0]).get(keyTokens[1]).put(keyTokens[2],
                    configuration.getString(key));
        }
    }

    keys = configuration.getKeys();
    while (keys.hasNext()) {
        final String key = keys.next();
        if (key.startsWith(CONFIG_WHIRR_INTERNAL_CM_CONFIG_DEFAULT_PREFIX)) {
            String[] keyTokens = getClusterConfigurationKeyTokens(clusterConfiguration, key,
                    CONFIG_WHIRR_INTERNAL_CM_CONFIG_DEFAULT_PREFIX);
            if (configuration.getString(
                    CONFIG_WHIRR_CM_CONFIG_PREFIX + keyTokens[1].toLowerCase() + "." + keyTokens[2]) == null) {
                if ((keyTokens[2].endsWith(CONFIG_CM_DIR_SUFFIX_LIST)
                        || keyTokens[2].endsWith(CONFIG_CM_DIR_SUFFIX_PLURAL)) && !mounts.isEmpty()) {
                    clusterConfiguration.get(keyTokens[0]).get(keyTokens[1]).put(keyTokens[2], Joiner.on(',')
                            .join(Lists.transform(Lists.newArrayList(mounts), new Function<String, String>() {
                                @Override
                                public String apply(String input) {
                                    return input + configuration.getString(key);
                                }
                            })));
                } else {
                    clusterConfiguration.get(keyTokens[0]).get(keyTokens[1]).put(keyTokens[2],
                            (mounts.isEmpty() ? configuration.getString(CONFIG_WHIRR_INTERNAL_DATA_DIRS_DEFAULT)
                                    : mounts.iterator().next()) + configuration.getString(key));
                }
            }
        }
    }

    keys = configuration.getKeys();
    while (keys.hasNext()) {
        final String key = keys.next();
        if (key.startsWith(CONFIG_WHIRR_CM_CONFIG_PREFIX) && key.endsWith(CONFIG_CM_DB_SUFFIX_TYPE)) {
            String[] keyTokens = getClusterConfigurationKeyTokens(clusterConfiguration, key,
                    CONFIG_WHIRR_CM_CONFIG_PREFIX);
            if (configuration.getString(key) != null && configuration.getString(key).length() == 0) {
                clusterConfiguration.get(keyTokens[0]).get(keyTokens[1]).put(keyTokens[2],
                        configuration.getString(CONFIG_WHIRR_DB_TYPE));
                if (configuration
                        .getString(key.replace(CONFIG_CM_DB_SUFFIX_TYPE, CONFIG_CM_DB_SUFFIX_PORT)) != null
                        && configuration
                                .getString(key.replace(CONFIG_CM_DB_SUFFIX_TYPE, CONFIG_CM_DB_SUFFIX_PORT))
                                .length() == 0) {
                    clusterConfiguration.get(keyTokens[0]).get(keyTokens[1]).put(
                            keyTokens[2].replace(CONFIG_CM_DB_SUFFIX_TYPE, CONFIG_CM_DB_SUFFIX_PORT),
                            configuration.getString(CONFIG_WHIRR_INTERNAL_PORTS_DB_PREFIX
                                    + configuration.getString(CONFIG_WHIRR_DB_TYPE)));
                } else if (configuration
                        .getString(key.replace(CONFIG_CM_DB_SUFFIX_TYPE, CONFIG_CM_DB_SUFFIX_HOST)) != null
                        && !configuration
                                .getString(key.replace(CONFIG_CM_DB_SUFFIX_TYPE, CONFIG_CM_DB_SUFFIX_HOST))
                                .contains(":")) {
                    clusterConfiguration.get(keyTokens[0]).get(keyTokens[1]).put(
                            keyTokens[2].replace(CONFIG_CM_DB_SUFFIX_TYPE, CONFIG_CM_DB_SUFFIX_HOST),
                            configuration
                                    .getString(key.replace(CONFIG_CM_DB_SUFFIX_TYPE, CONFIG_CM_DB_SUFFIX_HOST))
                                    + ":" + configuration.getString(CONFIG_WHIRR_INTERNAL_PORTS_DB_PREFIX
                                            + configuration.getString(CONFIG_WHIRR_DB_TYPE)));
                }
            }
        }
    }

    if (clusterConfiguration.get(CM_API_BASE_VERSION) == null) {
        clusterConfiguration.put(CM_API_BASE_VERSION, new HashMap<String, Map<String, String>>());
    }
    if (clusterConfiguration.get(CM_API_BASE_VERSION).get(CmServerServiceType.CLUSTER.getId()) == null) {
        clusterConfiguration.get(CM_API_BASE_VERSION).put(CmServerServiceType.CLUSTER.getId(),
                new HashMap<String, String>());
    }

    if (clusterConfiguration.get(CM_API_BASE_VERSION).get(CmServerServiceType.CLUSTER.getId())
            .get(CONFIG_CM_LICENSE_PROVIDED) == null) {
        if (Utils.urlForURI(configuration.getString(CONFIG_WHIRR_CM_LICENSE_URI)) != null) {
            clusterConfiguration.get(CM_API_BASE_VERSION).get(CmServerServiceType.CLUSTER.getId())
                    .put(CONFIG_CM_LICENSE_PROVIDED, Boolean.TRUE.toString());
        } else {
            clusterConfiguration.get(CM_API_BASE_VERSION).get(CmServerServiceType.CLUSTER.getId())
                    .put(CONFIG_CM_LICENSE_PROVIDED, Boolean.FALSE.toString());
        }
    }

    if (clusterConfiguration.get(CM_API_BASE_VERSION).get(CmServerServiceType.CLUSTER.getId())
            .get(CONFIG_CM_LICENSE_PROVIDED).equals(Boolean.TRUE.toString())) {
        if (clusterConfiguration.get(CM_API_BASE_VERSION)
                .get(CmServerServiceType.MAPREDUCE_TASK_TRACKER.getId()) == null) {
            clusterConfiguration.get(CM_API_BASE_VERSION)
                    .put(CmServerServiceType.MAPREDUCE_TASK_TRACKER.getId(), new HashMap<String, String>());
        }
        clusterConfiguration.get(CM_API_BASE_VERSION).get(CmServerServiceType.MAPREDUCE_TASK_TRACKER.getId())
                .put(CONFIG_CM_TASKTRACKER_INSTRUMENTATION, "org.apache.hadoop.mapred.TaskTrackerCmonInst");
    }

    return clusterConfiguration;

}

From source file:net.sourceforge.fenixedu.domain.Enrolment.java

/**
 * {@inheritDoc}
 * 
 * <p>
 * This method assumes that each Student has at most one non-evaluated Thesis and no more than two Theses.
 */
@Override
final public Thesis getThesis() {
    Collection<Thesis> theses = getThesesSet();

    switch (theses.size()) {
    case 0:
        return null;
    case 1:
        return theses.iterator().next();
    default:
        SortedSet<Thesis> sortedTheses = new TreeSet<Thesis>(new Comparator<Thesis>() {
            @Override
            public int compare(Thesis o1, Thesis o2) {
                return o2.getCreation().compareTo(o1.getCreation());
            }
        });

        sortedTheses.addAll(theses);
        return sortedTheses.iterator().next();
    }
}
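
The idiom above, a TreeSet ordered by a descending comparator whose iterator().next() yields the most recent element, can be sketched in isolation as follows; the Item class and its creation field are hypothetical stand-ins for Thesis and getCreation():

import java.time.Instant;
import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

public class LatestItemDemo {
    // Hypothetical stand-in for Thesis.
    static class Item {
        final String name;
        final Instant creation;

        Item(String name, Instant creation) {
            this.name = name;
            this.creation = creation;
        }
    }

    public static void main(String[] args) {
        // Descending order by creation time, so the iterator's first
        // element is the newest one. Items with equal creation times
        // would collapse into a single entry, as in any TreeSet.
        SortedSet<Item> newestFirst = new TreeSet<Item>(new Comparator<Item>() {
            @Override
            public int compare(Item o1, Item o2) {
                return o2.creation.compareTo(o1.creation);
            }
        });
        newestFirst.add(new Item("older", Instant.parse("2020-01-01T00:00:00Z")));
        newestFirst.add(new Item("newer", Instant.parse("2021-01-01T00:00:00Z")));
        System.out.println(newestFirst.iterator().next().name); // newer
    }
}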

From source file:org.orbisgis.corejdbc.internal.ReadRowSetImpl.java

@Override
public SortedSet<Integer> getRowNumberFromRowPk(SortedSet<Long> pkSet) throws SQLException {
    SortedSet<Integer> rowsNum = new IntegerUnion();
    if (rowFetchFirstPk == null) {
        for (long pk : pkSet) {
            rowsNum.add((int) pk);
        }
    } else {
        // Use the first PK value of each batch in order to fetch only batches that contain a selected pk
        Iterator<Long> fetchPkIt = pkSet.iterator();
        int batchIterId = -1;
        List<Long> batchPK = new ArrayList<>(fetchSize);
        while (fetchPkIt.hasNext()) {
            Long fetchPk = fetchPkIt.next();
            if (fetchPk != null) {
                if (batchIterId == -1 || fetchPk > batchPK.get(batchPK.size() - 1)) {
                    batchPK.clear();
                    // Iterate through batches until the next PK is greater than the searched pk.
                    // As an optimisation, a binary search would be faster than this serial search.
                    Long nextPk = Long.MAX_VALUE;
                    final int batchCount = getBatchCount();
                    do {
                        batchIterId++;
                        if (batchIterId + 1 >= rowFetchFirstPk.size()
                                || rowFetchFirstPk.get(batchIterId + 1) == null) {
                            fetchBatchPk(batchIterId + 1);
                        }
                        if (rowFetchFirstPk.size() > batchIterId + 1) {
                            nextPk = rowFetchFirstPk.get(batchIterId + 1);
                        } else {
                            break;
                        }
                    } while (nextPk < fetchPk && batchIterId + 1 < batchCount - 1);
                    if (nextPk <= fetchPk) {
                        batchIterId++;
                    }
                }
                fetchBatchPk(batchIterId);
                Long batchFirstPk = rowFetchFirstPk.get(batchIterId);
                // We are in the right batch
                // Query only PK for this batch
                if (batchPK.isEmpty()) {
                    try (Connection connection = dataSource.getConnection();
                            PreparedStatement st = createBatchQuery(connection, batchFirstPk, false, 0,
                                    fetchSize, true)) {
                        try (ResultSet rs = st.executeQuery()) {
                            while (rs.next()) {
                                batchPK.add(rs.getLong(1));
                            }
                        }
                    }
                }
                // Target batch is in memory, just find the target pk index in it
                rowsNum.add(batchIterId * fetchSize + Collections.binarySearch(batchPK, fetchPk) + 1);
            }
        }
    }
    return rowsNum;
}

From source file:org.apache.xml.security.c14n.implementations.Canonicalizer11.java

/**
 * Returns the Attr[]s to be output for the given element.
 * <br>
 * The code of this method is a copy of {@link #handleAttributes(Element,
 * NameSpaceSymbTable)}, except that it takes into account that
 * subtree-c14n is, as the name says, subtree-based: if the element in
 * question is the root of the c14n, its parent is not in the node set,
 * nor are any of its other ancestors.
 *
 * @param element
 * @param ns
 * @return the Attr[]s to be output
 * @throws CanonicalizationException
 */
@Override
protected Iterator<Attr> handleAttributesSubtree(Element element, NameSpaceSymbTable ns)
        throws CanonicalizationException {
    if (!element.hasAttributes() && !firstCall) {
        return null;
    }
    // result will contain the attrs which have to be output
    final SortedSet<Attr> result = this.result;
    result.clear();

    if (element.hasAttributes()) {
        NamedNodeMap attrs = element.getAttributes();
        int attrsLength = attrs.getLength();

        for (int i = 0; i < attrsLength; i++) {
            Attr attribute = (Attr) attrs.item(i);
            String NUri = attribute.getNamespaceURI();
            String NName = attribute.getLocalName();
            String NValue = attribute.getValue();

            if (!XMLNS_URI.equals(NUri)) {
                // It's not a namespace attr node. Add to the result and continue.
                result.add(attribute);
            } else if (!(XML.equals(NName) && XML_LANG_URI.equals(NValue))) {
                // The default mapping for xml must not be output.
                Node n = ns.addMappingAndRender(NName, NValue, attribute);

                if (n != null) {
                    // Render the ns definition
                    result.add((Attr) n);
                    if (C14nHelper.namespaceIsRelative(attribute)) {
                        Object exArgs[] = { element.getTagName(), NName, attribute.getNodeValue() };
                        throw new CanonicalizationException("c14n.Canonicalizer.RelativeNamespace", exArgs);
                    }
                }
            }
        }
    }

    if (firstCall) {
        // It is the first node of the subtree
        // Obtain all the namespaces defined in the ancestors and add them to the output.
        ns.getUnrenderedNodes(result);
        // output the attributes in the xml namespace.
        xmlattrStack.getXmlnsAttr(result);
        firstCall = false;
    }

    return result.iterator();
}
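
Reduced to its essence, the pattern here is to accumulate items in a SortedSet and hand the caller result.iterator(), so the elements always come out in one canonical, deterministic order. A minimal sketch under that assumption (the names below are illustrative only):

import java.util.Iterator;
import java.util.SortedSet;
import java.util.TreeSet;

public class CanonicalOrderDemo {
    // Collect values in a SortedSet so the returned Iterator always
    // walks them in the same deterministic (ascending) order.
    static Iterator<String> canonicalOrder(String... values) {
        SortedSet<String> result = new TreeSet<String>();
        for (String value : values) {
            result.add(value);
        }
        return result.iterator();
    }

    public static void main(String[] args) {
        Iterator<String> it = canonicalOrder("xmlns", "id", "lang");
        while (it.hasNext()) {
            System.out.println(it.next()); // id, lang, xmlns
        }
    }
}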

From source file:org.orcid.core.adapter.impl.Jaxb2JpaAdapterImpl.java

private void setOrgAffiliationRelations(ProfileEntity profileEntity, Affiliations affiliations) {
    SortedSet<OrgAffiliationRelationEntity> existingOrgAffiliationEntities = profileEntity
            .getOrgAffiliationRelations();
    if (existingOrgAffiliationEntities == null) {
        existingOrgAffiliationEntities = new TreeSet<>();
    }
    Map<String, OrgAffiliationRelationEntity> existingOrgAffiliationsEntitiesMap = createOrgAffiliationEntitiesMap(
            existingOrgAffiliationEntities);
    SortedSet<OrgAffiliationRelationEntity> updatedOrgAffiliationEntities = new TreeSet<>();
    if (affiliations != null && !affiliations.getAffiliation().isEmpty()) {
        for (Affiliation affiliation : affiliations.getAffiliation()) {
            OrgAffiliationRelationEntity orgRelationEntity = getOrgAffiliationRelationEntity(affiliation,
                    existingOrgAffiliationsEntitiesMap.get(affiliation.getPutCode()));
            orgRelationEntity.setProfile(profileEntity);
            updatedOrgAffiliationEntities.add(orgRelationEntity);
        }
    }
    Map<String, OrgAffiliationRelationEntity> updatedOrgAffiliationEntitiesMap = createOrgAffiliationEntitiesMap(
            updatedOrgAffiliationEntities);
    // Remove orphans
    for (Iterator<OrgAffiliationRelationEntity> iterator = existingOrgAffiliationEntities.iterator(); iterator
            .hasNext();) {
        OrgAffiliationRelationEntity existingEntity = iterator.next();
        if (!updatedOrgAffiliationEntitiesMap.containsKey(String.valueOf(existingEntity.getId()))) {
            iterator.remove();
        }
    }
    // Add new
    for (OrgAffiliationRelationEntity updatedEntity : updatedOrgAffiliationEntities) {
        if (updatedEntity.getId() == null) {
            existingOrgAffiliationEntities.add(updatedEntity);
        }
    }
    profileEntity.setOrgAffiliationRelations(existingOrgAffiliationEntities);
}
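
The orphan-removal loop above works because the Iterator returned by SortedSet.iterator() supports remove(), which deletes the current element from the underlying set without a ConcurrentModificationException. A minimal sketch of the same pattern, using hypothetical String keys in place of the entities:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

public class RemoveOrphansDemo {
    public static void main(String[] args) {
        SortedSet<String> existing = new TreeSet<String>(Arrays.asList("a", "b", "c"));
        Set<String> updated = new HashSet<String>(Arrays.asList("a", "c"));

        // iterator.remove() is the only safe way to delete elements
        // from the set while iterating over it.
        for (Iterator<String> iterator = existing.iterator(); iterator.hasNext();) {
            String key = iterator.next();
            if (!updated.contains(key)) {
                iterator.remove();
            }
        }
        System.out.println(existing); // [a, c]
    }
}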

From source file:com.spotify.heroic.filter.impl.AndFilterImpl.java

private static Filter optimize(SortedSet<Filter> statements) {
    final SortedSet<Filter> result = new TreeSet<>();

    root: for (final Filter f : statements) {
        if (f instanceof Filter.Not) {
            final Filter.Not not = (Filter.Not) f;

            if (statements.contains(not.first())) {
                return FalseFilterImpl.get();
            }

            result.add(f);
            continue;
        }

        /*
         * If two MatchTag statements check the same tag but for different
         * values, the whole conjunction can never match.
         */
        if (f instanceof Filter.MatchTag) {
            final Filter.MatchTag outer = (Filter.MatchTag) f;

            for (final Filter inner : statements) {
                if (inner.equals(outer)) {
                    continue;
                }

                if (inner instanceof Filter.MatchTag) {
                    final Filter.MatchTag matchTag = (Filter.MatchTag) inner;

                    if (!outer.first().equals(matchTag.first())) {
                        continue;
                    }

                    if (!FilterComparatorUtils.isEqual(outer.second(), matchTag.second())) {
                        return FalseFilterImpl.get();
                    }
                }
            }

            result.add(f);
            continue;
        }

        // optimize away prefixes which encompass each other.
        // Example: foo ^ hello and foo ^ helloworld -> foo ^ helloworld
        if (f instanceof Filter.StartsWith) {
            final Filter.StartsWith outer = (Filter.StartsWith) f;

            for (final Filter inner : statements) {
                if (inner.equals(outer)) {
                    continue;
                }

                if (inner instanceof Filter.StartsWith) {
                    final Filter.StartsWith starts = (Filter.StartsWith) inner;

                    if (!outer.first().equals(starts.first())) {
                        continue;
                    }

                    if (FilterComparatorUtils.prefixedWith(starts.second(), outer.second())) {
                        continue root;
                    }
                }
            }

            result.add(f);
            continue;
        }

        // all ok!
        result.add(f);
    }

    if (result.isEmpty()) {
        return FalseFilterImpl.get();
    }

    if (result.size() == 1) {
        return result.iterator().next();
    }

    return new AndFilterImpl(new ArrayList<>(result));
}

From source file:org.jahia.services.importexport.ImportExportBaseService.java

private void exportNodesBinary(JCRNodeWrapper root, SortedSet<JCRNodeWrapper> nodes, ZipOutputStream zout,
        Set<String> typesToIgnore, String basepath) throws IOException, RepositoryException {

    // binary export can be time-consuming, log some basic information
    long startExportingNodesBinary = System.currentTimeMillis();
    logger.info("Exporting binary nodes ...");

    byte[] buffer = new byte[4096];
    for (Iterator<JCRNodeWrapper> iterator = nodes.iterator(); iterator.hasNext();) {
        JCRNodeWrapper file = iterator.next();
        exportNodeBinary(root, file, zout, typesToIgnore, buffer, basepath, new HashSet<String>());
    }

    logger.info("Binary nodes exported in {} seconds", getDuration(startExportingNodesBinary));
}

From source file:org.orcid.core.adapter.impl.Jaxb2JpaAdapterImpl.java

private void setFundings(ProfileEntity profileEntity, FundingList orcidFundings) {
    SortedSet<ProfileFundingEntity> existingProfileFundingEntities = profileEntity.getProfileFunding();
    if (existingProfileFundingEntities == null) {
        existingProfileFundingEntities = new TreeSet<>();
    }

    // Create a map containing the existing profile funding entities
    Map<String, ProfileFundingEntity> existingProfileFundingEntitiesMap = createProfileFundingEntitiesMap(
            existingProfileFundingEntities);

    // A set that will contain the updated profile funding entities that
    // come from the orcidFundings object
    SortedSet<ProfileFundingEntity> updatedProfileFundingEntities = new TreeSet<>();

    // Populate a list of the updated profile funding entities that come
    // from the fundingList object
    if (orcidFundings != null && orcidFundings.getFundings() != null
            && !orcidFundings.getFundings().isEmpty()) {
        for (Funding orcidFunding : orcidFundings.getFundings()) {
            ProfileFundingEntity newProfileGrantEntity = getProfileFundingEntity(orcidFunding,
                    existingProfileFundingEntitiesMap.get(orcidFunding.getPutCode()));
            newProfileGrantEntity.setProfile(profileEntity);
            updatedProfileFundingEntities.add(newProfileGrantEntity);
        }
    }

    // Create a map containing the profile funding entities that come in
    // the orcidFundings object and that will be persisted
    Map<String, ProfileFundingEntity> updatedProfileGrantEntitiesMap = createProfileFundingEntitiesMap(
            updatedProfileFundingEntities);

    // Remove orphans
    for (Iterator<ProfileFundingEntity> iterator = existingProfileFundingEntities.iterator(); iterator
            .hasNext();) {
        ProfileFundingEntity existingEntity = iterator.next();
        if (!updatedProfileGrantEntitiesMap.containsKey(String.valueOf(existingEntity.getId()))) {
            iterator.remove();
        }
    }

    // Add new
    for (ProfileFundingEntity updatedEntity : updatedProfileFundingEntities) {
        if (updatedEntity.getId() == null) {
            existingProfileFundingEntities.add(updatedEntity);
        }
    }
    profileEntity.setProfileFunding(existingProfileFundingEntities);
}

From source file:org.apache.flume.channel.file.Log.java

/**
 * Write the current checkpoint object and then swap objects so that
 * the next checkpoint occurs on the other checkpoint directory.
 *
 * Synchronization is not required because this method acquires a
 * write lock, so it has exclusive access to all the data structures
 * it accesses.
 * @param force  a flag to force the writing of checkpoint
 * @throws IOException if we are unable to write the checkpoint out to disk
 */
private Boolean writeCheckpoint(Boolean force) throws Exception {
    boolean checkpointCompleted = false;
    long usableSpace = checkpointDir.getUsableSpace();
    if (usableSpace <= minimumRequiredSpace) {
        throw new IOException("Usable space exhaused, only " + usableSpace + " bytes remaining, required "
                + minimumRequiredSpace + " bytes");
    }
    boolean lockAcquired = tryLockExclusive();
    if (!lockAcquired) {
        return false;
    }
    SortedSet<Integer> logFileRefCountsAll = null, logFileRefCountsActive = null;
    try {
        if (queue.checkpoint(force)) {
            long logWriteOrderID = queue.getLogWriteOrderID();

            //Since the active files might also be in the queue's fileIDs,
            //we need to either move each one to a new set or remove each one
            //as we do here. Otherwise we cannot make sure every element in
            //the fileID set from the queue has been updated.
            //Since clone is smarter than insert, better to make
            //a copy of the set first so that we can use it later.
            logFileRefCountsAll = queue.getFileIDs();
            logFileRefCountsActive = new TreeSet<Integer>(logFileRefCountsAll);

            int numFiles = logFiles.length();
            for (int i = 0; i < numFiles; i++) {
                LogFile.Writer logWriter = logFiles.get(i);
                int logFileID = logWriter.getLogFileID();
                File logFile = logWriter.getFile();
                LogFile.MetaDataWriter writer = LogFileFactory.getMetaDataWriter(logFile, logFileID);
                try {
                    writer.markCheckpoint(logWriter.position(), logWriteOrderID);
                } finally {
                    writer.close();
                }
                logFileRefCountsAll.remove(logFileID);
                LOGGER.info("Updated checkpoint for file: " + logFile + " position: " + logWriter.position()
                        + " logWriteOrderID: " + logWriteOrderID);
            }

            // Update any inactive data files as well
            Iterator<Integer> idIterator = logFileRefCountsAll.iterator();
            while (idIterator.hasNext()) {
                int id = idIterator.next();
                LogFile.RandomReader reader = idLogFileMap.remove(id);
                File file = reader.getFile();
                reader.close();
                LogFile.MetaDataWriter writer = LogFileFactory.getMetaDataWriter(file, id);
                try {
                    writer.markCheckpoint(logWriteOrderID);
                } finally {
                    writer.close();
                }
                reader = LogFileFactory.getRandomReader(file, encryptionKeyProvider);
                idLogFileMap.put(id, reader);
                LOGGER.debug("Updated checkpoint for file: " + file + " logWriteOrderID: " + logWriteOrderID);
                idIterator.remove();
            }
            Preconditions.checkState(logFileRefCountsAll.size() == 0,
                    "Could not update all data file timestamps: " + logFileRefCountsAll);
            //Add files from all log directories
            for (int index = 0; index < logDirs.length; index++) {
                logFileRefCountsActive.add(logFiles.get(index).getLogFileID());
            }
            checkpointCompleted = true;
        }
    } finally {
        unlockExclusive();
    }
    //Do the deletes outside the checkpointWriterLock
    //Delete logic is expensive.
    if (open && checkpointCompleted) {
        removeOldLogs(logFileRefCountsActive);
    }
    //Since the exception is not caught, this will not be returned if
    //an exception is thrown from the try.
    return true;
}