Example usage for java.util SortedSet isEmpty

List of usage examples for java.util SortedSet isEmpty

Introduction

On this page you can find example usage for java.util.SortedSet.isEmpty().

Prototype

boolean isEmpty();

Document

Returns true if this set contains no elements.
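
To make the behavior concrete, here is a minimal, self-contained sketch (illustrative only; not taken from the examples below):

import java.util.SortedSet;
import java.util.TreeSet;

public class IsEmptyDemo {
    public static void main(String[] args) {
        SortedSet<String> set = new TreeSet<String>();
        System.out.println(set.isEmpty()); // true: the set has no elements

        set.add("alpha");
        System.out.println(set.isEmpty()); // false: one element present

        set.remove("alpha");
        System.out.println(set.isEmpty()); // true again after removal
    }
}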

Usage

From source file: org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore.java

@Override
public TimelineEvents getEntityTimelines(String entityType, SortedSet<String> entityIds, Long limit,
        Long windowStart, Long windowEnd, Set<String> eventType) throws IOException {
    TimelineEvents events = new TimelineEvents();
    if (entityIds == null || entityIds.isEmpty()) {
        return events;
    }
    // create a lexicographically-ordered map from start time to entities
    Map<byte[], List<EntityIdentifier>> startTimeMap = new TreeMap<byte[], List<EntityIdentifier>>(
            new Comparator<byte[]>() {
                @Override
                public int compare(byte[] o1, byte[] o2) {
                    return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, o2.length);
                }
            });

    // look up start times for the specified entities
    // skip entities with no start time
    for (String entityId : entityIds) {
        byte[] startTime = getStartTime(entityId, entityType);
        if (startTime != null) {
            List<EntityIdentifier> entities = startTimeMap.get(startTime);
            if (entities == null) {
                entities = new ArrayList<EntityIdentifier>();
                startTimeMap.put(startTime, entities);
            }
            entities.add(new EntityIdentifier(entityId, entityType));
        }
    }
    for (Entry<byte[], List<EntityIdentifier>> entry : startTimeMap.entrySet()) {
        // look up the events matching the given parameters (limit,
        // start time, end time, event types) for entities whose start times
        // were found and add the entities to the return list
        byte[] revStartTime = entry.getKey();
        for (EntityIdentifier entityIdentifier : entry.getValue()) {
            EventsOfOneEntity entity = new EventsOfOneEntity();
            entity.setEntityId(entityIdentifier.getId());
            entity.setEntityType(entityType);
            events.addEvent(entity);
            KeyBuilder kb = KeyBuilder.newInstance().add(entityType).add(revStartTime)
                    .add(entityIdentifier.getId()).add(EVENTS_COLUMN);
            byte[] prefix = kb.getBytesForLookup();
            if (windowEnd == null) {
                windowEnd = Long.MAX_VALUE;
            }
            byte[] revts = writeReverseOrderedLong(windowEnd);
            kb.add(revts);
            byte[] first = kb.getBytesForLookup();
            byte[] last = null;
            if (windowStart != null) {
                last = KeyBuilder.newInstance().add(prefix).add(writeReverseOrderedLong(windowStart))
                        .getBytesForLookup();
            }
            if (limit == null) {
                limit = DEFAULT_LIMIT;
            }
            DB db = entitydb.getDBForStartTime(readReverseOrderedLong(revStartTime, 0));
            if (db == null) {
                continue;
            }
            try (DBIterator iterator = db.iterator()) {
                for (iterator.seek(first); entity.getEvents().size() < limit
                        && iterator.hasNext(); iterator.next()) {
                    byte[] key = iterator.peekNext().getKey();
                    if (!prefixMatches(prefix, prefix.length, key) || (last != null
                            && WritableComparator.compareBytes(key, 0, key.length, last, 0, last.length) > 0)) {
                        break;
                    }
                    TimelineEvent event = getEntityEvent(eventType, key, prefix.length,
                            iterator.peekNext().getValue());
                    if (event != null) {
                        entity.addEvent(event);
                    }
                }
            }
        }
    }
    return events;
}

From source file: org.fenixedu.ulisboa.specifications.domain.evaluation.markSheet.CompetenceCourseMarkSheet.java

public GradeScaleValidator getGradeScaleValidator() {
    final SortedSet<GradeScaleValidator> result = Sets.newTreeSet(DomainObjectUtil.COMPARATOR_BY_ID);

    for (final GradeScaleValidator validator : EvaluationSeasonRule.find(getEvaluationSeason(),
            GradeScaleValidator.class)) {

        if (validator.getGradeScale() != getGradeScale()) {
            continue;
        }

        final Set<DegreeType> markSheetDegreeTypes = getExecutionCourse().getAssociatedCurricularCoursesSet()
                .stream().map(c -> c.getDegree().getDegreeType()).collect(Collectors.toSet());
        if (Sets.intersection(markSheetDegreeTypes, validator.getDegreeTypeSet()).isEmpty()) {
            continue;
        }

        if (!validator.getAppliesToCurriculumAggregatorEntry()
                || !isCurriculumAggregatorEntryScaleConsistent()) {
            continue;
        }

        result.add(validator);
    }

    if (result.size() > 1) {
        logger.warn("Mark sheet {} has more than one GradeScaleValidator configured, returning the oldest",
                this);
    }

    return result.isEmpty() ? null : result.first();
}

From source file: com.repeatability.pdf.PDFTextStripper.java

/**
 * This will process a TextPosition object and add the text to the list of characters on a page. It takes care of
 * overlapping text.
 *
 * @param text The text to process.
 */
@Override
protected void processTextPosition(TextPosition text) {
    boolean showCharacter = true;
    if (suppressDuplicateOverlappingText) {
        showCharacter = false;
        String textCharacter = text.getUnicode();
        float textX = text.getX();
        float textY = text.getY();
        TreeMap<Float, TreeSet<Float>> sameTextCharacters = characterListMapping.get(textCharacter);
        if (sameTextCharacters == null) {
            sameTextCharacters = new TreeMap<Float, TreeSet<Float>>();
            characterListMapping.put(textCharacter, sameTextCharacters);
        }
        // RDD - Here we compute the value that represents the end of the rendered
        // text. This value is used to determine whether subsequent text rendered
        // on the same line overwrites the current text.
        //
        // We subtract any positive padding to handle cases where extreme amounts
        // of padding are applied, then backed off (not sure why this is done, but there
        // are cases where the padding is on the order of 10x the character width, and
        // the TJ just backs up to compensate after each character). Also, we subtract
        // an amount to allow for kerning (a percentage of the width of the last
        // character).
        boolean suppressCharacter = false;
        float tolerance = text.getWidth() / textCharacter.length() / 3.0f;

        SortedMap<Float, TreeSet<Float>> xMatches = sameTextCharacters.subMap(textX - tolerance,
                textX + tolerance);
        for (TreeSet<Float> xMatch : xMatches.values()) {
            SortedSet<Float> yMatches = xMatch.subSet(textY - tolerance, textY + tolerance);
            if (!yMatches.isEmpty()) {
                suppressCharacter = true;
                break;
            }
        }
        if (!suppressCharacter) {
            TreeSet<Float> ySet = sameTextCharacters.get(textX);
            if (ySet == null) {
                ySet = new TreeSet<Float>();
                sameTextCharacters.put(textX, ySet);
            }
            ySet.add(textY);
            showCharacter = true;
        }
    }
    if (showCharacter) {
        // if we are showing the character then we need to determine which article it belongs to
        int foundArticleDivisionIndex = -1;
        int notFoundButFirstLeftAndAboveArticleDivisionIndex = -1;
        int notFoundButFirstLeftArticleDivisionIndex = -1;
        int notFoundButFirstAboveArticleDivisionIndex = -1;
        float x = text.getX();
        float y = text.getY();
        if (shouldSeparateByBeads) {
            for (int i = 0; i < beadRectangles.size() && foundArticleDivisionIndex == -1; i++) {
                PDRectangle rect = beadRectangles.get(i);
                if (rect != null) {
                    if (rect.contains(x, y)) {
                        foundArticleDivisionIndex = i * 2 + 1;
                    } else if ((x < rect.getLowerLeftX() || y < rect.getUpperRightY())
                            && notFoundButFirstLeftAndAboveArticleDivisionIndex == -1) {
                        notFoundButFirstLeftAndAboveArticleDivisionIndex = i * 2;
                    } else if (x < rect.getLowerLeftX() && notFoundButFirstLeftArticleDivisionIndex == -1) {
                        notFoundButFirstLeftArticleDivisionIndex = i * 2;
                    } else if (y < rect.getUpperRightY() && notFoundButFirstAboveArticleDivisionIndex == -1) {
                        notFoundButFirstAboveArticleDivisionIndex = i * 2;
                    }
                } else {
                    foundArticleDivisionIndex = 0;
                }
            }
        } else {
            foundArticleDivisionIndex = 0;
        }
        int articleDivisionIndex;
        if (foundArticleDivisionIndex != -1) {
            articleDivisionIndex = foundArticleDivisionIndex;
        } else if (notFoundButFirstLeftAndAboveArticleDivisionIndex != -1) {
            articleDivisionIndex = notFoundButFirstLeftAndAboveArticleDivisionIndex;
        } else if (notFoundButFirstLeftArticleDivisionIndex != -1) {
            articleDivisionIndex = notFoundButFirstLeftArticleDivisionIndex;
        } else if (notFoundButFirstAboveArticleDivisionIndex != -1) {
            articleDivisionIndex = notFoundButFirstAboveArticleDivisionIndex;
        } else {
            articleDivisionIndex = charactersByArticle.size() - 1;
        }

        List<TextPosition> textList = charactersByArticle.get(articleDivisionIndex);

        // In the wild, some PDF encoded documents put diacritics (accents on
        // top of characters) into a separate Tj element. When displaying them
        // graphically, the two chunks get overlayed. With text output though,
        // we need to do the overlay. This code recombines the diacritic with
        // its associated character if the two are consecutive.
        if (textList.isEmpty()) {
            textList.add(text);
        } else {
            // test if we overlap the previous entry.
            // Note that we are making an assumption that we need to only look back
            // one TextPosition to find what we are overlapping.
            // This may not always be true.
            TextPosition previousTextPosition = textList.get(textList.size() - 1);
            if (text.isDiacritic() && previousTextPosition.contains(text)) {
                previousTextPosition.mergeDiacritic(text);
            }
            // If the previous TextPosition was the diacritic, merge it into this
            // one and remove it from the list.
            else if (previousTextPosition.isDiacritic() && text.contains(previousTextPosition)) {
                text.mergeDiacritic(previousTextPosition);
                textList.remove(textList.size() - 1);
                textList.add(text);
            } else {
                textList.add(text);
            }
        }
    }
}
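
The duplicate-suppression block above is a reusable pattern in its own right: index previously seen coordinates in a TreeMap keyed by x, narrow it with subMap() to the x tolerance window, then probe each y-set with subSet(), and treat a non-empty result as an overlap. A stripped-down sketch of that idea (class name, tolerance, and coordinates are illustrative only):

import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class NearDuplicateIndex {
    private final TreeMap<Float, TreeSet<Float>> seen = new TreeMap<Float, TreeSet<Float>>();

    // Returns true if a point within +/- tolerance of (x, y) was already recorded.
    boolean isNearDuplicate(float x, float y, float tolerance) {
        SortedMap<Float, TreeSet<Float>> xMatches = seen.subMap(x - tolerance, x + tolerance);
        for (TreeSet<Float> ys : xMatches.values()) {
            SortedSet<Float> yMatches = ys.subSet(y - tolerance, y + tolerance);
            if (!yMatches.isEmpty()) {
                return true; // something already occupies (roughly) this position
            }
        }
        return false;
    }

    void record(float x, float y) {
        TreeSet<Float> ys = seen.get(x);
        if (ys == null) {
            ys = new TreeSet<Float>();
            seen.put(x, ys);
        }
        ys.add(y);
    }

    public static void main(String[] args) {
        NearDuplicateIndex index = new NearDuplicateIndex();
        index.record(10.0f, 20.0f);
        System.out.println(index.isNearDuplicate(10.2f, 19.9f, 0.5f)); // true
        System.out.println(index.isNearDuplicate(50.0f, 20.0f, 0.5f)); // false
    }
}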

From source file: org.apache.cassandra.db.CompactionManager.java

/**
 * This function goes over each file and removes the keys that the node is not responsible for
 * and only keeps keys that this node is responsible for.
 *
 * @throws IOException
 */
private void doCleanupCompaction(ColumnFamilyStore cfs, Collection<SSTableReader> sstables,
        NodeId.OneShotRenewer renewer) throws IOException {
    assert !cfs.isIndex();
    Table table = cfs.table;
    Collection<Range> ranges = StorageService.instance.getLocalRanges(table.name);
    boolean isCommutative = cfs.metadata.getDefaultValidator().isCommutative();
    if (ranges.isEmpty()) {
        logger.info("Cleanup cannot run before a node has joined the ring");
        return;
    }

    for (SSTableReader sstable : sstables) {
        long startTime = System.currentTimeMillis();
        long totalkeysWritten = 0;

        int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
                (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable))));
        if (logger.isDebugEnabled())
            logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);

        SSTableWriter writer = null;
        try {
            logger.info("Cleaning up " + sstable);
            // Calculate the expected compacted filesize
            long expectedRangeFileSize = cfs.getExpectedCompactedFileSize(Arrays.asList(sstable)) / 2;
            String compactionFileLocation = table.getDataFileLocation(expectedRangeFileSize);
            if (compactionFileLocation == null)
                throw new IOException("disk full");

            SSTableScanner scanner = sstable.getDirectScanner(CompactionIterator.FILE_BUFFER_SIZE);
            SortedSet<ByteBuffer> indexedColumns = cfs.getIndexedColumns();
            CleanupInfo ci = new CleanupInfo(sstable, scanner);
            executor.beginCompaction(ci);
            try {
                while (scanner.hasNext()) {
                    SSTableIdentityIterator row = (SSTableIdentityIterator) scanner.next();
                    if (Range.isTokenInRanges(row.getKey().token, ranges)) {
                        writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, writer,
                                Collections.singletonList(sstable));
                        writer.append(getCompactedRow(row, sstable.descriptor, false));
                        totalkeysWritten++;
                    } else {
                        cfs.invalidateCachedRow(row.getKey());
                        if (!indexedColumns.isEmpty() || isCommutative) {
                            while (row.hasNext()) {
                                IColumn column = row.next();
                                if (column instanceof CounterColumn)
                                    renewer.maybeRenew((CounterColumn) column);
                                if (indexedColumns.contains(column.name()))
                                    Table.cleanupIndexEntry(cfs, row.getKey().key, column);
                            }
                        }
                    }
                }
            } finally {
                scanner.close();
                executor.finishCompaction(ci);
            }
        } finally {
            cfs.getDataTracker().unmarkCompacting(Arrays.asList(sstable));
        }

        List<SSTableReader> results = new ArrayList<SSTableReader>();
        if (writer != null) {
            SSTableReader newSstable = writer.closeAndOpenReader(sstable.maxDataAge);
            results.add(newSstable);

            String format = "Cleaned up to %s.  %,d to %,d (~%d%% of original) bytes for %,d keys.  Time: %,dms.";
            long dTime = System.currentTimeMillis() - startTime;
            long startsize = sstable.length();
            long endsize = newSstable.length();
            double ratio = (double) endsize / (double) startsize;
            logger.info(String.format(format, writer.getFilename(), startsize, endsize, (int) (ratio * 100),
                    totalkeysWritten, dTime));
        }

        // flush to ensure we don't lose the tombstones on a restart, since they are not commitlog'd
        for (ByteBuffer columnName : cfs.getIndexedColumns()) {
            try {
                cfs.getIndexedColumnFamilyStore(columnName).forceBlockingFlush();
            } catch (ExecutionException e) {
                throw new RuntimeException(e);
            } catch (InterruptedException e) {
                throw new AssertionError(e);
            }
        }
        cfs.replaceCompactedSSTables(Arrays.asList(sstable), results);
    }
}

From source file: net.sourceforge.fenixedu.domain.ExecutionCourse.java

public static ExecutionCourse readLastBySigla(final String sigla) {
    SortedSet<ExecutionCourse> result = new TreeSet<ExecutionCourse>(
            EXECUTION_COURSE_EXECUTION_PERIOD_COMPARATOR);
    for (ExecutionCourse executionCourse : Bennu.getInstance().getExecutionCoursesSet()) {
        if (sigla.equalsIgnoreCase(executionCourse.getSigla())) {
            result.add(executionCourse);
        }
    }
    return result.isEmpty() ? null : result.last();
}

From source file: org.apache.cassandra.db.compaction.CompactionManager.java

/**
 * This function goes over each file and removes the keys that the node is not responsible for
 * and only keeps keys that this node is responsible for.
 *
 * @throws IOException
 */
private void doCleanupCompaction(ColumnFamilyStore cfs, Collection<SSTableReader> sstables,
        NodeId.OneShotRenewer renewer) throws IOException {
    assert !cfs.isIndex();
    Table table = cfs.table;
    Collection<Range> ranges = StorageService.instance.getLocalRanges(table.name);
    boolean isCommutative = cfs.metadata.getDefaultValidator().isCommutative();
    if (ranges.isEmpty()) {
        logger.info("Cleanup cannot run before a node has joined the ring");
        return;
    }

    for (SSTableReader sstable : sstables) {
        CompactionController controller = new CompactionController(cfs, Collections.singletonList(sstable),
                getDefaultGcBefore(cfs), false);
        long startTime = System.currentTimeMillis();

        long totalkeysWritten = 0;

        int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
                (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable))));
        if (logger.isDebugEnabled())
            logger.debug("Expected bloom filter size : " + expectedBloomFilterSize);

        SSTableWriter writer = null;

        logger.info("Cleaning up " + sstable);
        // Calculate the expected compacted filesize
        long expectedRangeFileSize = cfs.getExpectedCompactedFileSize(Arrays.asList(sstable)) / 2;
        String compactionFileLocation = table.getDataFileLocation(expectedRangeFileSize);
        if (compactionFileLocation == null)
            throw new IOException("disk full");

        SSTableScanner scanner = sstable.getDirectScanner(CompactionIterator.FILE_BUFFER_SIZE);
        SortedSet<ByteBuffer> indexedColumns = cfs.getIndexedColumns();
        CleanupInfo ci = new CleanupInfo(sstable, scanner);
        executor.beginCompaction(ci);
        try {
            while (scanner.hasNext()) {
                SSTableIdentityIterator row = (SSTableIdentityIterator) scanner.next();
                if (Range.isTokenInRanges(row.getKey().token, ranges)) {
                    AbstractCompactedRow compactedRow = controller.getCompactedRow(row);
                    if (compactedRow.isEmpty())
                        continue;
                    writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, writer,
                            Collections.singletonList(sstable));
                    writer.append(compactedRow);
                    totalkeysWritten++;
                } else {
                    cfs.invalidateCachedRow(row.getKey());
                    if (!indexedColumns.isEmpty() || isCommutative) {
                        while (row.hasNext()) {
                            IColumn column = row.next();
                            if (column instanceof CounterColumn)
                                renewer.maybeRenew((CounterColumn) column);
                            if (indexedColumns.contains(column.name()))
                                Table.cleanupIndexEntry(cfs, row.getKey().key, column);
                        }
                    }
                }
            }
        } finally {
            scanner.close();
            executor.finishCompaction(ci);
        }

        List<SSTableReader> results = new ArrayList<SSTableReader>();
        if (writer != null) {
            SSTableReader newSstable = writer.closeAndOpenReader(sstable.maxDataAge);
            results.add(newSstable);

            String format = "Cleaned up to %s.  %,d to %,d (~%d%% of original) bytes for %,d keys.  Time: %,dms.";
            long dTime = System.currentTimeMillis() - startTime;
            long startsize = sstable.length();
            long endsize = newSstable.length();
            double ratio = (double) endsize / (double) startsize;
            logger.info(String.format(format, writer.getFilename(), startsize, endsize, (int) (ratio * 100),
                    totalkeysWritten, dTime));
        }

        // flush to ensure we don't lose the tombstones on a restart, since they are not commitlog'd
        for (ByteBuffer columnName : cfs.getIndexedColumns()) {
            try {
                cfs.getIndexedColumnFamilyStore(columnName).forceBlockingFlush();
            } catch (ExecutionException e) {
                throw new RuntimeException(e);
            } catch (InterruptedException e) {
                throw new AssertionError(e);
            }
        }
        cfs.replaceCompactedSSTables(Arrays.asList(sstable), results);
    }
}

From source file: org.tolweb.content.preparers.PageCoPreparer.java

@SuppressWarnings("unchecked")
public void processContent() {

    Element page = new Element(PageContentElements.PAGE, ContentPreparer.NS);
    page.addAttribute(new Attribute(PageContentAttributes.ID, "" + getMappedPage().getPageId()));

    String pageNodeNameUrl = getMappedPage().getMappedNode().getName();
    try {
        pageNodeNameUrl = URLEncoder.encode(pageNodeNameUrl, "UTF-8");
    } catch (Exception e) {
        e.printStackTrace();
    }
    String nodeIdString = "" + getMappedPage().getMappedNode().getId();
    pageNodeNameUrl = StringUtils.notEmpty(pageNodeNameUrl) ? pageNodeNameUrl + "/" + nodeIdString
            : nodeIdString;
    String pageUrl = "http://tolweb.org/" + pageNodeNameUrl;
    page.addAttribute(new Attribute(PageContentAttributes.PAGE_URL, pageUrl));
    page.addAttribute(new Attribute(PageContentAttributes.PAGE_STATUS, getMappedPage().getStatus()));
    page.addAttribute(new Attribute(PageContentAttributes.DATE_CREATED,
            getSafeString(getMappedPage().getFirstOnlineString())));
    page.addAttribute(new Attribute(PageContentAttributes.DATE_CHANGED,
            safeToString(getMappedPage().getContentChangedDate())));

    Element group = new Element(PageContentElements.GROUP, ContentPreparer.NS);
    page.appendChild(group);

    group.addAttribute(new Attribute(PageContentAttributes.NODE, "" + getMappedPage().getMappedNode().getId()));
    group.addAttribute(new Attribute(PageContentAttributes.EXTINCT,
            (getMappedPage().getMappedNode().getExtinct() == 2) ? "true" : "false"));
    group.addAttribute(new Attribute(PageContentAttributes.PHYLESIS,
            getPhylesisString(getMappedPage().getMappedNode().getPhylesis())));
    group.addAttribute(new Attribute(PageContentAttributes.LEAF,
            getMappedPage().getMappedNode().getIsLeaf() ? "true" : "false"));

    Element groupDesc = new Element(PageContentElements.GROUP_DESCRIPTION, ContentPreparer.NS);
    String groupDescText = getMappedPage().getMappedNode().getDescription();
    groupDesc.appendChild(new Text(groupDescText));

    if (StringUtils.notEmpty(groupDescText)) {
        group.appendChild(groupDesc);
    }

    Element groupCmt = new Element(PageContentElements.GROUP_COMMENT, ContentPreparer.NS);
    String groupCmtText = getMappedPage().getLeadText();
    groupCmt.appendChild(new Text(groupCmtText));

    if (StringUtils.notEmpty(groupCmtText)) {
        group.appendChild(groupCmt);
    }

    Element names = new Element(PageContentElements.NAMES, ContentPreparer.NS);
    page.appendChild(names);

    Element name = new Element(PageContentElements.NAME, ContentPreparer.NS);
    names.appendChild(name);

    name.appendChild(new Text(getMappedPage().getMappedNode().getName()));
    name.addAttribute(new Attribute(PageContentAttributes.ITALICIZE_NAME,
            Boolean.valueOf(getMappedPage().getMappedNode().getItalicizeName()).toString()));
    name.addAttribute(new Attribute(PageContentAttributes.AUTHORITY,
            getSafeString(getMappedPage().getMappedNode().getNameAuthority())));
    name.addAttribute(new Attribute(PageContentAttributes.AUTH_DATE,
            safeToString(getMappedPage().getMappedNode().getAuthorityDate())));
    name.addAttribute(new Attribute(PageContentAttributes.NAME_COMMENT,
            getSafeString(getMappedPage().getMappedNode().getNameComment())));
    name.addAttribute(new Attribute(PageContentAttributes.NEW_COMBINATION,
            Boolean.valueOf(getMappedPage().getMappedNode().getIsNewCombination()).toString()));
    name.addAttribute(new Attribute(PageContentAttributes.COMBINATION_AUTHOR,
            getSafeString(getMappedPage().getMappedNode().getCombinationAuthor())));
    name.addAttribute(new Attribute(PageContentAttributes.COMBINATION_DATE,
            safeToString(getMappedPage().getMappedNode().getCombinationDate())));

    Element othernames = new Element(PageContentElements.OTHERNAMES, ContentPreparer.NS);

    SortedSet otherNamesSet = getMappedPage().getMappedNode().getSynonyms();

    for (Iterator itr = otherNamesSet.iterator(); itr.hasNext();) {
        MappedOtherName moname = (MappedOtherName) itr.next();
        Element othername = new Element(PageContentElements.OTHERNAME, ContentPreparer.NS);
        othername.addAttribute(new Attribute(PageContentAttributes.ID, "" + moname.getId()));
        othername.addAttribute(new Attribute(PageContentAttributes.ITALICIZE_NAME,
                Boolean.valueOf(moname.getItalicize()).toString()));
        othername.addAttribute(
                new Attribute(PageContentAttributes.AUTHORITY, getSafeString(moname.getAuthority())));
        othername.addAttribute(
                new Attribute(PageContentAttributes.AUTH_DATE, safeToString(moname.getAuthorityYear())));
        othername.addAttribute(
                new Attribute(PageContentAttributes.NAME_COMMENT, getSafeString(moname.getComment())));
        othername.addAttribute(new Attribute(PageContentAttributes.IS_IMPORTANT,
                Boolean.valueOf(moname.getIsImportant()).toString()));
        othername.addAttribute(new Attribute(PageContentAttributes.IS_PREFERRED,
                Boolean.valueOf(moname.getIsPreferred()).toString()));
        othername.addAttribute(new Attribute(PageContentAttributes.SEQUENCE, safeToString(moname.getOrder())));
        // Attach the completed element to its container so it is emitted with the names.
        othernames.appendChild(othername);
    }

    if (!otherNamesSet.isEmpty()) {
        names.appendChild(othernames);
    }

    List children = getMappedPage().getMappedNode().getChildren();
    boolean isTerminal = children != null && children.isEmpty();

    // add this if not a leaf or writeaslist is false... or is terminal (e.g. no children)
    if (getMappedPage().getMappedNode().getIsLeaf() && !isTerminal) {
        Element subgroups = new Element(PageContentElements.SUBGROUPS, ContentPreparer.NS);
        page.appendChild(subgroups);

        if (!getMappedPage().getWriteAsList()) {
            Element treeimage = new Element(PageContentElements.TREEIMAGE, ContentPreparer.NS);
            ContributorLicenseInfo currDefault = new ContributorLicenseInfo(
                    ContributorLicenseInfo.TREE_IMAGE_LICENSE);
            treeimage.addAttribute(new Attribute(PageContentAttributes.LICENSE, currDefault.toShortString()));
            String treeImageName = getMappedPage().getGroupName().replaceAll("\\s", "_");
            treeimage
                    .appendChild(new Text("http://www.tolweb.org/Public/treeImages/" + treeImageName + ".png"));
            subgroups.appendChild(treeimage);
        }

        Element newicktree = new Element(PageContentElements.NEWICKTREE, ContentPreparer.NS);
        subgroups.appendChild(newicktree);

        Element taxonlist = new Element(PageContentElements.TAXON_LIST, ContentPreparer.NS);
        taxonlist.appendChild(new Text(StringEscapeUtils.escapeXml(getTaxonListAsHTML())));
        subgroups.appendChild(taxonlist);

        Element treecomment = new Element(PageContentElements.TREE_COMMENT, ContentPreparer.NS);
        subgroups.appendChild(treecomment);
        treecomment.appendChild(new Text(StringEscapeUtils.escapeXml(getMappedPage().getPostTreeText())));
    }

    Element sections = new Element(PageContentElements.SECTIONS, ContentPreparer.NS);
    page.appendChild(sections);

    SortedSet textSections = getMappedPage().getTextSections();
    for (Iterator itr = textSections.iterator(); itr.hasNext();) {
        MappedTextSection mtxt = (MappedTextSection) itr.next();
        Element section = new Element(PageContentElements.SECTION, ContentPreparer.NS);
        section.addAttribute(new Attribute(PageContentAttributes.ID, "" + mtxt.getTextSectionId()));
        section.addAttribute(new Attribute(PageContentAttributes.SECTION_TITLE, mtxt.getHeading()));
        section.addAttribute(new Attribute(PageContentAttributes.PAGE_ORDER, safeToString(mtxt.getOrder())));
        section.addAttribute(
                new Attribute(PageContentAttributes.COPYRIGHT_DATE, getMappedPage().getCopyrightDate()));
        section.addAttribute(new Attribute(PageContentAttributes.LICENSE,
                getLicenseShortName(getMappedPage().getUsePermission())));
        section.addAttribute(new Attribute(PageContentAttributes.AUTHORS,
                getAuthorsIdString(getMappedPage().getContributors())));
        section.addAttribute(new Attribute(PageContentAttributes.CORRESPONDENTS,
                getCorrespondentsIdString(getMappedPage().getContributors())));
        section.addAttribute(new Attribute(PageContentAttributes.COPYRIGHT_OWNERS,
                getCopyrightOwnersIdString(getMappedPage().getContributors())));
        section.addAttribute(new Attribute(PageContentAttributes.OTHER_COPYRIGHT,
                getSafeString(getMappedPage().getCopyrightHolder())));
        section.addAttribute(new Attribute(PageContentAttributes.CONTENT_CHANGED,
                safeToString(getMappedPage().getContentChangedDate())));

        // add the section-text text element
        Element sectionText = new Element(PageContentElements.SECTION_TEXT, ContentPreparer.NS);
        sectionText.appendChild(new Text(processSectionText(mtxt.getText(), pageUrl)));
        section.appendChild(sectionText);

        Element sectionMedia = new Element(PageContentElements.SECTION_MEDIA, ContentPreparer.NS);
        processSectionMedia(sectionMedia);
        section.appendChild(sectionMedia);

        // add the section-source element
        Element sectionSource = new Element(PageContentElements.SECTION_SOURCE, ContentPreparer.NS);
        // TODO add attribute data to section-source
        section.appendChild(sectionSource);

        String sectionAnchor = mtxt.getHeadingNoSpaces();
        sectionSource.addAttribute(new Attribute(PageContentAttributes.SOURCE_COLLECTION, "0"));
        sectionSource.addAttribute(new Attribute(PageContentAttributes.SOURCE_TITLE, mtxt.getHeading()));
        sectionSource.addAttribute(new Attribute(PageContentAttributes.SOURCE_URL,
                "http://tolweb.org/" + pageNodeNameUrl + "#" + sectionAnchor));
        sectionSource.addAttribute(new Attribute(PageContentAttributes.MORE_SOURCE, "[future-use]"));

        sections.appendChild(section);
    }

    Element refs = new Element(PageContentElements.REFERENCES, ContentPreparer.NS);
    page.appendChild(refs);
    TextPreparer txtPrep = new TextPreparer();
    List refsList = txtPrep.getNewlineSeparatedList(getMappedPage().getReferences());
    for (Iterator itr = refsList.iterator(); itr.hasNext();) {
        String ref = (String) itr.next();
        // only add the reference element if it's not empty
        if (StringUtils.notEmpty(ref)) {
            Element refEl = new Element(PageContentElements.REFERENCE, ContentPreparer.NS);
            refEl.appendChild(new Text(StringEscapeUtils.escapeXml(ref)));
            refs.appendChild(refEl);
        }
    }

    Element internetInfo = new Element(PageContentElements.INTERNET_INFO, ContentPreparer.NS);
    page.appendChild(internetInfo);
    internetInfo.appendChild(new Text(StringEscapeUtils.escapeXml(getMappedPage().getInternetInfo())));

    getElement().appendChild(page);
}

From source file: net.sourceforge.fenixedu.domain.ExecutionCourse.java

public static ExecutionCourse readLastByExecutionYearAndSigla(final String sigla, ExecutionYear executionYear) {
    SortedSet<ExecutionCourse> result = new TreeSet<ExecutionCourse>(
            EXECUTION_COURSE_EXECUTION_PERIOD_COMPARATOR);
    for (final ExecutionSemester executionSemester : executionYear.getExecutionPeriodsSet()) {
        for (ExecutionCourse executionCourse : executionSemester.getAssociatedExecutionCoursesSet()) {
            if (sigla.equalsIgnoreCase(executionCourse.getSigla())) {
                result.add(executionCourse);
            }
        }
    }
    return result.isEmpty() ? null : result.last();
}

From source file: org.pharmgkb.Subject.java

public String getBreastCancerFreeInterval() {
    SortedSet<Integer> freeIntervals = Sets.newTreeSet();

    if (getCauseOfDeath() != null && getCauseOfDeath().equals("1")) {
        freeIntervals.add(parseDays(getDaysDiagtoDeath()));
    }

    if (!ItpcUtils.isBlank(getAddCxIpsilateral())) {
        Integer ipsiDays = parseDays(getAddCxIpsilateral());
        if (ipsiDays > 0) {
            freeIntervals.add(ipsiDays);
        }
    }
    if (!ItpcUtils.isBlank(getAddCxDistantRecur())) {
        Integer days = parseDays(getAddCxDistantRecur());
        if (days > 0) {
            freeIntervals.add(days);
        }
    }
    if (!ItpcUtils.isBlank(getAddCxContralateral())) {
        Integer days = parseDays(getAddCxContralateral());
        if (days > 0) {
            freeIntervals.add(days);
        }
    }

    if (!freeIntervals.isEmpty()) {
        return Integer.toString(freeIntervals.first());
    } else {
        return "";
    }
}
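
Note the isEmpty() check before calling first(): on an empty SortedSet, first() and last() throw NoSuchElementException rather than returning null, so the guard is what allows the fallback to an empty string. A minimal sketch of the same guard (names and values are illustrative only):

import java.util.SortedSet;
import java.util.TreeSet;

public class FirstOrDefaultDemo {
    // first() throws NoSuchElementException on an empty set, so guard with isEmpty()
    static String firstOrDefault(SortedSet<Integer> values, String fallback) {
        return values.isEmpty() ? fallback : Integer.toString(values.first());
    }

    public static void main(String[] args) {
        SortedSet<Integer> intervals = new TreeSet<Integer>();
        System.out.println(firstOrDefault(intervals, "(none)")); // (none): empty set
        intervals.add(365);
        intervals.add(120);
        System.out.println(firstOrDefault(intervals, "(none)")); // 120, the smallest element
    }
}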

From source file: cerrla.Performance.java

/**
 * Outputs performance information and estimates convergence.
 *
 * @param convergence
 *            The convergence as given by the rule distributions.
 * @param numElites
 *            The minimum number of elites.
 * @param elites
 *            The current elites.
 * @param numSlots
 *            The number of slots in the distribution.
 * @param goalCondition
 *            The goal condition this performance is concerned with.
 */
public void estimateETA(double convergence, int numElites, SortedSet<PolicyValue> elites, int numSlots,
        GoalCondition goalCondition) {
    if (!ProgramArgument.SYSTEM_OUTPUT.booleanValue())
        return;

    boolean mainGoal = goalCondition.isMainGoal();

    if (mainGoal) {
        long currentTime = System.currentTimeMillis();
        long elapsedTime = currentTime - trainingStartTime_;
        String elapsed = "Elapsed: " + RRLExperiment.toTimeFormat(elapsedTime);
        System.out.println(elapsed);
    }

    boolean noUpdates = false;
    if (convergence == PolicyGenerator.NO_UPDATES_CONVERGENCE) {
        noUpdates = true;
        convergence = 0;
    }
    double totalRunComplete = (1.0 * runIndex_ + convergence) / Config.getInstance().getNumRepetitions();
    if (frozen_)
        totalRunComplete = 1.0 * (runIndex_ + 1) / Config.getInstance().getNumRepetitions();

    DecimalFormat formatter = new DecimalFormat("#0.0000");
    String modular = "";
    if (!goalCondition.isMainGoal())
        modular = "MODULAR: [" + goalCondition + "] ";
    // No updates yet, convergence unknown
    String percentStr = null;
    if (noUpdates) {
        percentStr = "Unknown convergence; No updates yet.";
    } else if (!frozen_) {
        percentStr = "~" + formatter.format(100 * convergence) + "% " + modular + "converged (" + numSlots
                + " slots).";
    } else {
        if (convergence <= 1)
            percentStr = formatter.format(100 * convergence) + "% " + modular + "test complete.";
        else
            percentStr = "---FULLY CONVERGED---";
    }
    System.out.println(percentStr);

    if (!frozen_) {
        // Adjust numElites if using bounded elites
        String best = (!elites.isEmpty()) ? "" + formatter.format(elites.first().getValue()) : "?";
        String worst = (!elites.isEmpty()) ? "" + formatter.format(elites.last().getValue()) : "?";
        String eliteString = "N_E: " + numElites + ", |E|: " + elites.size() + ", E_best: " + best
                + ", E_worst: " + worst;
        System.out.println(eliteString);
    }

    if (mainGoal) {
        String totalPercentStr = formatter.format(100 * totalRunComplete) + "% experiment complete.";
        System.out.println(totalPercentStr);
    }
}