Example usage for java.util SortedSet last

List of usage examples for java.util SortedSet last

Introduction

On this page you can find example usages of java.util.SortedSet.last().

Prototype

E last();

Document

Returns the last (highest) element currently in this set.
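
As a quick illustration before the project examples below (a minimal, hypothetical sketch, not taken from any of the source files listed here), last() returns the greatest element under the set's ordering and throws NoSuchElementException when the set is empty:

import java.util.NoSuchElementException;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetLastExample {
    public static void main(String[] args) {
        SortedSet<String> names = new TreeSet<>();
        names.add("alpha");
        names.add("omega");
        names.add("delta");

        // Under natural (lexicographic) ordering, last() returns the highest element.
        System.out.println(names.last()); // prints "omega"

        // last() throws NoSuchElementException on an empty set,
        // so guard with isEmpty() when the set may be empty.
        SortedSet<String> empty = new TreeSet<>();
        try {
            empty.last();
        } catch (NoSuchElementException e) {
            System.out.println("empty set has no last element");
        }
    }
}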

Usage

From source file:com.tesora.dve.tools.aitemplatebuilder.AiTemplateBuilder.java

private String getCommonTableNamePrefix(final SortedSet<TableStats> tables) {
    final String firstName = tables.first().getTableName();
    final String lastName = tables.last().getTableName();

    final int minLength = Math.min(firstName.length(), lastName.length());
    for (int i = 0; i < minLength; ++i) {
        if ((i > (TABLE_NAME_MIN_PREFIX_LENGTH - 1)) && (firstName.charAt(i) != lastName.charAt(i))) {
            return firstName.substring(0, i);
    }
    }

    return firstName.substring(0, minLength);
}

From source file:org.hyperic.hq.measurement.server.session.AvailabilityManagerImpl.java

private AvailabilityDataRLE findAvailBefore(DataPoint state,
        Map<Integer, TreeSet<AvailabilityDataRLE>> currAvails) {
    Integer mId = state.getMeasurementId();
    TreeSet<AvailabilityDataRLE> rles = currAvails.get(mId);
    long start = state.getTimestamp();
    AvailabilityDataRLE tmp = new AvailabilityDataRLE();
    // headSet is inclusive so we need to subtract 1 from start
    tmp.setStartime(start - 1);
    SortedSet<AvailabilityDataRLE> set = rles.headSet(tmp);
    if (set.size() == 0) {
        return null;
    }
    return set.last();
}

From source file:org.apache.nifi.registry.service.RegistryService.java

public VersionedFlowSnapshot createFlowSnapshot(final VersionedFlowSnapshot flowSnapshot) {
    if (flowSnapshot == null) {
        throw new IllegalArgumentException("Versioned flow snapshot cannot be null");
    }

    // validation will ensure that the metadata and contents are not null
    if (flowSnapshot.getSnapshotMetadata() != null) {
        flowSnapshot.getSnapshotMetadata().setTimestamp(System.currentTimeMillis());
    }

    // these fields aren't used for creation
    flowSnapshot.setFlow(null);
    flowSnapshot.setBucket(null);

    validate(flowSnapshot, "Cannot create versioned flow snapshot");

    writeLock.lock();
    try {
        final VersionedFlowSnapshotMetadata snapshotMetadata = flowSnapshot.getSnapshotMetadata();

        // ensure the bucket exists
        final BucketEntity existingBucket = metadataService
                .getBucketById(snapshotMetadata.getBucketIdentifier());
        if (existingBucket == null) {
            LOGGER.warn("The specified bucket id [{}] does not exist.", snapshotMetadata.getBucketIdentifier());
            throw new ResourceNotFoundException("The specified bucket ID does not exist in this registry.");
        }

        // ensure the flow exists
        final FlowEntity existingFlow = metadataService.getFlowById(snapshotMetadata.getFlowIdentifier());
        if (existingFlow == null) {
            LOGGER.warn("The specified flow id [{}] does not exist.", snapshotMetadata.getFlowIdentifier());
            throw new ResourceNotFoundException("The specified flow ID does not exist in this bucket.");
        }

        if (!existingBucket.getId().equals(existingFlow.getBucketId())) {
            throw new IllegalStateException("The requested flow is not located in the given bucket");
        }

        // convert the set of FlowSnapshotEntity to set of VersionedFlowSnapshotMetadata
        final SortedSet<VersionedFlowSnapshotMetadata> sortedSnapshots = new TreeSet<>();
        final List<FlowSnapshotEntity> existingFlowSnapshots = metadataService
                .getSnapshots(existingFlow.getId());
        if (existingFlowSnapshots != null) {
            existingFlowSnapshots.stream()
                    .forEach(s -> sortedSnapshots.add(DataModelMapper.map(existingBucket, s)));
        }

        // if we already have snapshots we need to verify the new one has the correct version
        if (sortedSnapshots != null && sortedSnapshots.size() > 0) {
            final VersionedFlowSnapshotMetadata lastSnapshot = sortedSnapshots.last();

            if (snapshotMetadata.getVersion() <= lastSnapshot.getVersion()) {
                throw new IllegalStateException(
                        "A Versioned flow snapshot with the same version already exists: "
                                + snapshotMetadata.getVersion());
            }

            if (snapshotMetadata.getVersion() > (lastSnapshot.getVersion() + 1)) {
                throw new IllegalStateException(
                        "Version must be a one-up number, last version was " + lastSnapshot.getVersion()
                                + " and version for this snapshot was " + snapshotMetadata.getVersion());
            }
        } else if (snapshotMetadata.getVersion() != 1) {
            throw new IllegalStateException("Version of first snapshot must be 1");
        }

        // serialize the snapshot
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        processGroupSerializer.serialize(flowSnapshot.getFlowContents(), out);

        // save the serialized snapshot to the persistence provider
        final Bucket bucket = DataModelMapper.map(existingBucket);
        final VersionedFlow versionedFlow = DataModelMapper.map(existingBucket, existingFlow);
        final FlowSnapshotContext context = new StandardFlowSnapshotContext.Builder(bucket, versionedFlow,
                snapshotMetadata).build();
        flowPersistenceProvider.saveFlowContent(context, out.toByteArray());

        // create snapshot in the metadata provider
        metadataService.createFlowSnapshot(DataModelMapper.map(snapshotMetadata));

        // update the modified date on the flow
        metadataService.updateFlow(existingFlow);

        // get the updated flow; we need to use "with counts" here so we can return it as part of the response
        final FlowEntity updatedFlow = metadataService
                .getFlowByIdWithSnapshotCounts(snapshotMetadata.getFlowIdentifier());
        if (updatedFlow == null) {
            throw new ResourceNotFoundException(
                    "Versioned flow does not exist for identifier " + snapshotMetadata.getFlowIdentifier());
        }
        final VersionedFlow updatedVersionedFlow = DataModelMapper.map(existingBucket, updatedFlow);

        flowSnapshot.setBucket(bucket);
        flowSnapshot.setFlow(updatedVersionedFlow);
        return flowSnapshot;
    } finally {
        writeLock.unlock();
    }
}

From source file:com.palantir.atlasdb.sweep.SweepTaskRunner.java

private Set<Long> getTimestampsToSweep(Cell cell, Collection<Long> timestamps /* start timestamps */,
        @Modified Map<Long, Long> startTsToCommitTs, @Output Set<Cell> sentinelsToAdd, long sweepTimestamp,
        boolean sweepLastCommitted, SweepStrategy sweepStrategy) {
    Set<Long> uncommittedTimestamps = Sets.newHashSet();
    SortedSet<Long> committedTimestampsToSweep = Sets.newTreeSet();
    long maxStartTs = TransactionConstants.FAILED_COMMIT_TS;
    boolean maxStartTsIsCommitted = false;
    for (long startTs : timestamps) {
        long commitTs = ensureCommitTimestampExists(startTs, startTsToCommitTs);

        if (startTs > maxStartTs && commitTs < sweepTimestamp) {
            maxStartTs = startTs;
            maxStartTsIsCommitted = commitTs != TransactionConstants.FAILED_COMMIT_TS;
        }
        // Note: there could be an open transaction whose start timestamp is equal to
        // sweepTimestamp; thus we want to sweep all cells such that:
        // (1) their commit timestamp is less than sweepTimestamp
        // (2) their start timestamp is NOT the greatest possible start timestamp
        //     passing condition (1)
        if (commitTs > 0 && commitTs < sweepTimestamp) {
            committedTimestampsToSweep.add(startTs);
        } else if (commitTs == TransactionConstants.FAILED_COMMIT_TS) {
            uncommittedTimestamps.add(startTs);
        }
    }

    if (committedTimestampsToSweep.isEmpty()) {
        return uncommittedTimestamps;
    }

    if (sweepStrategy == SweepStrategy.CONSERVATIVE && committedTimestampsToSweep.size() > 1) {
        // We need to add a sentinel if we are removing a committed value
        sentinelsToAdd.add(cell);
    }

    if (sweepLastCommitted && maxStartTsIsCommitted) {
        return Sets.union(uncommittedTimestamps, committedTimestampsToSweep);
    }
    return Sets.union(uncommittedTimestamps,
            committedTimestampsToSweep.subSet(0L, committedTimestampsToSweep.last()));
}

From source file:com.gtwm.pb.servlets.ServletSchemaMethods.java

public synchronized static void addFormTab(HttpServletRequest request, SessionDataInfo sessionData,
        DatabaseInfo databaseDefn)
        throws DisallowedException, MissingParametersException, ObjectNotFoundException {
    TableInfo table = ServletUtilMethods.getTableForRequest(sessionData, request, databaseDefn, true);
    AuthManagerInfo authManager = databaseDefn.getAuthManager();
    if (!authManager.getAuthenticator().loggedInUserAllowedTo(request, PrivilegeType.MANAGE_TABLE, table)) {
        throw new DisallowedException(authManager.getLoggedInUser(request), PrivilegeType.MANAGE_TABLE, table);
    }
    String tabTableId = request.getParameter("tabtable");
    if (tabTableId == null) {
        throw new MissingParametersException("tabtable must be supplied to add a form tab to a table");
    }
    TableInfo tabTable = databaseDefn.getTable(request, tabTableId);
    SortedSet<FormTabInfo> formTabs = table.getFormTabs();
    int newIndex = 0;
    if (formTabs.size() > 0) {
        newIndex = formTabs.last().getIndex() + 1;
    }
    FormTabInfo formTab = new FormTab(table, tabTable, newIndex);
    try {
        HibernateUtil.startHibernateTransaction();
        HibernateUtil.currentSession().save(formTab);
        HibernateUtil.activateObject(table);
        table.addFormTab(formTab);
        HibernateUtil.currentSession().getTransaction().commit();
    } finally {
        HibernateUtil.closeSession();
    }
}

From source file:net.sourceforge.fenixedu.domain.ExecutionCourse.java

public static ExecutionCourse readLastBySigla(final String sigla) {
    SortedSet<ExecutionCourse> result = new TreeSet<ExecutionCourse>(
            EXECUTION_COURSE_EXECUTION_PERIOD_COMPARATOR);
    for (ExecutionCourse executionCourse : Bennu.getInstance().getExecutionCoursesSet()) {
        if (sigla.equalsIgnoreCase(executionCourse.getSigla())) {
            result.add(executionCourse);
        }
    }
    return result.isEmpty() ? null : result.last();
}

From source file:net.sourceforge.fenixedu.domain.ExecutionCourse.java

public static ExecutionCourse readLastByExecutionYearAndSigla(final String sigla, ExecutionYear executionYear) {
    SortedSet<ExecutionCourse> result = new TreeSet<ExecutionCourse>(
            EXECUTION_COURSE_EXECUTION_PERIOD_COMPARATOR);
    for (final ExecutionSemester executionSemester : executionYear.getExecutionPeriodsSet()) {
        for (ExecutionCourse executionCourse : executionSemester.getAssociatedExecutionCoursesSet()) {
            if (sigla.equalsIgnoreCase(executionCourse.getSigla())) {
                result.add(executionCourse);
            }
        }
    }
    return result.isEmpty() ? null : result.last();
}

From source file:net.sourceforge.fenixedu.domain.DegreeCurricularPlan.java

public boolean canSubmitImprovementMarkSheets(final ExecutionYear executionYear) {
    SortedSet<ExecutionDegree> sortedExecutionDegrees = new TreeSet<ExecutionDegree>(
            ExecutionDegree.EXECUTION_DEGREE_COMPARATORY_BY_YEAR);
    sortedExecutionDegrees.addAll(getExecutionDegreesSet());
    return sortedExecutionDegrees.last().getExecutionYear().equals(executionYear.getPreviousExecutionYear());
}

From source file:org.apache.pulsar.broker.admin.impl.NamespacesBase.java

protected BundlesData validateBundlesData(BundlesData initialBundles) {
    SortedSet<String> partitions = new TreeSet<String>();
    for (String partition : initialBundles.getBoundaries()) {
        Long partBoundary = Long.decode(partition);
        partitions.add(String.format("0x%08x", partBoundary));
    }
    if (partitions.size() != initialBundles.getBoundaries().size()) {
        log.debug("Input bundles included repeated partition points. Ignored.");
    }
    try {
        NamespaceBundleFactory.validateFullRange(partitions);
    } catch (IllegalArgumentException iae) {
        throw new RestException(Status.BAD_REQUEST, "Input bundles do not cover the whole hash range. first:"
                + partitions.first() + ", last:" + partitions.last());
    }
    List<String> bundles = Lists.newArrayList();
    bundles.addAll(partitions);
    return new BundlesData(bundles);
}

From source file:org.jclouds.blobstore.integration.internal.StubAsyncBlobStore.java

public Future<? extends ListContainerResponse<? extends ResourceMetadata>> list(final String name,
        ListContainerOptions... optionsList) {
    final ListContainerOptions options = (optionsList.length == 0) ? new ListContainerOptions()
            : optionsList[0];
    return new FutureBase<ListContainerResponse<ResourceMetadata>>() {
        public ListContainerResponse<ResourceMetadata> get() throws InterruptedException, ExecutionException {
            final Map<String, Blob> realContents = getContainerToBlobs().get(name);

            if (realContents == null)
                throw new ContainerNotFoundException(name);

            SortedSet<ResourceMetadata> contents = Sets.newTreeSet(
                    Iterables.transform(realContents.keySet(), new Function<String, ResourceMetadata>() {
                        public ResourceMetadata apply(String key) {
                            return copy(realContents.get(key).getMetadata());
                        }
                    }));

            if (options.getMarker() != null) {
                final String finalMarker = options.getMarker();
                ResourceMetadata lastMarkerMetadata = Iterables.find(contents,
                        new Predicate<ResourceMetadata>() {
                            public boolean apply(ResourceMetadata metadata) {
                                return metadata.getName().equals(finalMarker);
                            }
                        });
                contents = contents.tailSet(lastMarkerMetadata);
                contents.remove(lastMarkerMetadata);
            }

            final String prefix = options.getPath();
            if (prefix != null) {
                contents = Sets.newTreeSet(Iterables.filter(contents, new Predicate<ResourceMetadata>() {
                    public boolean apply(ResourceMetadata o) {
                        return (o != null && o.getName().startsWith(prefix));
                    }
                }));
            }

            int maxResults = contents.size();
            boolean truncated = false;
            String marker = null;
            if (options.getMaxResults() != null && contents.size() > 0) {
                SortedSet<ResourceMetadata> contentsSlice = firstSliceOfSize(contents,
                        options.getMaxResults().intValue());
                maxResults = options.getMaxResults();
                if (!contentsSlice.contains(contents.last())) {
                    // Partial listing
                    truncated = true;
                    marker = contentsSlice.last().getName();
                } else {
                    marker = null;
                }
                contents = contentsSlice;
            }

            final String delimiter = options.isRecursive() ? null : "/";
            if (delimiter != null) {
                SortedSet<String> commonPrefixes = null;
                Iterable<String> iterable = Iterables.transform(contents,
                        new CommonPrefixes(prefix != null ? prefix : null, delimiter));
                commonPrefixes = iterable != null ? Sets.newTreeSet(iterable) : new TreeSet<String>();
                commonPrefixes.remove(CommonPrefixes.NO_PREFIX);

                contents = Sets.newTreeSet(Iterables.filter(contents,
                        new DelimiterFilter(prefix != null ? prefix : null, delimiter)));

                Iterables.<ResourceMetadata>addAll(contents,
                        Iterables.transform(commonPrefixes, new Function<String, ResourceMetadata>() {
                            public ResourceMetadata apply(String o) {
                                MutableResourceMetadata md = new MutableResourceMetadataImpl();
                                md.setType(ResourceType.RELATIVE_PATH);
                                md.setName(o);
                                return md;
                            }
                        }));
            }
            return new ListContainerResponseImpl<ResourceMetadata>(contents, prefix, marker, maxResults,
                    truncated);
        }
    };
}