List of usage examples for java.util.List#sort(Comparator)
@SuppressWarnings({ "unchecked", "rawtypes" }) default void sort(Comparator<? super E> c)
From source file:com.uber.hoodie.utilities.sources.DFSSource.java
/**
 * Fetches new data from the DFS source since the last checkpoint.
 *
 * Lists all files under the configured root path, skips directories and files with
 * ignored name prefixes, then selects files strictly newer than the checkpoint
 * (a modification-time watermark) until {@code maxInputBytes} is reached.
 *
 * @param lastCheckpointStr the last checkpoint (max modification time seen), if any
 * @param maxInputBytes     soft cap on the number of bytes to ingest in this batch
 * @return a pair of (RDD over the new records, new checkpoint string)
 */
@Override
public Pair<Optional<JavaRDD<GenericRecord>>, String> fetchNewData(Optional<String> lastCheckpointStr,
    long maxInputBytes) {
  try {
    // Obtain all eligible files under the root folder.
    List<FileStatus> eligibleFiles = new ArrayList<>();
    RemoteIterator<LocatedFileStatus> fitr =
        fs.listFiles(new Path(config.getString(Config.ROOT_INPUT_PATH_PROP)), true);
    while (fitr.hasNext()) {
      LocatedFileStatus fileStatus = fitr.next();
      // Skip directories and any file whose name starts with an ignored prefix.
      if (fileStatus.isDirectory()
          || IGNORE_FILEPREFIX_LIST.stream().anyMatch(pfx -> fileStatus.getPath().getName().startsWith(pfx))) {
        continue;
      }
      eligibleFiles.add(fileStatus);
    }
    // Sort by modification time so the checkpoint watermark advances monotonically.
    eligibleFiles.sort((FileStatus f1, FileStatus f2) ->
        Long.compare(f1.getModificationTime(), f2.getModificationTime()));
    // Filter based on checkpoint & input size, if needed.
    long currentBytes = 0;
    long maxModificationTime = Long.MIN_VALUE;
    List<FileStatus> filteredFiles = new ArrayList<>();
    for (FileStatus f : eligibleFiles) {
      if (lastCheckpointStr.isPresent() && f.getModificationTime() <= Long.parseLong(lastCheckpointStr.get())) {
        // Already processed in a previous batch; skip.
        continue;
      }
      maxModificationTime = f.getModificationTime();
      currentBytes += f.getLen();
      filteredFiles.add(f);
      if (currentBytes >= maxInputBytes) {
        // We have enough data, we are done.
        break;
      }
    }
    // No new data: echo back the previous checkpoint (or a sentinel when none exists).
    if (filteredFiles.isEmpty()) {
      return new ImmutablePair<>(Optional.empty(),
          lastCheckpointStr.orElse(String.valueOf(Long.MIN_VALUE)));
    }
    // Read the selected files out as a single comma-separated path spec.
    String pathStr = filteredFiles.stream().map(f -> f.getPath().toString())
        .collect(Collectors.joining(","));
    String schemaStr = schemaProvider.getSourceSchema().toString();
    final AvroConvertor avroConvertor = new AvroConvertor(schemaStr);
    return new ImmutablePair<>(
        Optional.of(DFSSource.fromFiles(dataFormat, avroConvertor, pathStr, sparkContext)),
        String.valueOf(maxModificationTime));
  } catch (IOException ioe) {
    throw new HoodieIOException("Unable to read from source from checkpoint: " + lastCheckpointStr, ioe);
  }
}
From source file:org.niord.core.publication.PublicationService.java
/** * Returns the publications with the given database IDs * * @param ids the database IDs/*from www .jav a 2 s.co m*/ * @return the publications with the database IDs */ public List<Publication> findByIds(List<Integer> ids) { if (ids == null || ids.isEmpty()) { return Collections.emptyList(); } List<Publication> publications = em.createNamedQuery("Publication.findByIds", Publication.class) .setParameter("ids", ids).getResultList(); // Sort the result according to the order of the publications in the ID list publications.sort(Comparator.comparingInt(m -> ids.indexOf(m.getId()))); return publications; }
From source file:org.openvpms.archetype.rules.party.PartyRules.java
/** * Looks for a party contact that matches the criteria. * * @param party the party/*from w w w .ja v a 2 s .c o m*/ * @param matcher the contact matcher * @return the matching contact or {@code null} */ private Contact getContact(Party party, ContactMatcher matcher) { List<Contact> contacts = Contacts.sort(party.getContacts()); return Contacts.find(contacts, matcher); }
From source file:org.commonjava.indy.pkg.maven.content.group.MavenMetadataMerger.java
/**
 * Merges maven-metadata documents from the group's member repositories into a single
 * {@code Metadata} for the given path, then folds in any extra metadata providers and
 * normalizes the version list (sorted, with latest/release pointers updated).
 *
 * Returns {@code null} when nothing was merged at all.
 */
@Override
public Metadata mergeFromMetadatas(final Collection<Metadata> sources, final Group group, final String path) {
    Logger logger = LoggerFactory.getLogger(getClass());
    logger.debug("Generating merged metadata in: {}:{}", group.getKey(), path);
    final Metadata master = new Metadata();
    master.setVersioning(new Versioning());
    boolean merged = false;
    for (final Metadata src : sources) {
        logger.trace("Adding in metadata content from: {}", src);
        // there is a lot of junk in here to make up for Metadata's anemic merge() method.
        // Copy the coordinate fields explicitly; last non-null source wins.
        if (src.getGroupId() != null) {
            master.setGroupId(src.getGroupId());
        }
        if (src.getArtifactId() != null) {
            master.setArtifactId(src.getArtifactId());
        }
        if (src.getVersion() != null) {
            master.setVersion(src.getVersion());
        }
        master.merge(src);
        Versioning versioning = master.getVersioning();
        Versioning mdVersioning = src.getVersioning();
        // FIXME: Should we try to merge snapshot lists instead of using the first one we encounter??
        // First source carrying a snapshot wins; later sources are logged and skipped.
        if (versioning.getSnapshot() == null && mdVersioning != null) {
            logger.trace("INCLUDING snapshot information from: {} in: {}:{}", src, group.getKey(), path);
            versioning.setSnapshot(mdVersioning.getSnapshot());
            final List<SnapshotVersion> snapshotVersions = versioning.getSnapshotVersions();
            boolean added = false;
            // Union the snapshot-version lists, avoiding duplicates.
            for (final SnapshotVersion snap : mdVersioning.getSnapshotVersions()) {
                if (!snapshotVersions.contains(snap)) {
                    snapshotVersions.add(snap);
                    added = true;
                }
            }
            // Only re-sort when something was actually appended.
            if (added) {
                snapshotVersions.sort(new SnapshotVersionComparator());
            }
        } else {
            logger.warn("SKIPPING snapshot information from: {} in: {}:{})", src, group.getKey(), path);
        }
        merged = true;
    }
    Versioning versioning = master.getVersioning();
    if (versioning != null && versioning.getVersions() != null) {
        // Fold in any externally-plugged metadata providers; a provider failure is logged
        // but does not abort the merge of the remaining providers.
        if (metadataProviders != null) {
            for (MavenMetadataProvider provider : metadataProviders) {
                try {
                    Metadata toMerge = provider.getMetadata(group.getKey(), path);
                    if (toMerge != null) {
                        merged = master.merge(toMerge) || merged;
                    }
                } catch (IndyWorkflowException e) {
                    logger.error(
                            String.format("Cannot read metadata: %s from metadata provider: %s. Reason: %s",
                                    path, provider.getClass().getSimpleName(), e.getMessage()),
                            e);
                }
            }
        }
        // Parse the merged version strings; unparseable versions are silently dropped
        // (deliberate: a bad version from one member must not break the whole merge).
        List<SingleVersion> versionObjects = versioning.getVersions().stream().map((v) -> {
            try {
                return VersionUtils.createSingleVersion(v);
            } catch (InvalidVersionSpecificationException e) {
                return null;
            }
        }).filter(Objects::nonNull).collect(Collectors.toList());
        // Sort semantically (SingleVersion ordering), then write the list back rendered.
        Collections.sort(versionObjects);
        versioning.setVersions(
                versionObjects.stream().map(SingleVersion::renderStandard).collect(Collectors.toList()));
        if (versionObjects.size() > 0) {
            // Latest and release both point at the highest parseable version.
            String latest = versionObjects.get(versionObjects.size() - 1).renderStandard();
            versioning.setLatest(latest);
            versioning.setRelease(latest);
        }
    }
    if (merged) {
        return master;
    }
    return null;
}
From source file:com.evolveum.midpoint.wf.impl.processors.primary.policy.ApprovalSchemaBuilder.java
private void sortFragments(List<Fragment> fragments) { fragments.forEach(f -> {/* w w w . j a v a2s . com*/ if (f.compositionStrategy != null && BooleanUtils.isTrue(f.compositionStrategy.isMergeable()) && f.compositionStrategy.getOrder() == null) { throw new IllegalStateException("Mergeable composition strategy with no order: " + f.compositionStrategy + " in " + f.policyRule); } }); // relying on the fact that the sort algorithm is stable fragments.sort((f1, f2) -> { ApprovalCompositionStrategyType s1 = f1.compositionStrategy; ApprovalCompositionStrategyType s2 = f2.compositionStrategy; Integer o1 = s1 != null ? s1.getOrder() : null; Integer o2 = s2 != null ? s2.getOrder() : null; if (o1 == null || o2 == null) { return MiscUtil.compareNullLast(o1, o2); } int c = Integer.compare(o1, o2); if (c != 0) { return c; } // non-mergeable first boolean m1 = BooleanUtils.isTrue(s1.isMergeable()); boolean m2 = BooleanUtils.isTrue(s2.isMergeable()); if (m1 && !m2) { return 1; } else if (!m1 && m2) { return -1; } else { return 0; } }); }
From source file:dpfmanager.conformancechecker.tiff.reporting.PdfReport.java
private List<String> sortByTag(Set<String> keysSet) { List<String> keys = new ArrayList<>(keysSet); keys.sort(new Comparator<String>() { @Override//from w ww. ja va 2 s . c o m public int compare(String o1, String o2) { String sub1 = o1.substring(0, 3); String sub2 = o2.substring(0, 3); if (sub1.equals(sub2)) { return o1.compareTo(o2); } else if (sub1.equals("ifd") || sub2.equals("ifd")) { return sub1.equals("ifd") ? -1 : 1; } else if (sub1.equals("sub") || sub2.equals("sub")) { return sub1.equals("sub") ? -1 : 1; } else if (sub1.equals("exi") || sub2.equals("exi")) { return sub1.equals("exi") ? -1 : 1; } else if (sub1.equals("xmp") || sub2.equals("xmp")) { return sub1.equals("xmp") ? -1 : 1; } else if (sub1.equals("ipt") || sub2.equals("ipt")) { return sub1.equals("ipt") ? -1 : 1; } return 0; } }); return keys; }
From source file:io.stallion.dataAccess.file.FilePersisterBase.java
@Override public List<T> fetchAll() { File target = new File(Settings.instance().getTargetFolder()); if (!target.isDirectory()) { if (getItemController().isWritable()) { target.mkdirs();//from www. j a v a2 s. com } else { throw new ConfigException(String.format( "The JSON bucket %s (path %s) is read-only, but does not exist in the file system. Either create the folder, make it writable, or remove it from the configuration.", getItemController().getBucket(), getBucketFolderPath())); } } TreeVisitor visitor = new TreeVisitor(); Path folderPath = FileSystems.getDefault().getPath(getBucketFolderPath()); try { Files.walkFileTree(folderPath, visitor); } catch (IOException e) { throw new RuntimeException(e); } List<T> objects = new ArrayList<>(); for (Path path : visitor.getPaths()) { if (!matchesExtension(path.toString())) { continue; } if (path.toString().contains(".#")) { continue; } if (path.getFileName().startsWith(".")) { continue; } T o = fetchOne(path.toString()); if (o != null) { objects.add(o); } } objects.sort(new PropertyComparator<T>(sortField)); if (sortDirection.toLowerCase().equals("desc")) { Collections.reverse(objects); } return objects; }
From source file:com.groupon.deployment.fleet.Sequential.java
/**
 * Public constructor.
 *
 * Refreshes the deployment from storage, verifies this server still owns it
 * (poisoning the actor otherwise), queues the unfinished hosts for sequential
 * deployment, and kicks off the first host.
 *
 * @param hostDeploymentFactory a factory to create a host deployment
 * @param dcf deployment client factory
 * @param sshFactory ssh session factory
 * @param deployment deployment to run
 */
@AssistedInject
public Sequential(final HostDeploymentFactory hostDeploymentFactory, final DeploymentClientFactory dcf,
        final SshSessionFactory sshFactory, @Assisted final Deployment deployment) {
    _hostDeploymentFactory = hostDeploymentFactory;
    _dcf = dcf;
    _sshFactory = sshFactory;
    _deployment = Deployment.getById(deployment.getId()); // Refresh the deployment
    final String myName;
    try {
        myName = InetAddress.getLocalHost().getCanonicalHostName();
    } catch (final UnknownHostException e) {
        throw Throwables.propagate(e);
    }
    // If this host no longer owns the deployment, die
    if (!myName.equals(_deployment.getDeploymentOwner())) {
        Logger.warn(String.format(
                "Current server does not own the deployment, aborting deploy on this server; owner=%s",
                _deployment.getDeploymentOwner()));
        self().tell(PoisonPill.getInstance(), self());
    }
    Logger.info("Sequential fleet deployment actor started up");
    // Collect hosts that have not finished in a terminal state (FAILED/SUCCEEDED);
    // everything else still needs to be deployed.
    final List<HostDeployment> hosts = Lists.newArrayList();
    deployment.getHostStates().forEach(host -> {
        final DeploymentState hostState = host.getState();
        if (host.getFinished() == null || hostState == null
                || (hostState != DeploymentState.FAILED && hostState != DeploymentState.SUCCEEDED)) {
            hosts.add(host);
        }
    });
    // Sort the hosts with the following rules:
    // TODO(barp): 1) hosts that are "down" should be deployed first [Artemis-?]
    // 2) if the current machine is in the list, it should be last
    hosts.sort((a, b) -> {
        if (a.getHost().getName().equals(myName)) {
            return 1;
        }
        if (b.getHost().getName().equals(myName)) {
            return -1;
        }
        return a.getHost().getName().compareTo(b.getHost().getName());
    });
    _hostQueue = Queues.newArrayDeque(hosts);
    // Trigger the first host deployment via a message to self.
    self().tell("start", self());
}
From source file:com.github.ambry.store.DiskReformatter.java
/**
 * Uses {@link StoreCopier} to convert all the partitions on the given disk (D).
 * 1. Copies one partition on D to a scratch space
 * 2. Using {@link StoreCopier}, performs copies of all other partitions on D using D as a staging area. When a
 * partition is completely copied and verified, the original is replaced by the copy.
 * 3. Copies the partition in the scratch space back onto D.
 * 4. Deletes the folder in the scratch space
 * @param diskMountPath the mount path of the disk to reformat
 * @param scratch the scratch space to use
 * @throws Exception if any copy, move, or delete step fails
 */
public void reformat(String diskMountPath, File scratch) throws Exception {
    if (!scratch.exists()) {
        throw new IllegalArgumentException("Scratch space " + scratch + " does not exist");
    }
    List<ReplicaId> replicasOnDisk = new ArrayList<>();
    // populate the replicas on disk
    List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
    for (ReplicaId replicaId : replicaIds) {
        if (replicaId.getDiskId().getMountPath().equals(diskMountPath)) {
            replicasOnDisk.add(replicaId);
        }
    }
    if (replicasOnDisk.size() == 0) {
        throw new IllegalArgumentException("There are no replicas on " + diskMountPath + " of " + dataNodeId);
    }
    // Smallest first: the largest replica goes to scratch, freeing the most staging room on D.
    replicasOnDisk.sort(Comparator.comparingLong(ReplicaId::getCapacityInBytes));
    logger.info("Found {} on {}", replicasOnDisk, diskMountPath);
    // move the last replica id (the largest one) to scratch space
    ReplicaId toMove = replicasOnDisk.get(replicasOnDisk.size() - 1);
    String partIdString = toMove.getPartitionId().toString();
    File scratchSrc = new File(toMove.getReplicaPath());
    // Move into an "in progress" dir first, then rename to the final name so a
    // completed relocation is distinguishable from an interrupted one.
    File scratchTmp = new File(scratch, partIdString + RELOCATION_IN_PROGRESS_SUFFIX);
    File scratchTgt = new File(scratch, partIdString + RELOCATED_DIR_NAME_SUFFIX);
    if (scratchTmp.exists()) {
        throw new IllegalStateException(scratchTmp + " already exists");
    }
    if (scratchTgt.exists()) {
        throw new IllegalStateException(scratchTgt + " already exists");
    }
    ensureNotInUse(scratchSrc, toMove.getCapacityInBytes());
    logger.info("Moving {} to {}", scratchSrc, scratchTgt);
    FileUtils.moveDirectory(scratchSrc, scratchTmp);
    if (!scratchTmp.renameTo(scratchTgt)) {
        throw new IllegalStateException("Could not rename " + scratchTmp + " to " + scratchTgt);
    }
    // reformat each store, except the one moved, one by one
    for (int i = 0; i < replicasOnDisk.size() - 1; i++) {
        ReplicaId replicaId = replicasOnDisk.get(i);
        partIdString = replicaId.getPartitionId().toString();
        File src = new File(replicaId.getReplicaPath());
        File tgt = new File(replicaId.getMountPath(), partIdString + UNDER_REFORMAT_DIR_NAME_SUFFIX);
        logger.info("Copying {} to {}", src, tgt);
        copy(partIdString, src, tgt, replicaId.getCapacityInBytes());
        logger.info("Deleting {}", src);
        Utils.deleteFileOrDirectory(src);
        logger.info("Renaming {} to {}", tgt, src);
        if (!tgt.renameTo(src)) {
            throw new IllegalStateException("Could not rename " + tgt + " to " + src);
        }
        logger.info("Done reformatting {}", replicaId);
    }
    // reformat the moved store
    logger.info("Copying {} to {}", scratchTgt, scratchSrc);
    copy(toMove.getPartitionId().toString(), scratchTgt, scratchSrc, toMove.getCapacityInBytes());
    logger.info("Deleting {}", scratchTgt);
    Utils.deleteFileOrDirectory(scratchTgt);
    logger.info("Done reformatting {}", toMove);
    logger.info("Done reformatting disk {}", diskMountPath);
}
From source file:org.cgiar.ccafs.marlo.action.powb.CRPStaffingAction.java
/**
 * Returns the active flagship liaison institutions of the logged-in CRP,
 * sorted by acronym.
 *
 * @return the flagship institutions, or an empty list when there are none
 */
public List<LiaisonInstitution> getFlagships() {
    // collect(Collectors.toList()) never returns null, so the old null-check branch
    // (returning a fresh ArrayList) was dead code; sorting is folded into the stream.
    return loggedCrp.getLiaisonInstitutions().stream()
        .filter(c -> c.getCrpProgram() != null
            && c.getCrpProgram().getProgramType() == ProgramType.FLAGSHIP_PROGRAM_TYPE.getValue()
            && c.isActive())
        .sorted(Comparator.comparing(LiaisonInstitution::getAcronym))
        .collect(Collectors.toList());
}