Usage examples for com.google.common.collect.Sets.newTreeSet
public static <E> TreeSet<E> newTreeSet(Comparator<? super E> comparator)
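Before the real-world examples, a minimal self-contained sketch of this overload, assuming only Guava on the classpath (the class name and data are illustrative): the returned TreeSet orders elements by the supplied comparator instead of their natural ordering.

import com.google.common.collect.Sets;
import java.util.Comparator;
import java.util.TreeSet;

public class NewTreeSetDemo {
    public static void main(String[] args) {
        // Order strings by length, breaking ties alphabetically.
        TreeSet<String> byLength = Sets.newTreeSet(
            Comparator.comparingInt(String::length).thenComparing(Comparator.naturalOrder()));
        byLength.add("pear");
        byLength.add("fig");
        byLength.add("banana");
        System.out.println(byLength); // [fig, pear, banana]
    }
}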
From source file: io.druid.indexer.path.GranularUnprocessedPathSpec.java

@Override
public Job addInputPaths(HadoopDruidIndexerConfig config, Job job) throws IOException {
    // This PathSpec breaks so many abstractions that we might as well break some more
    Preconditions.checkState(
        config.getGranularitySpec() instanceof UniformGranularitySpec,
        String.format(
            "Cannot use %s without %s",
            GranularUnprocessedPathSpec.class.getSimpleName(),
            UniformGranularitySpec.class.getSimpleName()));

    final Path betaInput = new Path(getInputPath());
    final FileSystem fs = betaInput.getFileSystem(job.getConfiguration());
    final Granularity segmentGranularity = config.getGranularitySpec().getSegmentGranularity();

    Map<DateTime, Long> inputModifiedTimes = new TreeMap<>(Comparators.inverse(Comparators.comparable()));

    for (FileStatus status : FSSpideringIterator.spiderIterable(fs, betaInput)) {
        final DateTime key = segmentGranularity.toDate(status.getPath().toString());
        final Long currVal = inputModifiedTimes.get(key);
        final long mTime = status.getModificationTime();
        inputModifiedTimes.put(key, currVal == null ? mTime : Math.max(currVal, mTime));
    }

    Set<Interval> bucketsToRun = Sets.newTreeSet(Comparators.intervals());
    for (Map.Entry<DateTime, Long> entry : inputModifiedTimes.entrySet()) {
        DateTime timeBucket = entry.getKey();
        long mTime = entry.getValue();

        String bucketOutput = String.format(
            "%s/%s",
            config.getSchema().getIOConfig().getSegmentOutputPath(),
            segmentGranularity.toPath(timeBucket));
        for (FileStatus fileStatus : FSSpideringIterator.spiderIterable(fs, new Path(bucketOutput))) {
            if (fileStatus.getModificationTime() > mTime) {
                bucketsToRun.add(new Interval(timeBucket, segmentGranularity.increment(timeBucket)));
                break;
            }
        }

        if (bucketsToRun.size() >= maxBuckets) {
            break;
        }
    }

    config.setGranularitySpec(
        new UniformGranularitySpec(
            segmentGranularity,
            config.getGranularitySpec().getQueryGranularity(),
            Lists.newArrayList(bucketsToRun)));

    return super.addInputPaths(config, job);
}
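Stripped of the Druid-specific types, the newTreeSet call above keeps candidate buckets sorted chronologically while they are collected. A minimal sketch of that pattern, assuming Guava on the classpath and Java 16+ for records; Bucket and its comparator are illustrative stand-ins for Druid's Interval and Comparators.intervals():

import com.google.common.collect.Sets;
import java.time.Instant;
import java.util.Comparator;
import java.util.TreeSet;

public class BucketOrderingSketch {
    // Illustrative stand-in for an interval: a [start, end) pair.
    record Bucket(Instant start, Instant end) {}

    public static void main(String[] args) {
        // Sort buckets by start, then end: the role Comparators.intervals() plays above.
        TreeSet<Bucket> bucketsToRun = Sets.newTreeSet(
            Comparator.comparing(Bucket::start).thenComparing(Bucket::end));
        Instant t0 = Instant.parse("2024-01-01T00:00:00Z");
        bucketsToRun.add(new Bucket(t0.plusSeconds(3600), t0.plusSeconds(7200)));
        bucketsToRun.add(new Bucket(t0, t0.plusSeconds(3600)));
        // Iteration is chronological regardless of insertion order.
        bucketsToRun.forEach(System.out::println);
    }
}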
From source file: org.apache.aurora.scheduler.storage.StorageBackfill.java

private static void guaranteeShardUniqueness(ScheduledTask task, TaskStore.Mutable taskStore, Clock clock) {
    if (Tasks.isActive(task.getStatus())) {
        // Perform a sanity check on the number of active shards.
        Query.Builder query = Query.instanceScoped(
            IJobKey.build(task.getAssignedTask().getTask().getJob()),
            task.getAssignedTask().getInstanceId()).active();
        Set<String> activeTasksInShard = FluentIterable.from(taskStore.fetchTasks(query))
            .transform(Tasks.SCHEDULED_TO_ID)
            .toSet();

        if (activeTasksInShard.size() > 1) {
            SHARD_SANITY_CHECK_FAILS.incrementAndGet();
            LOG.severe("Active shard sanity check failed when loading " + Tasks.id(task)
                + ", active tasks found: " + activeTasksInShard);

            // We want to keep exactly one task from this shard, so sort the IDs and keep the
            // highest (newest) in the hopes that it is legitimately running.
            String newestTask = Iterables.getLast(Sets.newTreeSet(activeTasksInShard));
            if (Tasks.id(task).equals(newestTask)) {
                LOG.info("Retaining task " + Tasks.id(task));
            } else {
                task.setStatus(ScheduleStatus.KILLED);
                task.addToTaskEvents(new TaskEvent(clock.nowMillis(), ScheduleStatus.KILLED)
                    .setMessage("Killed duplicate shard."));
                // TODO(wfarner): Circle back if this is necessary. Currently there's a race
                // condition between the time the scheduler is actually available without hitting
                // IllegalStateException (see DriverImpl).
                // driver.killTask(Tasks.id(task));
            }
        }
    }
}
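The interesting line is the one that picks newestTask: copying the IDs into a natural-order TreeSet and taking the last element yields the lexicographically greatest ID. A standalone sketch, assuming Guava on the classpath; the task IDs are invented and presume, as the example does, that IDs sort chronologically:

import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import java.util.Set;

public class NewestTaskSketch {
    public static void main(String[] args) {
        // Hypothetical task IDs; natural string ordering puts the highest (newest) last.
        Set<String> activeTasksInShard = Set.of("task-001", "task-003", "task-002");
        String newestTask = Iterables.getLast(Sets.newTreeSet(activeTasksInShard));
        System.out.println(newestTask); // task-003
    }
}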
From source file: org.gradle.api.reporting.internal.TaskReportContainer.java

@Input
public SortedSet<String> getEnabledReportNames() {
    return Sets.newTreeSet(Iterables.transform(getEnabled(), REPORT_NAME));
}
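A tiny self-contained version of the same transform-then-sort idiom, with String stand-ins for the report objects and an uppercasing function standing in for REPORT_NAME (both are assumptions for illustration):

import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import java.util.List;
import java.util.SortedSet;

public class SortedNamesSketch {
    public static void main(String[] args) {
        List<String> reports = List.of("html", "xml", "csv");
        // Lazily transform, then materialize into a sorted set in one pass.
        SortedSet<String> names = Sets.newTreeSet(Iterables.transform(reports, String::toUpperCase));
        System.out.println(names); // [CSV, HTML, XML]
    }
}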
From source file: com.eucalyptus.cloudformation.template.dependencies.DependencyManager.java

public synchronized List<String> dependencyList() throws CyclicDependencyException {
    LinkedList<String> sortedNodes = Lists.newLinkedList();
    Set<String> unmarkedNodes = Sets.newTreeSet(nodes);
    Set<String> temporarilyMarkedNodes = Sets.newLinkedHashSet(); // this also represents the current path...
    Set<String> permanentlyMarkedNodes = Sets.newHashSet();
    while (!unmarkedNodes.isEmpty()) {
        String currentNode = unmarkedNodes.iterator().next();
        visitNode(currentNode, unmarkedNodes, temporarilyMarkedNodes, permanentlyMarkedNodes, sortedNodes);
    }
    return sortedNodes;
}
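Seeding unmarkedNodes with Sets.newTreeSet(nodes) is what makes the traversal deterministic: whatever order nodes arrives in, ties are always broken alphabetically. A minimal sketch of that property (the resource names are invented):

import com.google.common.collect.Sets;
import java.util.Set;

public class DeterministicOrderSketch {
    public static void main(String[] args) {
        // A HashSet gives no iteration-order guarantee; the TreeSet copy always
        // visits alphabetically, so repeated runs produce the same ordering.
        Set<String> nodes = Set.of("VPC", "Instance", "Subnet");
        Set<String> unmarkedNodes = Sets.newTreeSet(nodes);
        System.out.println(unmarkedNodes); // [Instance, Subnet, VPC]
    }
}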
From source file: com.facebook.buck.model.BuildFileTree.java

public BuildFileTree(Iterable<String> basePaths) {
    TreeSet<String> allBasePaths = Sets.newTreeSet(PATH_COMPARATOR);
    for (String basePath : basePaths) {
        allBasePaths.add(basePath);
    }

    // Initialize basePathToNodeIndex with a Node that corresponds to the empty string. This ensures
    // that findParent() will always return a non-null Node because the empty string is a prefix of
    // all base paths.
    basePathToNodeIndex = Maps.newHashMap();
    Node root = new Node("");
    basePathToNodeIndex.put("", root);

    // Build up basePathToNodeIndex in a breadth-first manner.
    for (String basePath : allBasePaths) {
        if ("".equals(basePath)) {
            continue;
        }

        Node child = new Node(basePath);
        Node parent = findParent(child, basePathToNodeIndex);
        parent.addChild(child);
        basePathToNodeIndex.put(basePath, child);
    }
}
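The comparator drives the whole construction here: parents must sort before children so that findParent() always finds an already-inserted node. A sketch with a hypothetical stand-in for Buck's PATH_COMPARATOR (the real comparator may differ):

import com.google.common.collect.Sets;
import java.util.Comparator;
import java.util.TreeSet;

public class BasePathOrderSketch {
    public static void main(String[] args) {
        // Hypothetical stand-in for PATH_COMPARATOR: shorter (ancestor) paths sort first.
        Comparator<String> pathComparator =
            Comparator.comparingInt(String::length).thenComparing(Comparator.naturalOrder());
        TreeSet<String> allBasePaths = Sets.newTreeSet(pathComparator);
        allBasePaths.add("src/com/example/app");
        allBasePaths.add("src");
        allBasePaths.add("src/com");
        // A parent is guaranteed to appear before any of its children.
        System.out.println(allBasePaths); // [src, src/com, src/com/example/app]
    }
}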
From source file: org.jclouds.rackspace.cloudfiles.functions.ParseObjectInfoListFromJsonResponse.java

public PageSet<ObjectInfo> apply(InputStream stream) {
    checkState(request != null, "request should be initialized at this point");
    checkState(request.getArgs() != null, "request.getArgs() should be initialized at this point");
    checkArgument(request.getArgs()[0] instanceof String, "arg[0] must be a container name");
    checkArgument(request.getArgs()[1] instanceof ListContainerOptions[],
        "arg[1] must be an array of ListContainerOptions");
    ListContainerOptions[] optionsList = (ListContainerOptions[]) request.getArgs()[1];
    ListContainerOptions options = optionsList.length > 0 ? optionsList[0] : ListContainerOptions.NONE;
    Type listType = new TypeToken<SortedSet<ObjectInfoImpl>>() {
    }.getType();

    try {
        SortedSet<ObjectInfoImpl> list = apply(stream, listType);
        SortedSet<ObjectInfo> returnVal = Sets.newTreeSet(
            Iterables.transform(list, new Function<ObjectInfoImpl, ObjectInfo>() {
                public ObjectInfo apply(ObjectInfoImpl from) {
                    return from;
                }
            }));
        boolean truncated = options.getMaxResults() == returnVal.size();
        String marker = truncated ? returnVal.last().getName() : null;
        return new PageSetImpl<ObjectInfo>(returnVal, marker);
    } catch (IOException e) {
        throw new RuntimeException("problem reading response from request: " + request, e);
    }
}
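Two details worth noting: the identity Function exists only to widen the element type from ObjectInfoImpl to ObjectInfo while newTreeSet eagerly materializes the lazy transform into a sorted copy, and because the result is sorted, returnVal.last() is a valid continuation marker whenever the listing was truncated.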
From source file: uk.ac.ebi.atlas.utils.ExperimentInfo.java

public void setExperimentalFactors(Set<String> experimentalFactors) {
    this.experimentalFactors = Sets.newTreeSet(experimentalFactors);
}
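A one-line defensive copy: the setter stores its own TreeSet, so later changes to the caller's set cannot affect this object, and the factors always iterate in natural (alphabetical) order.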
From source file: org.jclouds.samples.googleappengine.GetAllContainersController.java

private void addMyContainersToRequest(HttpServletRequest request)
        throws InterruptedException, ExecutionException, TimeoutException {
    request.setAttribute("containers",
        Sets.newTreeSet(Iterables.transform(contexts.keySet(), blobStoreContextToContainerResult)));
}
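The same transform-then-copy idiom as the Gradle example above: the transformed container results are materialized into a naturally ordered set before being exposed to the view layer.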
From source file: org.apache.druid.indexer.path.GranularityPathSpec.java

@Override
public Job addInputPaths(HadoopDruidIndexerConfig config, Job job) throws IOException {
    final Set<Interval> intervals = Sets.newTreeSet(Comparators.intervalsByStartThenEnd());
    for (Interval inputInterval : config.getInputIntervals()) {
        for (Interval interval : dataGranularity.getIterable(inputInterval)) {
            intervals.add(trim(inputInterval, interval));
        }
    }

    Path betaInput = new Path(inputPath);
    FileSystem fs = betaInput.getFileSystem(job.getConfiguration());
    Set<String> paths = Sets.newTreeSet();

    Pattern fileMatcher = Pattern.compile(filePattern);

    DateTimeFormatter customFormatter = null;
    if (pathFormat != null) {
        customFormatter = DateTimeFormat.forPattern(pathFormat);
    }

    for (Interval interval : intervals) {
        DateTime t = interval.getStart();
        String intervalPath;
        if (customFormatter != null) {
            intervalPath = customFormatter.print(t);
        } else {
            intervalPath = dataGranularity.toPath(t);
        }

        Path granularPath = new Path(betaInput, intervalPath);
        log.info("Checking path[%s]", granularPath);
        for (FileStatus status : FSSpideringIterator.spiderIterable(fs, granularPath)) {
            final Path filePath = status.getPath();
            if (fileMatcher.matcher(filePath.toString()).matches()) {
                paths.add(filePath.toString());
            }
        }
    }

    log.info("Appending path %s", paths);
    StaticPathSpec.addToMultipleInputs(config, job, paths, inputFormat);

    return job;
}
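This example uses both overloads: the comparator form orders intervals by start then end, while the no-arg form used for paths falls back to natural ordering. A minimal sketch of the no-arg overload, assuming Guava on the classpath; the paths are invented:

import com.google.common.collect.Sets;
import java.util.TreeSet;

public class NaturalOrderPathsSketch {
    public static void main(String[] args) {
        // No-arg overload: elements must be Comparable. Strings sort lexicographically,
        // which for date-encoded paths like these is also chronological.
        TreeSet<String> paths = Sets.newTreeSet();
        paths.add("hdfs://data/2019-01-02/part-0");
        paths.add("hdfs://data/2019-01-01/part-0");
        System.out.println(paths.first()); // hdfs://data/2019-01-01/part-0
    }
}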
From source file: org.apache.pulsar.client.impl.conf.ProducerConfigurationData.java

public ProducerConfigurationData clone() {
    try {
        ProducerConfigurationData c = (ProducerConfigurationData) super.clone();
        c.encryptionKeys = Sets.newTreeSet(this.encryptionKeys);
        c.properties = Maps.newTreeMap(this.properties);
        return c;
    } catch (CloneNotSupportedException e) {
        throw new RuntimeException("Failed to clone ProducerConfigurationData", e);
    }
}
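Here newTreeSet (alongside Maps.newTreeMap) gives the clone independent sorted copies of its mutable collections, so mutating the clone's encryption keys or properties cannot leak back into the original configuration.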