Example usage for com.google.common.collect Maps newTreeMap

Introduction

This page collects example usages of com.google.common.collect Maps newTreeMap from open-source projects.

Prototype

public static <C, K extends C, V> TreeMap<K, V> newTreeMap(@Nullable Comparator<C> comparator) 

Document

Creates a mutable, empty TreeMap instance using the given comparator.
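
Since the signature is <C, K extends C, V>, the comparator may be declared on the key type itself or on any of its supertypes, and passing null falls back to the keys' natural ordering. Below is a minimal sketch of the basic behavior; it is not taken from the projects listed under Usage, and the class name is invented for illustration:

import java.util.TreeMap;

import com.google.common.collect.Maps;

public class NewTreeMapSketch {
    public static void main(String[] args) {
        // The map orders its keys with the supplied comparator.
        TreeMap<String, Integer> map = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
        map.put("Banana", 2);
        map.put("apple", 1);
        System.out.println(map); // {apple=1, Banana=2}: case-insensitive key order
    }
}

The factory mainly saves spelling out the generic arguments that new TreeMap<K, V>(comparator) required before the Java 7 diamond operator, which is likely why it appears so often in older codebases such as those below.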

Usage

From source file:com.metamx.druid.indexer.HadoopDruidIndexerJob.java

@Override
public boolean run() {
    List<Jobby> jobs = Lists.newArrayList();

    ensurePaths();

    if (config.partitionByDimension()) {
        jobs.add(new DeterminePartitionsJob(config));
    } else {
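        // Shard specs bucketed by segment start time; DateTimeComparator keeps the buckets chronological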
        Map<DateTime, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap(DateTimeComparator.getInstance());
        int shardCount = 0;
        for (Interval segmentGranularity : config.getSegmentGranularIntervals()) {
            DateTime bucket = segmentGranularity.getStart();
            final HadoopyShardSpec spec = new HadoopyShardSpec(new NoneShardSpec(), shardCount++);
            shardSpecs.put(bucket, Lists.newArrayList(spec));
            log.info("DateTime[%s], spec[%s]", bucket, spec);
        }
        config.setShardSpecs(shardSpecs);
    }

    indexJob = new IndexGeneratorJob(config);
    jobs.add(indexJob);

    if (dbUpdaterJob != null) {
        jobs.add(dbUpdaterJob);
    } else {
        log.info("No updaterJobSpec set, not uploading to database");
    }

    String failedMessage = null;
    for (Jobby job : jobs) {
        if (failedMessage == null) {
            if (!job.run()) {
                failedMessage = String.format("Job[%s] failed!", job.getClass());
            }
        }
    }

    if (failedMessage == null) {
        publishedSegments = IndexGeneratorJob.getPublishedSegments(config);
    }

    if (!config.isLeaveIntermediate()) {
        if (failedMessage == null || config.isCleanupOnFailure()) {
            Path workingPath = config.makeIntermediatePath();
            log.info("Deleting path[%s]", workingPath);
            try {
                workingPath.getFileSystem(new Configuration()).delete(workingPath, true);
            } catch (IOException e) {
                log.error(e, "Failed to cleanup path[%s]", workingPath);
            }
        }
    }

    if (failedMessage != null) {
        throw new ISE(failedMessage);
    }

    return true;
}

From source file:co.cask.cdap.data2.dataset2.lib.table.leveldb.LevelDBOrderedTable.java

@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> changes) throws Exception {
    persistedVersion = tx == null ? System.currentTimeMillis() : tx.getWritePointer();

    // Two byte-ordered maps, keyed by row: one collects plain puts, the other increment deltas
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> puts = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    NavigableMap<byte[], NavigableMap<byte[], Long>> increments = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], NavigableMap<byte[], Update>> rowEntry : changes.entrySet()) {
        for (Map.Entry<byte[], Update> colEntry : rowEntry.getValue().entrySet()) {
            Update val = colEntry.getValue();
            if (val instanceof IncrementValue) {
                NavigableMap<byte[], Long> incrCols = increments.get(rowEntry.getKey());
                if (incrCols == null) {
                    incrCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
                    increments.put(rowEntry.getKey(), incrCols);
                }
                incrCols.put(colEntry.getKey(), ((IncrementValue) val).getValue());
            } else if (val instanceof PutValue) {
                NavigableMap<byte[], byte[]> putCols = puts.get(rowEntry.getKey());
                if (putCols == null) {
                    putCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
                    puts.put(rowEntry.getKey(), putCols);
                }
                putCols.put(colEntry.getKey(), ((PutValue) val).getValue());
            }
        }
    }
    for (Map.Entry<byte[], NavigableMap<byte[], Long>> incEntry : increments.entrySet()) {
        core.increment(incEntry.getKey(), incEntry.getValue());
    }
    core.persist(puts, persistedVersion);
}

From source file:co.cask.cdap.data2.dataset2.lib.table.inmemory.InMemoryMetricsTable.java

@Override
public void put(NavigableMap<byte[], NavigableMap<byte[], Long>> updates) {
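    // Rebuild the updates in a byte-ordered map, turning each Long delta into a put Update via Updates.LONG_TO_PUTS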
    NavigableMap<byte[], NavigableMap<byte[], Update>> convertedUpdates = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], NavigableMap<byte[], Long>> entry : updates.entrySet()) {
        convertedUpdates.put(entry.getKey(), Maps.transformValues(entry.getValue(), Updates.LONG_TO_PUTS));
    }
    InMemoryTableService.merge(tableName, convertedUpdates, System.currentTimeMillis());
}

From source file:org.apache.crunch.impl.mr.plan.Edge.java

public Map<NodePath, PCollectionImpl> getSplitPoints(boolean breakpointsOnly) {
    List<NodePath> np = Lists.newArrayList(paths);
    List<PCollectionImpl<?>> smallestOverallPerPath = Lists.newArrayListWithExpectedSize(np.size());
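    // pathCounts: for each candidate collection (ordered by the PCOL_CMP comparator), the set of path indexes containing it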
    Map<PCollectionImpl<?>, Set<Integer>> pathCounts = Maps.newTreeMap(PCOL_CMP);
    Map<NodePath, PCollectionImpl> splitPoints = Maps.newHashMap();
    for (int i = 0; i < np.size(); i++) {
        long bestSize = Long.MAX_VALUE;
        boolean breakpoint = false;
        PCollectionImpl<?> best = null;
        for (PCollectionImpl<?> pc : np.get(i)) {
            if (!(pc instanceof BaseGroupedTable) && (!breakpointsOnly || pc.isBreakpoint())) {
                if (pc.isBreakpoint()) {
                    if (!breakpoint || pc.getSize() < bestSize) {
                        best = pc;
                        bestSize = pc.getSize();
                        breakpoint = true;
                    }
                } else if (!breakpoint && pc.getSize() < bestSize) {
                    best = pc;
                    bestSize = pc.getSize();
                }
                Set<Integer> cnts = pathCounts.get(pc);
                if (cnts == null) {
                    cnts = Sets.newHashSet();
                    pathCounts.put(pc, cnts);
                }
                cnts.add(i);
            }
        }
        smallestOverallPerPath.add(best);
        if (breakpoint) {
            splitPoints.put(np.get(i), best);
        }
    }

    Set<Integer> missing = Sets.newHashSet();
    for (int i = 0; i < np.size(); i++) {
        if (!splitPoints.containsKey(np.get(i))) {
            missing.add(i);
        }
    }

    if (breakpointsOnly && missing.size() > 0) {
        // We can't create new splits in this mode
        return ImmutableMap.of();
    } else if (missing.isEmpty()) {
        return splitPoints;
    } else {
        // Need to either choose the smallest collection from each missing path,
        // or the smallest single collection that is on all paths as the split target.
        Set<PCollectionImpl<?>> smallest = Sets.newHashSet();
        long smallestSize = 0;
        for (Integer id : missing) {
            PCollectionImpl<?> s = smallestOverallPerPath.get(id);
            if (!smallest.contains(s)) {
                smallest.add(s);
                smallestSize += s.getSize();
            }
        }

        PCollectionImpl<?> singleBest = null;
        long singleSmallestSize = Long.MAX_VALUE;
        for (Map.Entry<PCollectionImpl<?>, Set<Integer>> e : pathCounts.entrySet()) {
            if (Sets.difference(missing, e.getValue()).isEmpty() && e.getKey().getSize() < singleSmallestSize) {
                singleBest = e.getKey();
                singleSmallestSize = singleBest.getSize();
            }
        }

        if (smallestSize < singleSmallestSize) {
            for (Integer id : missing) {
                splitPoints.put(np.get(id), smallestOverallPerPath.get(id));
            }
        } else {
            for (Integer id : missing) {
                splitPoints.put(np.get(id), singleBest);
            }
        }
    }
    return splitPoints;
}

From source file:com.google.caliper.model.VmSpec.java

private VmSpec(Builder builder) {
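    // Assuming the builder fields are SortedMaps, this resolves to the copying newTreeMap(SortedMap) overload, which keeps their ordering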
    this.properties = Maps.newTreeMap(builder.properties);
    this.options = Maps.newTreeMap(builder.options);
}

From source file:io.druid.server.http.IntervalsResource.java

@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getIntervals(@Context final HttpServletRequest req) {
    final Comparator<Interval> comparator = Comparators.inverse(Comparators.intervalsByStartThenEnd());
    final Set<DruidDataSource> datasources = authConfig.isEnabled()
            ? InventoryViewUtils.getSecuredDataSources(serverInventoryView,
                    (AuthorizationInfo) req.getAttribute(AuthConfig.DRUID_AUTH_TOKEN))
            : InventoryViewUtils.getDataSources(serverInventoryView);

    // Intervals ordered newest-first, since the start/end comparator is inverted above
    final Map<Interval, Map<String, Map<String, Object>>> retVal = Maps.newTreeMap(comparator);
    for (DruidDataSource dataSource : datasources) {
        for (DataSegment dataSegment : dataSource.getSegments()) {
            Map<String, Map<String, Object>> interval = retVal.get(dataSegment.getInterval());
            if (interval == null) {
                Map<String, Map<String, Object>> tmp = Maps.newHashMap();
                retVal.put(dataSegment.getInterval(), tmp);
            }
            setProperties(retVal, dataSource, dataSegment);
        }
    }

    return Response.ok(retVal).build();
}

From source file:com.google.caliper.model.Host.java

private Host(Builder builder) {
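    // Presumably the copying newTreeMap(SortedMap) overload again, as in VmSpec above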
    this.properties = Maps.newTreeMap(builder.properties);
    // eagerly initialize hash to allow for the test-only hash function
    initHash(builder.hashFunction);
}

From source file:org.gradle.plugins.ide.internal.tooling.BuildInvocationsBuilder.java

@SuppressWarnings("StringEquality")
public DefaultBuildInvocations buildAll(String modelName, Project project) {
    if (!canBuild(modelName)) {
        throw new GradleException("Unknown model name " + modelName);
    }

    // construct task selectors
    List<LaunchableGradleTaskSelector> selectors = Lists.newArrayList();
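    // Selectors keyed by name and iterated in natural (alphabetical) order below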
    Map<String, LaunchableGradleTaskSelector> selectorsByName = Maps.newTreeMap(Ordering.natural());
    Set<String> visibleTasks = Sets.newLinkedHashSet();
    findTasks(project, selectorsByName, visibleTasks);
    for (String selectorName : selectorsByName.keySet()) {
        LaunchableGradleTaskSelector selector = selectorsByName.get(selectorName);
        selectors.add(selector.setName(selectorName).setTaskName(selectorName).setProjectPath(project.getPath())
                .setDisplayName(String.format("%s in %s and subprojects.", selectorName, project.toString()))
                .setPublic(visibleTasks.contains(selectorName)));
    }

    // construct project tasks
    List<LaunchableGradleTask> projectTasks = tasks(project);

    // construct build invocations from task selectors and project tasks
    return new DefaultBuildInvocations().setSelectors(selectors).setTasks(projectTasks);
}

From source file:co.cask.cdap.data2.transaction.stream.leveldb.LevelDBStreamConsumerStateStore.java

@Override
protected void delete(byte[] row, Set<byte[]> columns) throws IOException {
    if (columns.isEmpty()) {
        return;
    }
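    // Byte-ordered set of columns to delete, modeled as a map with placeholder values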
    Map<byte[], byte[]> deleteColumns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    for (byte[] column : columns) {
        deleteColumns.put(column, Bytes.EMPTY_BYTE_ARRAY); // Value doesn't matter
    }
    Map<byte[], Map<byte[], byte[]>> undoes = ImmutableSortedMap
            .<byte[], Map<byte[], byte[]>>orderedBy(Bytes.BYTES_COMPARATOR).put(row, deleteColumns).build();

    tableCore.undo(undoes, Long.MAX_VALUE);
}

From source file:org.apache.druid.server.http.IntervalsResource.java

@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getIntervals(@Context final HttpServletRequest req) {
    final Comparator<Interval> comparator = Comparators.inverse(Comparators.intervalsByStartThenEnd());
    final Set<ImmutableDruidDataSource> datasources = InventoryViewUtils.getSecuredDataSources(req,
            serverInventoryView, authorizerMapper);

    // Same newest-first interval ordering as in the io.druid variant above
    final Map<Interval, Map<String, Map<String, Object>>> retVal = Maps.newTreeMap(comparator);
    for (ImmutableDruidDataSource dataSource : datasources) {
        for (DataSegment dataSegment : dataSource.getSegments()) {
            Map<String, Map<String, Object>> interval = retVal.get(dataSegment.getInterval());
            if (interval == null) {
                Map<String, Map<String, Object>> tmp = Maps.newHashMap();
                retVal.put(dataSegment.getInterval(), tmp);
            }
            setProperties(retVal, dataSource, dataSegment);
        }
    }

    return Response.ok(retVal).build();
}