Example usage for com.google.common.collect Maps newTreeMap

Introduction

On this page you can find example usage for com.google.common.collect Maps newTreeMap.

Prototype

public static <K extends Comparable, V> TreeMap<K, V> newTreeMap() 

Document

Creates a mutable, empty TreeMap instance using the natural ordering of its elements.
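As a quick orientation before the real-world examples below, here is a minimal sketch of the call, assuming only Guava on the classpath (the class and variable names are illustrative):

import com.google.common.collect.Maps;

import java.util.TreeMap;

public class NewTreeMapExample {
    public static void main(String[] args) {
        // Mutable, empty TreeMap ordered by the keys' natural ordering.
        TreeMap<String, Integer> counts = Maps.newTreeMap();
        counts.put("banana", 2);
        counts.put("apple", 1);
        // Keys iterate in ascending order regardless of insertion order.
        System.out.println(counts.keySet()); // [apple, banana]
    }
}

The key type must implement Comparable; for a custom ordering, Guava also provides a Maps.newTreeMap(Comparator) overload.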

Usage

From source file:com.google.gdt.eclipse.designer.uibinder.model.widgets.UIObjectSizeSupport.java

/**
 * @return {@link Boolean} result of executing script.
 */
private boolean executeBooleanScript(String scriptName) throws Exception {
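    // The sorted order is incidental here; newTreeMap() simply supplies a mutable map for the named script variables.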
    Map<String, Object> variables = Maps.newTreeMap();
    variables.put("model", m_object);
    variables.put("object", m_object.getObject());
    String script = XmlObjectUtils.getParameter(m_object, scriptName);
    return (Boolean) m_object.getUIObjectUtils().executeScript(script, variables);
}

From source file:com.google.gerrit.sshd.commands.ShowCaches.java

@Override
protected void run() {
    nw = columns - 50;
    Date now = new Date();
    stdout.format("%-25s %-20s      now  %16s\n", "Gerrit Code Review",
            Version.getVersion() != null ? Version.getVersion() : "",
            new SimpleDateFormat("HH:mm:ss   zzz").format(now));
    stdout.format("%-25s %-20s   uptime %16s\n", "", "", uptime(now.getTime() - serverStarted));
    stdout.print('\n');

    stdout.print(String.format(//
            "%1s %-" + nw + "s|%-21s|  %-5s |%-9s|\n" //
            , "" //
            , "Name" //
            , "Entries" //
            , "AvgGet" //
            , "Hit Ratio" //
    ));
    stdout.print(String.format(//
            "%1s %-" + nw + "s|%6s %6s %7s|  %-5s  |%-4s %-4s|\n" //
            , "" //
            , "" //
            , "Mem" //
            , "Disk" //
            , "Space" //
            , "" //
            , "Mem" //
            , "Disk" //
    ));
    stdout.print("--");
    for (int i = 0; i < nw; i++) {
        stdout.print('-');
    }
    stdout.print("+---------------------+---------+---------+\n");

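    // TreeMap keyed by cache name, so the disk-cache rows below print in alphabetical order.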
    Map<String, H2CacheImpl<?, ?>> disks = Maps.newTreeMap();
    printMemoryCaches(disks, sortedCoreCaches());
    printMemoryCaches(disks, sortedPluginCaches());
    for (Map.Entry<String, H2CacheImpl<?, ?>> entry : disks.entrySet()) {
        H2CacheImpl<?, ?> cache = entry.getValue();
        CacheStats stat = cache.stats();
        H2CacheImpl.DiskStats disk = cache.diskStats();
        stdout.print(String.format("D %-" + nw + "s|%6s %6s %7s| %7s |%4s %4s|\n", entry.getKey(),
                count(cache.size()), count(disk.size()), bytes(disk.space()),
                duration(stat.averageLoadPenalty()), percent(stat.hitCount(), stat.requestCount()),
                percent(disk.hitCount(), disk.requestCount())));
    }
    stdout.print('\n');

    if (gc) {
        System.gc();
        System.runFinalization();
        System.gc();
    }

    sshSummary();
    taskSummary();
    memSummary();

    if (showJVM) {
        jvmSummary();
    }

    stdout.flush();
}

From source file:com.chiorichan.http.Routes.java

public Route searchRoutes(String uri, String domain, String subdomain) throws IOException {
    synchronized (this) {
        File routesFile = new File(site.directory(), "routes");

        if (routes.size() < 1 || System.currentTimeMillis() - lastRequest > 2500) {
            routes.clear();

            try {
                if (routesFile.exists()) {
                    String contents = FileUtils.readFileToString(routesFile);
                    for (String l : contents.split("\n"))
                        try {
                            if (!l.startsWith("#"))
                                routes.add(new Route(l, site));
                        } catch (IOException e1) {
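                            // Malformed route lines are skipped silently.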

                        }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }

            try {
                SQLDatastore sql = AppConfig.get().getDatabase();

                if (sql != null && sql.initalized()) {
                    if (!sql.table("pages").exists()) {
                        Log.get().info(
                                "We detected the non-existence of table 'pages' in the server database, we will attempt to create it now.");

                        SQLTable table = sql.table("pages");
                        table.addColumnVar("site", 255);
                        table.addColumnVar("domain", 255);
                        table.addColumnVar("page", 255);
                        table.addColumnVar("title", 255);
                        table.addColumnVar("reqlevel", 255, "-1");
                        table.addColumnVar("theme", 255);
                        table.addColumnVar("view", 255);
                        table.addColumnText("html");
                        table.addColumnVar("file", 255);
                    }

                    // ResultSet rs = sql.query( "SELECT * FROM `pages` WHERE (subdomain = '" + subdomain + "' OR subdomain = '') AND domain = '" + domain + "' UNION SELECT * FROM `pages` WHERE (subdomain = '" + subdomain +
                    // "' OR subdomain = '') AND domain = '';" );

                    SQLQuerySelect result = sql.table("pages").select().where("domain").matches(domain).or()
                            .where("domain").matches("").execute();

                    for (Map<String, Object> row : result.set())
                        routes.add(new Route(
                                new MapCaster<String, String>(String.class, String.class).castTypes(row),
                                site));

                    // ResultSet rs = sql.query( "SELECT * FROM `pages` WHERE domain = '" + domain + "' OR domain = '';" );
                    // if ( sql.getRowCount( rs ) > 0 )
                    // do
                    // routes.add( new Route( rs, site ) );
                    // while ( rs.next() );
                }
            } catch (SQLException e) {
                throw new IOException(e);
            }
        }
        lastRequest = System.currentTimeMillis();

        if (routes.size() > 0) {
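            // TreeMap keeps candidates sorted by weight key; the first value is taken as the best match.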
            Map<String, Route> matches = Maps.newTreeMap();
            int keyInter = 0;

            for (Route route : routes) {
                String weight = route.match(domain, subdomain, uri);
                if (weight != null) {
                    matches.put(weight + keyInter, route);
                    keyInter++;
                }
            }

            if (matches.size() > 0)
                return (Route) matches.values().toArray()[0];
            else
                Log.get().fine("Failed to find a page redirect for... '" + subdomain + "." + domain + "' '"
                        + uri + "'");
        } else
            Log.get().fine(
                    "Failed to find a page redirect for... '" + subdomain + "." + domain + "' '" + uri + "'");

        return null;
    }
}

From source file:org.jbb.system.web.logging.controller.AcpLoggerController.java

private Map<String, Boolean> prepareAppendersMap(LoggingConfiguration loggingConfiguration,
        AppLogger appLogger) {
    AppLogger targetLogger = Optional.ofNullable(appLogger).orElse(new AppLogger());

    Map<String, Boolean> consoleAppenders = loggingConfiguration.getConsoleAppenders().stream()
            .collect(Collectors.toMap(LogConsoleAppender::getName, appender -> targetLogger.getAppenders()
                    .stream().anyMatch(app -> app.getName().equals(appender.getName()))));

    Map<String, Boolean> fileAppenders = loggingConfiguration.getFileAppenders().stream()
            .collect(Collectors.toMap(LogFileAppender::getName, appender -> targetLogger.getAppenders().stream()
                    .anyMatch(app -> app.getName().equals(appender.getName()))));

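    // Merge both appender maps into a single map sorted by appender name.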
    Map<String, Boolean> result = Maps.newTreeMap();
    result.putAll(consoleAppenders);
    result.putAll(fileAppenders);

    return result;
}

From source file:com.facebook.buck.features.project.intellij.IjProjectWriter.java

private Map<String, Map<String, String>> readTargetInfoMap() throws IOException {
    Path targetInfoMapPath = getTargetInfoMapPath();
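    // Fall back to an empty, key-sorted map when the target info file does not exist yet.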
    return outFilesystem.exists(targetInfoMapPath)
            ? ObjectMappers.createParser(outFilesystem.newFileInputStream(targetInfoMapPath))
                    .readValueAs(new TypeReference<TreeMap<String, TreeMap<String, String>>>() {
                    })
            : Maps.newTreeMap();
}

From source file:org.splevo.jamopp.vpm.mergedecider.JaMoPPMergeDecider.java

private TreeMap<Integer, Statement> indexStatementPosition(List<Statement> statements) {
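    // Positions serve as sorted keys, so callers can walk the statements in source order.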
    TreeMap<Integer, Statement> positionIndex = Maps.newTreeMap();
    for (Statement statement : statements) {
        int pos = JaMoPPElementUtil.getPositionInContainer(statement);
        positionIndex.put(pos, statement);
    }
    return positionIndex;
}

From source file:org.eclipse.wb.internal.core.model.generic.ContainerObjectValidators.java

private static boolean validateContainer(String expression, Object container) {
    Map<String, Object> variables = Maps.newTreeMap();
    variables.put("container", container);
    return evaluate(expression, variables);
}

From source file:com.metamx.druid.client.CachingClusteredClient.java

@Override
public Sequence<T> run(final Query<T> query) {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);

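    // newTreeMap() requires Comparable keys; segment lists are grouped per server in natural order.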
    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();

    final List<Pair<DateTime, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();

    final boolean useCache = Boolean.parseBoolean(query.getContextValue("useCache", "true"))
            && strategy != null;
    final boolean populateCache = Boolean.parseBoolean(query.getContextValue("populateCache", "true"))
            && strategy != null;
    final boolean isBySegment = Boolean.parseBoolean(query.getContextValue("bySegment", "false"));

    ImmutableMap.Builder<String, String> contextBuilder = new ImmutableMap.Builder<String, String>();

    final String priority = query.getContextValue("priority", "0");
    contextBuilder.put("priority", priority);

    if (populateCache) {
        contextBuilder.put("bySegment", "true");
    }
    contextBuilder.put("intermediate", "true");

    final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());

    VersionedIntervalTimeline<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
        return Sequences.empty();
    }

    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();

    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();

    for (Interval interval : rewrittenQuery.getIntervals()) {
        serversLookup.addAll(timeline.lookup(interval));
    }

    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup = toolChest
            .filterSegments(query, serversLookup);

    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
        for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
            ServerSelector selector = chunk.getObject();
            final SegmentDescriptor descriptor = new SegmentDescriptor(holder.getInterval(),
                    holder.getVersion(), chunk.getChunkNumber());

            segments.add(Pair.of(selector, descriptor));
        }
    }

    final byte[] queryCacheKey;
    if (strategy != null) {
        queryCacheKey = strategy.computeCacheKey(query);
    } else {
        queryCacheKey = null;
    }

    // Pull cached segments from cache and remove from set of segments to query
    if (useCache && queryCacheKey != null) {
        Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newHashMap();
        for (Pair<ServerSelector, SegmentDescriptor> e : segments) {
            cacheKeys.put(e, computeSegmentCacheKey(e.lhs.getSegment().getIdentifier(), e.rhs, queryCacheKey));
        }

        Map<Cache.NamedKey, byte[]> cachedValues = cache.getBulk(cacheKeys.values());

        for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
            Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
            Cache.NamedKey segmentCacheKey = entry.getValue();

            final ServerSelector selector = segment.lhs;
            final SegmentDescriptor descriptor = segment.rhs;
            final Interval segmentQueryInterval = descriptor.getInterval();

            final byte[] cachedValue = cachedValues.get(segmentCacheKey);

            if (cachedValue != null) {
                cachedResults.add(Pair.of(segmentQueryInterval.getStart(), cachedValue));

                // remove cached segment from set of segments to query
                segments.remove(segment);
            } else {
                final String segmentIdentifier = selector.getSegment().getIdentifier();
                cachePopulatorMap.put(String.format("%s_%s", segmentIdentifier, segmentQueryInterval),
                        new CachePopulator(cache, objectMapper, segmentCacheKey));
            }
        }
    }

    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final QueryableDruidServer queryableDruidServer = segment.lhs.pick();

        if (queryableDruidServer == null) {
            log.error("No servers found for %s?! How can this be?!", segment.rhs);
        } else {
            final DruidServer server = queryableDruidServer.getServer();
            List<SegmentDescriptor> descriptors = serverSegments.get(server);

            if (descriptors == null) {
                descriptors = Lists.newArrayList();
                serverSegments.put(server, descriptors);
            }

            descriptors.add(segment.rhs);
        }
    }

    return new LazySequence<T>(new Supplier<Sequence<T>>() {
        @Override
        public Sequence<T> get() {
            ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences = Lists.newArrayList();

            addSequencesFromServer(listOfSequences);
            addSequencesFromCache(listOfSequences);

            Collections.sort(listOfSequences,
                    Ordering.natural().onResultOf(Pair.<DateTime, Sequence<T>>lhsFn()));

            final Sequence<Sequence<T>> seq = Sequences
                    .simple(Iterables.transform(listOfSequences, Pair.<DateTime, Sequence<T>>rhsFn()));
            if (strategy == null) {
                return toolChest.mergeSequences(seq);
            } else {
                return strategy.mergeSequences(seq);
            }
        }

        private void addSequencesFromCache(ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences) {
            if (strategy == null) {
                return;
            }

            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<DateTime, byte[]> cachedResultPair : cachedResults) {
                final byte[] cachedResult = cachedResultPair.rhs;
                Sequence<Object> cachedSequence = new BaseSequence<Object, Iterator<Object>>(
                        new BaseSequence.IteratorMaker<Object, Iterator<Object>>() {
                            @Override
                            public Iterator<Object> make() {
                                try {
                                    if (cachedResult.length == 0) {
                                        return Iterators.emptyIterator();
                                    }

                                    return objectMapper.readValues(
                                            objectMapper.getJsonFactory().createJsonParser(cachedResult),
                                            cacheObjectClazz);
                                } catch (IOException e) {
                                    throw Throwables.propagate(e);
                                }
                            }

                            @Override
                            public void cleanup(Iterator<Object> iterFromMake) {
                            }
                        });
                listOfSequences.add(
                        Pair.of(cachedResultPair.lhs, Sequences.map(cachedSequence, pullFromCacheFunction)));
            }
        }

        @SuppressWarnings("unchecked")
        private void addSequencesFromServer(ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences) {
            for (Map.Entry<DruidServer, List<SegmentDescriptor>> entry : serverSegments.entrySet()) {
                final DruidServer server = entry.getKey();
                final List<SegmentDescriptor> descriptors = entry.getValue();

                final QueryRunner clientQueryable = serverView.getQueryRunner(server);
                if (clientQueryable == null) {
                    log.makeAlert("WTF!? server[%s] doesn't have a client Queryable?", server).emit();
                    continue;
                }

                final Sequence<T> resultSeqToAdd;
                final MultipleSpecificSegmentSpec segmentSpec = new MultipleSpecificSegmentSpec(descriptors);
                List<Interval> intervals = segmentSpec.getIntervals();

                if ("realtime".equals(server.getType()) || !populateCache || isBySegment) {
                    resultSeqToAdd = clientQueryable.run(query.withQuerySegmentSpec(segmentSpec));
                } else {
                    resultSeqToAdd = toolChest.mergeSequences(
                            Sequences.map(clientQueryable.run(rewrittenQuery.withQuerySegmentSpec(segmentSpec)),
                                    new Function<Object, Sequence<T>>() {
                                        private final Function<T, Object> prepareForCache = strategy
                                                .prepareForCache();

                                        @Override
                                        public Sequence<T> apply(Object input) {
                                            Result<Object> result = (Result<Object>) input;
                                            final BySegmentResultValueClass<T> value = (BySegmentResultValueClass<T>) result
                                                    .getValue();
                                            String segmentIdentifier = value.getSegmentId();
                                            final Iterable<T> segmentResults = value.getResults();

                                            cachePopulatorMap
                                                    .get(String.format("%s_%s", segmentIdentifier,
                                                            value.getInterval()))
                                                    .populate(Iterables.transform(segmentResults,
                                                            prepareForCache));

                                            return Sequences.simple(Iterables.transform(segmentResults,
                                                    toolChest.makeMetricManipulatorFn(rewrittenQuery,
                                                            new MetricManipulationFn() {
                                                                @Override
                                                                public Object manipulate(
                                                                        AggregatorFactory factory,
                                                                        Object object) {
                                                                    return factory.deserialize(object);
                                                                }
                                                            })));
                                        }
                                    }));
                }

                listOfSequences.add(Pair.of(intervals.get(0).getStart(), resultSeqToAdd));
            }
        }
    });
}

From source file:io.druid.query.BaseQuery.java

protected Map<String, Object> computeOverridenContext(Map<String, Object> overrides) {
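    // TreeMap yields a deterministic, key-sorted iteration order for the merged context.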
    Map<String, Object> overridden = Maps.newTreeMap();
    final Map<String, Object> context = getContext();
    if (context != null) {
        overridden.putAll(context);
    }
    overridden.putAll(overrides);

    return overridden;
}

From source file:org.apache.pulsar.tests.integration.topologies.PulsarCluster.java

private PulsarCluster(PulsarClusterSpec spec) {

    this.spec = spec;
    this.clusterName = spec.clusterName();
    this.network = Network.newNetwork();
    this.enablePrestoWorker = spec.enablePrestoWorker();

    if (enablePrestoWorker) {
        prestoWorkerContainer = new PrestoWorkerContainer(clusterName, PrestoWorkerContainer.NAME)
                .withNetwork(network).withNetworkAliases(PrestoWorkerContainer.NAME)
                .withEnv("clusterName", clusterName).withEnv("zkServers", ZKContainer.NAME)
                .withEnv("pulsar.zookeeper-uri", ZKContainer.NAME + ":" + ZKContainer.ZK_PORT)
                .withEnv("pulsar.broker-service-url", "http://pulsar-broker-0:8080");
    } else {
        prestoWorkerContainer = null;
    }

    this.zkContainer = new ZKContainer(clusterName);
    this.zkContainer.withNetwork(network).withNetworkAliases(ZKContainer.NAME)
            .withEnv("clusterName", clusterName).withEnv("zkServers", ZKContainer.NAME)
            .withEnv("configurationStore", CSContainer.NAME + ":" + CS_PORT).withEnv("forceSync", "no")
            .withEnv("pulsarNode", "pulsar-broker-0");

    this.csContainer = new CSContainer(clusterName).withNetwork(network).withNetworkAliases(CSContainer.NAME);

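    // Each container map is keyed and iterated by container name in sorted order.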
    this.bookieContainers = Maps.newTreeMap();
    this.brokerContainers = Maps.newTreeMap();
    this.workerContainers = Maps.newTreeMap();

    this.proxyContainer = new ProxyContainer(clusterName, ProxyContainer.NAME).withNetwork(network)
            .withNetworkAliases("pulsar-proxy").withEnv("zkServers", ZKContainer.NAME)
            .withEnv("zookeeperServers", ZKContainer.NAME)
            .withEnv("configurationStoreServers", CSContainer.NAME + ":" + CS_PORT)
            .withEnv("clusterName", clusterName);

    // create bookies
    bookieContainers.putAll(runNumContainers("bookie", spec.numBookies(),
            (name) -> new BKContainer(clusterName, name).withNetwork(network).withNetworkAliases(name)
                    .withEnv("zkServers", ZKContainer.NAME).withEnv("useHostNameAsBookieID", "true")
                    // Disable fsyncs for tests since they're slow within the containers
                    .withEnv("journalSyncData", "false").withEnv("journalMaxGroupWaitMSec", "0")
                    .withEnv("clusterName", clusterName)));

    // create brokers
    brokerContainers.putAll(runNumContainers("broker", spec.numBrokers(),
            (name) -> new BrokerContainer(clusterName, name).withNetwork(network).withNetworkAliases(name)
                    .withEnv("zkServers", ZKContainer.NAME).withEnv("zookeeperServers", ZKContainer.NAME)
                    .withEnv("configurationStoreServers", CSContainer.NAME + ":" + CS_PORT)
                    .withEnv("clusterName", clusterName)
                    .withEnv("brokerServiceCompactionMonitorIntervalInSeconds", "1")));

    spec.classPathVolumeMounts.entrySet().forEach(e -> {
        zkContainer.withClasspathResourceMapping(e.getKey(), e.getValue(), BindMode.READ_WRITE);
        proxyContainer.withClasspathResourceMapping(e.getKey(), e.getValue(), BindMode.READ_WRITE);

        bookieContainers.values()
                .forEach(c -> c.withClasspathResourceMapping(e.getKey(), e.getValue(), BindMode.READ_WRITE));
        brokerContainers.values()
                .forEach(c -> c.withClasspathResourceMapping(e.getKey(), e.getValue(), BindMode.READ_WRITE));
        workerContainers.values()
                .forEach(c -> c.withClasspathResourceMapping(e.getKey(), e.getValue(), BindMode.READ_WRITE));
    });

}