Usage examples for com.google.common.collect.Maps.newTreeMap
public static <K extends Comparable, V> TreeMap<K, V> newTreeMap()
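Before the real-world examples below, here is a minimal sketch of the method's basic behavior: Maps.newTreeMap() returns a mutable, empty TreeMap that sorts keys by their natural ordering. The class and variable names in this sketch are illustrative, not taken from the examples that follow.

import java.util.TreeMap;
import com.google.common.collect.Maps;

public class NewTreeMapDemo {
    public static void main(String[] args) {
        // Creates a mutable, empty TreeMap using the natural ordering of its keys
        TreeMap<String, Integer> counts = Maps.newTreeMap();
        counts.put("banana", 3);
        counts.put("apple", 5);
        counts.put("cherry", 1);

        // Keys iterate in sorted order regardless of insertion order
        System.out.println(counts.firstKey()); // apple
        System.out.println(counts);            // {apple=5, banana=3, cherry=1}
    }
}

Since Java 7, the diamond-inferred new TreeMap<>() serves the same purpose; the factory mainly saved type repetition on older language levels.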
From source file:com.netflix.servo.examples.JvmMetricExample.java
public static void main(String[] args) throws Exception {
    // Filter used to identify metrics that are counters
    NavigableMap<String, MetricFilter> counters = Maps.newTreeMap();

    // ClassLoadingMXBean
    counters.put("LoadedClassCount", BasicMetricFilter.MATCH_ALL);
    counters.put("TotalLoadedClassCount", BasicMetricFilter.MATCH_ALL);
    counters.put("UnloadedClassCount", BasicMetricFilter.MATCH_ALL);

    // CompilationMXBean
    counters.put("TotalCompilationTime", BasicMetricFilter.MATCH_ALL);

    // GarbageCollectorMXBean
    counters.put("CollectionCount", BasicMetricFilter.MATCH_ALL);
    counters.put("CollectionTime", BasicMetricFilter.MATCH_ALL);

    // MemoryPoolMXBean
    counters.put("CollectionUsageThresholdCount", BasicMetricFilter.MATCH_ALL);
    counters.put("UsageThresholdCount", BasicMetricFilter.MATCH_ALL);

    // RuntimeMXBean
    counters.put("Uptime", BasicMetricFilter.MATCH_ALL);

    // ThreadMXBean
    counters.put("TotalStartedThreadCount", BasicMetricFilter.MATCH_ALL);

    // Create prefix filter on the metric name, default to match none if
    // no match is found so that by default metrics will be GAUGEs
    MetricFilter counterFilter = new PrefixMetricFilter(null, // Tag key, null means use metric name
            BasicMetricFilter.MATCH_NONE, // Root filter if no better match
            counters); // Specific filters

    // Create a new poller for the local JMX server that queries all
    // metrics from the java.lang domain
    MetricPoller poller = new JmxMetricPoller(new LocalJmxConnector(),
            new ObjectName("java.lang:type=*,*"), counterFilter);

    // Filter to restrict the set of metrics returned, in this case ignore
    // many of the flags indicating whether or not certain features are
    // supported or enabled
    MetricFilter filter = new RegexMetricFilter(null, // Tag key, null means use metric name
            Pattern.compile(".*Supported$|.*Enabled$|^Valid$|^Verbose$"),
            false, // Match if the tag is missing
            true); // Invert the pattern match

    // Create a new observer that records observations to files in the
    // current working directory
    MetricObserver observer = new FileMetricObserver("jvmstats", new File("."));

    // Sampling interval
    final long samplingInterval = 10;
    TimeUnit samplingUnit = TimeUnit.SECONDS;

    // Transform used to convert counter metrics into a rate per second
    MetricObserver transform = new CounterToRateMetricTransform(observer, 2 * samplingInterval, samplingUnit);

    // Schedule metrics to be collected in the background every 10 seconds
    PollRunnable task = new PollRunnable(poller, filter, transform);
    PollScheduler scheduler = PollScheduler.getInstance();
    scheduler.start();
    scheduler.addPoller(task, samplingInterval, samplingUnit);

    // Do main work of program
    while (true) {
        System.out.println("Doing work...");
        Thread.sleep(samplingUnit.toMillis(samplingInterval));
    }
}
From source file:cc.twittertools.util.VerifySubcollection.java
@SuppressWarnings("static-access")
public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OptionBuilder.withArgName("dir").hasArg().withDescription("source collection directory")
            .create(COLLECTION_OPTION));
    options.addOption(
            OptionBuilder.withArgName("file").hasArg().withDescription("list of tweetids").create(ID_OPTION));

    CommandLine cmdline = null;
    CommandLineParser parser = new GnuParser();
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        System.exit(-1);
    }

    if (!cmdline.hasOption(COLLECTION_OPTION) || !cmdline.hasOption(ID_OPTION)) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(ExtractSubcollection.class.getName(), options);
        System.exit(-1);
    }

    String collectionPath = cmdline.getOptionValue(COLLECTION_OPTION);

    LongOpenHashSet tweetids = new LongOpenHashSet();
    File tweetidsFile = new File(cmdline.getOptionValue(ID_OPTION));
    if (!tweetidsFile.exists()) {
        System.err.println("Error: " + tweetidsFile + " does not exist!");
        System.exit(-1);
    }
    LOG.info("Reading tweetids from " + tweetidsFile);

    FileInputStream fin = new FileInputStream(tweetidsFile);
    BufferedReader br = new BufferedReader(new InputStreamReader(fin));

    String s;
    while ((s = br.readLine()) != null) {
        tweetids.add(Long.parseLong(s));
    }
    br.close();
    fin.close();
    LOG.info("Read " + tweetids.size() + " tweetids.");

    File file = new File(collectionPath);
    if (!file.exists()) {
        System.err.println("Error: " + file + " does not exist!");
        System.exit(-1);
    }

    LongOpenHashSet seen = new LongOpenHashSet();
    TreeMap<Long, String> tweets = Maps.newTreeMap();

    PrintStream out = new PrintStream(System.out, true, "UTF-8");
    StatusStream stream = new JsonStatusCorpusReader(file);
    Status status;
    int cnt = 0;
    while ((status = stream.next()) != null) {
        if (!tweetids.contains(status.getId())) {
            LOG.error("tweetid " + status.getId() + " doesn't belong in collection");
            continue;
        }
        if (seen.contains(status.getId())) {
            LOG.error("tweetid " + status.getId() + " already seen!");
            continue;
        }
        tweets.put(status.getId(), status.getJsonObject().toString());
        seen.add(status.getId());
        cnt++;
    }
    LOG.info("total of " + cnt + " tweets in subcollection.");

    for (Map.Entry<Long, String> entry : tweets.entrySet()) {
        out.println(entry.getValue());
    }

    stream.close();
    out.close();
}
From source file:cosmos.example.BuildingPermitsExample.java
public static void main(String[] args) throws Exception {
    BuildingPermitsExample example = new BuildingPermitsExample();
    new JCommander(example, args);

    File inputFile = new File(example.fileName);

    Preconditions.checkArgument(inputFile.exists() && inputFile.isFile() && inputFile.canRead(),
            "Expected " + example.fileName + " to be a readable file");

    String zookeepers;
    String instanceName;
    Connector connector;
    MiniAccumuloCluster mac = null;
    File macDir = null;

    // Use the MiniAccumuloCluster if requested
    if (example.useMiniAccumuloCluster) {
        macDir = Files.createTempDir();
        String password = "password";
        MiniAccumuloConfig config = new MiniAccumuloConfig(macDir, password);
        config.setNumTservers(1);

        mac = new MiniAccumuloCluster(config);
        mac.start();

        zookeepers = mac.getZooKeepers();
        instanceName = mac.getInstanceName();

        ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zookeepers);
        connector = instance.getConnector("root", new PasswordToken(password));
    } else {
        // Otherwise connect to a running instance
        zookeepers = example.zookeepers;
        instanceName = example.instanceName;

        ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zookeepers);
        connector = instance.getConnector(example.username, new PasswordToken(example.password));
    }

    // Instantiate an instance of Cosmos
    Cosmos cosmos = new CosmosImpl(zookeepers);

    // Create a definition for the data we want to load
    Store id = Store.create(connector, new Authorizations(), AscendingIndexIdentitySet.create());

    // Register the definition with Cosmos so it can track its progress.
    cosmos.register(id);

    // Load all of the data from our inputFile
    LoadBuildingPermits loader = new LoadBuildingPermits(cosmos, id, inputFile);
    loader.run();

    // Finalize the SortableResult which will prevent future writes to the data set
    cosmos.finalize(id);

    // Flush the ingest traces to the backend so we can see the results
    id.sendTraces();

    // Get back the Set of Columns that we've ingested.
    Set<Column> schema = Sets.newHashSet(cosmos.columns(id));

    log.debug("\nColumns: " + schema);

    Iterator<Column> iter = schema.iterator();
    while (iter.hasNext()) {
        Column c = iter.next();
        // Remove the internal ID field and columns that begin with CONTRACTOR_
        if (c.equals(LoadBuildingPermits.ID) || c.name().startsWith("CONTRACTOR_")) {
            iter.remove();
        }
    }

    Iterable<Index> indices = Iterables.transform(schema, new Function<Column, Index>() {
        @Override
        public Index apply(Column col) {
            return Index.define(col);
        }
    });

    // Ensure that we have locality groups set as we expect
    log.info("Ensure locality groups are set");
    id.optimizeIndices(indices);

    // Compact down the data for this SortableResult
    log.info("Issuing compaction for relevant data");
    id.consolidate();

    final int numTopValues = 10;

    // Walk through each column in the result set
    for (Column c : schema) {
        Stopwatch sw = new Stopwatch();
        sw.start();

        // Get the number of times we've seen each value in a given column
        CloseableIterable<Entry<RecordValue<?>, Long>> groupingsInColumn = cosmos.groupResults(id, c);

        log.info(c.name() + ":");

        // Iterate over the counts, collecting the top N values in each column;
        // the TreeMap keeps candidates sorted by count so the smallest can be evicted
        TreeMap<Long, RecordValue<?>> topValues = Maps.newTreeMap();

        for (Entry<RecordValue<?>, Long> entry : groupingsInColumn) {
            if (topValues.size() == numTopValues) {
                Entry<Long, RecordValue<?>> least = topValues.pollFirstEntry();
                if (least.getKey() < entry.getValue()) {
                    topValues.put(entry.getValue(), entry.getKey());
                } else {
                    topValues.put(least.getKey(), least.getValue());
                }
            } else if (topValues.size() < numTopValues) {
                topValues.put(entry.getValue(), entry.getKey());
            }
        }

        for (Long key : topValues.descendingKeySet()) {
            log.info(topValues.get(key).value() + " occurred " + key + " times");
        }

        sw.stop();
        log.info("Took " + sw.toString() + " to run query.\n");
    }

    log.info("Deleting records");

    // Delete the records we've ingested
    if (!example.useMiniAccumuloCluster) {
        // Because I'm lazy and don't want to wait around to run the BatchDeleter when we're just going
        // to rm -rf the directory in a few secs.
        cosmos.delete(id);
    }

    // And shut down Cosmos
    cosmos.close();
    log.info("Cosmos stopped");

    // If we were using MAC, also stop that
    if (example.useMiniAccumuloCluster && null != mac) {
        mac.stop();
        if (null != macDir) {
            FileUtils.deleteDirectory(macDir);
        }
    }
}
From source file:com.ngdata.hbaseindexer.util.solr.SolrConnectionParamUtil.java
public static List<String> getShards(Map<String, String> connectionParams) {
    Map<Integer, String> shardsByUserIndex = Maps.newTreeMap();
    for (Map.Entry<String, String> param : connectionParams.entrySet()) {
        if (param.getKey().startsWith(SolrConnectionParams.SOLR_SHARD_PREFIX)) {
            Integer index = Integer
                    .valueOf(param.getKey().substring(SolrConnectionParams.SOLR_SHARD_PREFIX.length()));
            shardsByUserIndex.put(index, param.getValue());
        }
    }
    return Lists.newArrayList(shardsByUserIndex.values());
}
From source file:com.spotify.helios.cli.command.JobStatusFetcher.java
public static Map<JobId, ListenableFuture<JobStatus>> getJobsStatuses(HeliosClient client, Set<JobId> jobIds)
        throws InterruptedException {
    final Map<JobId, ListenableFuture<JobStatus>> futures = Maps.newTreeMap();
    try {
        final Map<JobId, JobStatus> statuses = client.jobStatuses(jobIds).get();
        for (final Entry<JobId, JobStatus> entry : statuses.entrySet()) {
            futures.put(entry.getKey(), Futures.immediateFuture(entry.getValue()));
        }
    } catch (final ExecutionException e) {
        System.err.println("Warning: masters failed batch status fetching. Falling back to"
                + " slower job status method");
        for (final JobId jobId : jobIds) {
            futures.put(jobId, client.jobStatus(jobId));
        }
    }
    return futures;
}
From source file:scoutdoc.main.structure.TaskUtility.java
public static Task toTask(String filename) throws FileNotFoundException, IOException {
    Task t = new Task();
    // Creates a mutable, empty TreeMap instance using the natural ordering of its elements.
    Map<String, Page> pages = Maps.newTreeMap();

    Properties properties = new Properties();
    properties.load(new FileInputStream(filename));
    for (Entry<Object, Object> e : properties.entrySet()) {
        String key = (String) e.getKey();
        String value = (String) e.getValue();
        if (PROP_OUTPUT_FOLDER.equals(key)) {
            t.setOutputFolder(value);
        } else if (PROP_OUTPUT_TITLE.equals(key)) {
            t.setOutputTitle(value);
        } else if (PROP_OUTPUT_TOC_FILE.equals(key)) {
            t.setOutputTocFile(value);
        } else if (PROP_OUTPUT_CHECK_FILE.equals(key)) {
            t.setOutputCheckstyleFile(value);
        } else if (key.startsWith(PAGES_PREFIX)) {
            pages.put(key, Pages.get(value));
        } else {
            throw new IllegalStateException("Unknown property <" + key + ">");
        }
    }
    t.setInputPages(Lists.newArrayList(pages.values()));
    return t;
}
From source file:org.ojai.util.Documents.java
/**
 * Compares two documents for equality.
 * @param d1 the first document to compare
 * @param d2 the second document to compare
 * @return {@code true} if both the documents are equal,
 *         {@code false} otherwise.
 */
public static boolean equals(Document d1, Document d2) {
    if (d1 == d2) {
        return true; // both are null or same reference
    } else if (d1 == null || d2 == null || d1.size() != d2.size()) {
        return false;
    } else {
        Map<String, Value> keyValues = Maps.newTreeMap();
        Iterator<Entry<String, Value>> i = d2.iterator();
        while (i.hasNext()) {
            Entry<String, Value> e = i.next();
            keyValues.put(e.getKey(), e.getValue());
        }
        Iterator<Entry<String, Value>> j = d1.iterator();
        while (j.hasNext()) {
            Entry<String, Value> e = j.next();
            String k = e.getKey();
            Value v = keyValues.get(k);
            if (v == null || !e.getValue().equals(v)) {
                return false;
            }
        }
    }
    return true;
}
From source file:ivory.core.tokenize.DocumentProcessingUtils.java
public static SortedMap<Integer, int[]> integerizeTermDocVector(TermDocVector doc, Dictionary termIDMap) {
    SortedMap<Integer, int[]> positions = Maps.newTreeMap();

    TermDocVector.Reader reader = null;
    try {
        reader = doc.getReader();
    } catch (IOException e1) {
        throw new RuntimeException("Error getting TermDocVectorReader: " + e1.getMessage());
    }

    while (reader.hasMoreTerms()) {
        int termid = termIDMap.getId(reader.nextTerm());
        if (termid <= 0) {
            continue;
        }
        positions.put(termid, reader.getPositions());
    }

    return positions;
}
From source file:com.google.caliper.worker.handler.HostDevice.java
/** Gets a selection of properties about the device this worker is running on. */
static Map<String, String> getProperties() {
    TreeMap<String, String> propertyMap = Maps.newTreeMap();

    Map<String, String> sysProps = Maps.fromProperties(System.getProperties());

    // Sometimes java.runtime.version is more descriptive than java.version
    String version = sysProps.get("java.version");
    String alternateVersion = sysProps.get("java.runtime.version");
    if (alternateVersion != null && alternateVersion.length() > version.length()) {
        version = alternateVersion;
    }

    propertyMap.put("host.availableProcessors", Integer.toString(Runtime.getRuntime().availableProcessors()));

    String osName = sysProps.get("os.name");
    propertyMap.put("os.name", osName);
    propertyMap.put("os.version", sysProps.get("os.version"));
    propertyMap.put("os.arch", sysProps.get("os.arch"));

    if (osName.equals("Linux")) {
        getLinuxEnvironment(propertyMap);
    }

    return propertyMap;
}
From source file:com.google.wave.api.data.converter.EventDataConverterModule.java
/**
 * @return A singleton instance of a {@link EventDataConverterManager}.
 */
@Singleton
@Provides
static EventDataConverterManager provideEventDataConverterManager() {
    // v0.1 till v0.21 use the same event data converter.
    NavigableMap<ProtocolVersion, EventDataConverter> converters = Maps.newTreeMap();
    EventDataConverterV21 eventDataConverterV21 = new EventDataConverterV21();
    converters.put(ProtocolVersion.V1, eventDataConverterV21);
    converters.put(ProtocolVersion.V2, eventDataConverterV21);
    converters.put(ProtocolVersion.V2_1, eventDataConverterV21);
    converters.put(ProtocolVersion.V2_2, new EventDataConverterV22());
    return new EventDataConverterManager(converters);
}