Example usage for com.google.common.collect ImmutableMap of

Introduction

This page collects example usages of com.google.common.collect ImmutableMap.of, drawn from several open-source projects.

Prototype

public static <K, V> ImmutableMap<K, V> of(K k1, V v1) 
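
The overload above creates a single-entry map; Guava also provides of overloads for zero and for multiple key/value pairs (up to ten pairs in recent releases, five in older ones), with ImmutableMap.builder() covering anything larger. A minimal sketch of the behavior, using illustrative class and variable names only:

import com.google.common.collect.ImmutableMap;

public class ImmutableMapOfDemo {
    public static void main(String[] args) {
        // One entry; a null key or value is rejected with NullPointerException.
        ImmutableMap<String, Integer> single = ImmutableMap.of("answer", 42);
        System.out.println(single.get("answer")); // 42

        // Wider overloads take more pairs directly.
        ImmutableMap<String, Integer> pair = ImmutableMap.of("a", 1, "b", 2);

        // The map is immutable: all mutators throw UnsupportedOperationException.
        try {
            pair.put("c", 3);
        } catch (UnsupportedOperationException expected) {
            System.out.println("mutation rejected");
        }
    }
}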

Usage

From source file:io.v.syncslidepresenter.Main.java

public static void main(String[] args) throws SyncbaseServer.StartException, VException, IOException {
    Options options = new Options();
    JCommander commander = new JCommander(options);
    try {
        commander.parse(args);
    } catch (ParameterException e) {
        logger.warning("Could not parse parameters: " + e.getMessage());
        commander.usage();
        return;
    }

    if (options.help) {
        commander.usage();
        return;
    }

    // Make command-Q do the same as closing the main frame (i.e. exit).
    System.setProperty("apple.eawt.quitStrategy", "CLOSE_ALL_WINDOWS");

    JFrame frame = new JFrame();
    enableOSXFullscreen(frame);
    frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
    frame.setVisible(true);

    VContext baseContext = V.init();

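    // Vanadium Permissions maps access-tag names to AccessLists; the single-entry
    // map below grants the tag "1" an ACL admitting every blessing ("...").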
    AccessList acl = new AccessList(ImmutableList.of(new BlessingPattern("...")), ImmutableList.<String>of());
    Permissions permissions = new Permissions(ImmutableMap.of("1", acl));
    String name = NamingUtil.join(options.mountPrefix, UUID.randomUUID().toString());
    logger.info("Mounting new syncbase server at " + name);
    VContext mountContext = SyncbaseServer.withNewServer(baseContext, new SyncbaseServer.Params()
            .withPermissions(permissions).withName(name).withStorageRootDir(options.storageRootDir));
    final Server server = V.getServer(mountContext);
    if (server.getStatus().getEndpoints().length > 0) {
        logger.info("Mounted syncbase server at the following endpoints: ");
        for (Endpoint e : server.getStatus().getEndpoints()) {
            logger.info("\t" + e);
        }
        logger.info("End of endpoint list");

        SyncbaseService service = Syncbase.newService("/" + server.getStatus().getEndpoints()[0]);
        SyncbaseApp app = service.getApp(SYNCBASE_APP);
        if (!app.exists(baseContext)) {
            app.create(baseContext, permissions);
        }
        Database db = app.getNoSqlDatabase(SYNCBASE_DB, null);
        if (!db.exists(baseContext)) {
            db.create(baseContext, permissions);
        }
        Table decks = db.getTable(DECKS_TABLE);
        if (!decks.exists(baseContext)) {
            decks.create(baseContext, permissions);
        }
        Table presentations = db.getTable(PRESENTATIONS_TABLE);
        if (!presentations.exists(baseContext)) {
            presentations.create(baseContext, permissions);
        }

        JPanel panel = new JPanel(new GridBagLayout());
        ScaleToFitJPanel presentationPanel = new ScaleToFitJPanel();
        GridBagConstraints constraints = new GridBagConstraints();
        constraints.weightx = 1;
        constraints.weighty = 1;
        constraints.fill = GridBagConstraints.BOTH;
        panel.add(presentationPanel, constraints);
        frame.getContentPane().add(panel);
        frame.pack();

        Main m = new Main(baseContext, presentationPanel, db, decks, presentations);

        Presentation presentation = new Discovery(baseContext, options.mountPrefix, options.deckPrefix,
                options.maxMtScanCount).getPresentation();
        logger.info("Using presentation: " + presentation);
        m.joinPresentation(presentation, options.joinTimeoutSeconds, options.slideRowFormat);
    }
}

From source file:com.dataradiant.beam.examples.StreamWordCount.java

public static void main(String[] args) throws Exception {

    Options options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class);
    options.setRunner(FlinkRunner.class);

    Pipeline p = Pipeline.create(options);

    KafkaIO.Read<byte[], String> kafkaIOReader = KafkaIO.read().withBootstrapServers("192.168.99.100:32771")
            .withTopics(Arrays.asList("beam".split(",")))
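            // A single consumer property: "auto.offset.reset" = "earliest" makes new
            // consumer groups start from the oldest record; the (Object) cast matches
            // the Map<String, Object> that updateConsumerProperties expects.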
            .updateConsumerProperties(ImmutableMap.of("auto.offset.reset", (Object) "earliest"))
            .withValueCoder(StringUtf8Coder.of());

    p.apply(kafkaIOReader.withoutMetadata()).apply(Values.<String>create())
            .apply(Window.<String>into(FixedWindows.of(Duration.standardMinutes(options.getWindowSize()))))
            .apply(new CountWords()).apply(MapElements.via(new FormatAsTextFn()))
            .apply("WriteCounts", TextIO.Write.to(options.getOutput()));

    p.run();
}

From source file:com.flaptor.indextank.storage.LogWriterClient.java

public static void main(String[] args) throws FileNotFoundException {
    LogWriterClient client = new LogWriterClient("localhost", 15000);

    Scanner in = new Scanner(System.in);
    while (true) {
        String line = in.nextLine();
        if (line.startsWith("i")) {
            IndexLog log = new IndexLog(line.substring(1));
            Segment segment = log.getLargestOptimizedSegment();
            String docid = null;
            /*do {
                Pair<SegmentReader, String> page = segment.pageReader(docid);
                docid = page.last();
                List<LogRecord> list = Lists.newArrayList(page.first());
                System.out.println(list.size());
                System.out.println(list.get(0));
            } while (docid != null);*/
            /*SegmentIndexReader r = new SegmentIndexReader(log.getLargestOptimizedSegment());
            for (Pair<Long, String> pair : r) {
                System.out.println(pair);
            }*/
        /*} else if (line.startsWith("?")) {
            long t = System.currentTimeMillis();
            LogPageToken token = new LogPageToken();
            int total = 0;
            while (token != null) {
                System.out.println(token);
                LogPage page = client.readPage(line.substring(1), token);
                int size = page.get_batch().get_records_size();
                if (size > 0) {
                    LogRecord first = page.get_batch().get_records().get(0);
                    LogRecord last = page.get_batch().get_records().get(size - 1);
                    System.out.println(String.format("> %d ---------> %d - %d --------> %s - %s", size,
                            first.get_id(), last.get_id(), first.get_docid(), last.get_docid()));
                }
                if (page.is_set_next_page_token()) {
                    token = page.get_next_page_token();
                } else {
                    token = null;
                }
                total += size;
            }
            System.out.println("Total: " + total);
            System.out.println("Execution time = " + (System.currentTimeMillis() - t) / 1000.0);*/
        } else {
            String[] split = line.split(" ", 4);
            long start = Long.parseLong(split[0]);
            int count = Integer.parseInt(split[1]);
            String code = split[2];
            String text = null;
            if (split.length > 3) {
                text = split[3];
            }
            LogBatch b = new LogBatch();
            for (long id = start; id - start < count; id++) {
                if (((id - start) % 1000) == 0 && id != start) {
                    System.out.println("sending " + id);
                    client.sendBatch(b);
                    b.get_records().clear();
                }
                LogRecord record = new LogRecord();
                record.set_docid("d" + id);
                record.set_timestamp_ms(System.currentTimeMillis());
                record.set_index_code(code);
                if (text != null) {
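                    // A one-field document: ImmutableMap.of binds the "text" field to the payload.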
                    record.set_fields(ImmutableMap.of("text", text));
                } else {
                    record.set_deleted(true);
                }
                b.add_to_records(record);
            }
            if (!b.get_records().isEmpty())
                client.sendBatch(b);
        }
    }
}

From source file:com.metamx.druid.http.BrokerMain.java

public static void main(String[] args) throws Exception {
    LogLevelAdjuster.register();

    final ObjectMapper jsonMapper = new DefaultObjectMapper();
    final ObjectMapper smileMapper = new DefaultObjectMapper(new SmileFactory());
    smileMapper.getJsonFactory().setCodec(smileMapper);

    final Properties props = Initialization.loadProperties();
    final Lifecycle lifecycle = new Lifecycle();
    final ConfigurationObjectFactory configFactory = Config.createFactory(props);
    final ZkClient zkClient = Initialization.makeZkClient(configFactory.build(ZkClientConfig.class), lifecycle);
    final PhoneBook phoneBook = Initialization.createYellowPages(jsonMapper, zkClient, "Client-ZKYP--%s",
            lifecycle);

    final HttpClient httpClient = HttpClientInit.createClient(HttpClientConfig.builder()
            .withNumConnections(Integer.parseInt(props.getProperty("druid.client.http.connections"))).build(),
            lifecycle);
    final HttpClient emitterHttpClient = HttpClientInit
            .createClient(HttpClientConfig.builder().withNumConnections(1).build(), lifecycle);
    final ServiceEmitter emitter = new ServiceEmitter(props.getProperty("druid.service"),
            props.getProperty("druid.host"), Emitters.create(props, emitterHttpClient, jsonMapper, lifecycle));

    final QueryToolChestWarehouse warehouse = new ReflectionQueryToolChestWarehouse();
    final ClientConfig clientConfig = configFactory.build(ClientConfig.class);
    final ClientSideServerView view = new ClientSideServerView(warehouse, smileMapper, httpClient);
    final ClientInventoryManager clientInventoryManager = new ClientInventoryManager(
            clientConfig.getClientInventoryManagerConfig(), phoneBook, view);
    lifecycle.addManagedInstance(clientInventoryManager);

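    // ImmutableMap.of supplies the replacement for the "prefix" token referenced
    // by MapCacheBrokerConfig's property names.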
    final CacheBroker cacheBroker = MapCacheBroker.create(configFactory
            .buildWithReplacements(MapCacheBrokerConfig.class, ImmutableMap.of("prefix", "druid.bard.cache")));
    final CachingClusteredClient baseClient = new CachingClusteredClient(warehouse, view, cacheBroker,
            smileMapper);
    lifecycle.addManagedInstance(baseClient);

    final ScheduledExecutorFactory scheduledExecutorFactory = ScheduledExecutors.createFactory(lifecycle);
    final ScheduledExecutorService globalScheduledExec = scheduledExecutorFactory.create(1, "Global--%d");
    final MonitorScheduler monitorScheduler = new MonitorScheduler(
            configFactory.build(MonitorSchedulerConfig.class), globalScheduledExec, emitter,
            ImmutableList.<Monitor>of(new JvmMonitor(), new SysMonitor(), new CacheMonitor(cacheBroker)));
    lifecycle.addManagedInstance(monitorScheduler);

    final ServiceDiscoveryConfig serviceDiscoveryConfig = configFactory.build(ServiceDiscoveryConfig.class);
    CuratorFramework curatorFramework = Initialization
            .makeCuratorFrameworkClient(serviceDiscoveryConfig.getZkHosts(), lifecycle);

    final ServiceDiscovery serviceDiscovery = Initialization.makeServiceDiscoveryClient(curatorFramework,
            configFactory.build(ServiceDiscoveryConfig.class), lifecycle);

    final RequestLogger requestLogger = Initialization
            .makeRequestLogger(scheduledExecutorFactory.create(1, "RequestLogger--%d"), props);
    lifecycle.addManagedInstance(requestLogger);

    final ClientQuerySegmentWalker texasRanger = new ClientQuerySegmentWalker(warehouse, emitter, baseClient);

    final Injector injector = Guice
            .createInjector(new ClientServletModule(texasRanger, clientInventoryManager, jsonMapper));
    final Server server = Initialization.makeJettyServer(configFactory.build(ServerConfig.class));
    final Context root = new Context(server, "/druid/v2", Context.SESSIONS);

    root.addServlet(new ServletHolder(new StatusServlet()), "/status");
    root.addServlet(
            new ServletHolder(new QueryServlet(jsonMapper, smileMapper, texasRanger, emitter, requestLogger)),
            "/*");

    root.addEventListener(new GuiceServletConfig(injector));
    root.addFilter(GuiceFilter.class, "/heatmap/*", 0);
    root.addFilter(GuiceFilter.class, "/datasources/*", 0);

    try {
        lifecycle.start();
    } catch (Throwable t) {
        log.error(t, "Error when starting up.  Failing.");
        System.exit(1);
    }

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            log.info("Running shutdown hook");
            lifecycle.stop();
        }
    }));

    server.start();
    server.join();
}

From source file:io.prestosql.plugin.raptor.legacy.RaptorQueryRunner.java

public static void main(String[] args) throws Exception {
    Logging.initialize();
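    // A single-entry map pins the coordinator's HTTP port so the started server
    // is reachable at a known address.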
    Map<String, String> properties = ImmutableMap.of("http-server.http.port", "8080");
    DistributedQueryRunner queryRunner = createRaptorQueryRunner(properties, false, false);
    Thread.sleep(10);
    Logger log = Logger.get(RaptorQueryRunner.class);
    log.info("======== SERVER STARTED ========");
    log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl());
}

From source file:com.google.pubsub.flic.Driver.java

public static void main(String[] args) {
    Driver driver = new Driver();
    JCommander jCommander = new JCommander(driver, args);
    if (driver.help) {
        jCommander.usage();
        return;
    }
    driver.run((project, clientParamsMap) -> GCEController.newGCEController(project,
            ImmutableMap.of(driver.zone, clientParamsMap), Executors.newScheduledThreadPool(500)));
}

From source file:io.prestosql.plugin.hive.HiveQueryRunner.java

public static void main(String[] args) throws Exception {
    // You need to add "--user user" to your CLI for your queries to work
    Logging.initialize();
    DistributedQueryRunner queryRunner = createQueryRunner(TpchTable.getTables(),
            ImmutableMap.of("http-server.http.port", "8080"));
    Thread.sleep(10);
    Logger log = Logger.get(DistributedQueryRunner.class);
    log.info("======== SERVER STARTED ========");
    log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl());
}

From source file:com.cloudera.exhibit.core.ExhibitDescriptor.java

public static ExhibitDescriptor of(String name, ObsDescriptor frame) {
    return new ExhibitDescriptor(ObsDescriptor.EMPTY, ImmutableMap.of(name, frame));
}

From source file:io.prestosql.benchmark.BenchmarkQueryRunner.java

public static LocalQueryRunner createLocalQueryRunnerHashEnabled() {
    return createLocalQueryRunner(ImmutableMap.of("optimizer.optimize_hash_generation", "true"));
}

From source file:com.opengamma.strata.product.Attributes.java

/**
 * Obtains an instance with a single attribute.
 * <p>
 * The {@link #withAttribute(AttributeType, Object)} method can be used on
 * the instance to add attributes.
 * 
 * @param <T>  the type of the attribute value
 * @param type  the type providing meaning to the value
 * @param value  the value
 * @return the instance
 */
public static <T> Attributes of(AttributeType<T> type, T value) {
    return new SimpleAttributes(ImmutableMap.of(type, value));
}
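
A brief usage sketch for the factory above; the AttributeType.NAME and AttributeType.DESCRIPTION constants are assumed to be the ones shipped in the same Strata package, and the string values are illustrative only:

Attributes attributes = Attributes.of(AttributeType.NAME, "Example trade") // assumed constant
        .withAttribute(AttributeType.DESCRIPTION, "illustrative value");   // assumed constant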