Example usage for io.vertx.core.json JsonObject getInteger

List of usage examples for io.vertx.core.json JsonObject getInteger

Introduction

This page collects example usages of io.vertx.core.json JsonObject getInteger from open-source projects.

Prototype

public Integer getInteger(String key, Integer def) 

Document

Like #getInteger(String) but specifying a default value to return if there is no entry.
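
A minimal sketch of both cases follows; the keys and values used here are illustrative only and do not come from the projects listed below.

import io.vertx.core.json.JsonObject;

public class GetIntegerExample {
    public static void main(String[] args) {
        JsonObject config = new JsonObject().put("port", 8080);

        // key present: the stored value is returned
        Integer port = config.getInteger("port", 80); // 8080

        // key absent: the supplied default is returned instead of null
        Integer retries = config.getInteger("retries", 3); // 3

        System.out.println(port + " " + retries);
    }
}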

Usage

From source file:com.baldmountain.depot.models.LineItem.java

License:Open Source License

public LineItem(JsonObject json) {
    super("line_items", json);
    cartId = json.getString("cart");
    productId = json.getString("product");
    count = json.getInteger("count", 1);
}

From source file:com.cyngn.vertx.bosun.BosunReporter.java

License:Apache License

@Override
public void start(final Future<Void> startedResult) {

    // setup the default config values
    JsonObject config = context.config();
    hosts = config.getJsonArray("hosts", new JsonArray("[{ \"host\" : \"localhost\", \"port\" : 8070}]"));
    address = config.getString("address", DEFAULT_ADDRESS);
    maxTags = config.getInteger("max_tags", OPENTSDB_DEFAULT_MAX_TAGS);
    maxIndexCacheSize = config.getInteger("max_index_cache_size", DEFAULT_UNIQUE_METRICS_INDEXED);
    indexExpiryInMinutes = config.getInteger("index_expiry_minutes", DEFAULT_INDEX_EXPIRY_MINUTES);
    timeout = config.getInteger("default_timeout_ms", DEFAULT_TIMEOUT_MS);

    metricsIndexed = new AtomicInteger(0);
    metricsPut = new AtomicInteger(0);
    metricsErrors = new AtomicInteger(0);

    eventBus = vertx.eventBus();

    // create the list of workers
    connections = new ArrayList<>(hosts.size());

    initializeConnections(startedResult);
    createMessageHandlers();
    outputConfig();

    // initialize the in memory index cache
    distinctMetrics = CacheBuilder.newBuilder().maximumSize(maxIndexCacheSize)
            .expireAfterWrite(DEFAULT_INDEX_EXPIRY_MINUTES, TimeUnit.MINUTES)
            .build(new CacheLoader<String, Boolean>() {
                public Boolean load(String key) throws Exception {
                    return true;
                }
            });

    // start listening for incoming messages
    eventBus.consumer(address, this);
    initStatsReporting();
}

From source file:com.github.ithildir.airbot.AirBotVerticle.java

License:Open Source License

private Future<HttpServer> _startHttpServer(JsonObject configJsonObject) {
    Future<HttpServer> future = Future.future();

    HttpServer httpServer = vertx.createHttpServer();

    Router router = Router.router(vertx);

    Route authHandlerRoute = router.route();

    String username = configJsonObject.getString(ConfigKeys.USERNAME);
    String password = configJsonObject.getString(ConfigKeys.PASSWORD);

    AuthProvider authProvider = new SingleUserAuthProvider(username, password);

    authHandlerRoute.handler(BasicAuthHandler.create(authProvider));

    Route bodyHandlerRoute = router.route();

    bodyHandlerRoute.handler(BodyHandler.create());

    _addHttpRouteApiAi(router);

    httpServer.requestHandler(router::accept);

    int port = configJsonObject.getInteger(ConfigKeys.PORT, _DEFAULT_PORT);

    httpServer.listen(port, future);

    return future;
}

From source file:com.groupon.vertx.memcache.MemcacheConfig.java

License:Apache License

public MemcacheConfig(JsonObject jsonConfig) {
    if (jsonConfig == null) {
        log.error("initialize", "exception", "noConfigFound");
        throw new MemcacheException("No Memcache config found");
    }

    if (jsonConfig.getJsonArray(SERVERS_KEY) != null && jsonConfig.getString(EVENT_BUS_ADDRESS_KEY) != null
            && !jsonConfig.getString(EVENT_BUS_ADDRESS_KEY).isEmpty()) {
        this.servers.addAll(processServers(jsonConfig.getJsonArray(SERVERS_KEY)));
        this.eventBusAddress = jsonConfig.getString(EVENT_BUS_ADDRESS_KEY);
        this.namespace = jsonConfig.getString(NAMESPACE_KEY);
        this.pointsPerServer = jsonConfig.getInteger(POINTS_PER_SERVER, DEFAULT_POINTS_PER_SERVER);
        this.retryInterval = jsonConfig.getLong(RETRY_INTERVAL, DEFAULT_RETRY_INTERVAL);

        final HashAlgorithm defaultHashAlgorithm = HashAlgorithm.FNV1_32_HASH;
        String algorithmStr = jsonConfig.getString(ALGORITHM_KEY, defaultHashAlgorithm.name());
        this.algorithm = algorithmStr == null ? defaultHashAlgorithm : HashAlgorithm.valueOf(algorithmStr);

        final ContinuumType defaultContinuumType = ContinuumType.KETAMA;
        String continuumStr = jsonConfig.getString(CONTINUUM_KEY, defaultContinuumType.name());
        this.continuum = continuumStr == null ? defaultContinuumType : ContinuumType.valueOf(continuumStr);
    } else {
        log.error("initialize", "exception", "invalidConfigFound", new String[] { "config" },
                jsonConfig.encode());
        throw new MemcacheException("Invalid Memcache config defined");
    }

    log.info("initialize", "success", new String[] { "eventBusAddress", "namespace", "servers", "algorithm" },
            eventBusAddress, namespace, servers.size(), algorithm);
}

From source file:com.groupon.vertx.redis.RedisConfig.java

License:Apache License

public RedisConfig(JsonObject redisConfigObj) throws Exception {
    this.host = redisConfigObj.getString(HOST_KEY);
    this.port = redisConfigObj.getInteger(PORT_KEY, port);
    this.eventBusAddress = redisConfigObj.getString(EVENT_BUS_ADDRESS_KEY);
    this.retryInterval = redisConfigObj.getLong(RETRY_INTERVAL_KEY, retryInterval);
    this.replyTimeout = redisConfigObj.getLong(REPLY_TIMEOUT_KEY, replyTimeout);

    if (host == null || host.isEmpty() || eventBusAddress == null || eventBusAddress.isEmpty()) {
        throw new Exception("Invalid Redis config.");
    }
}

From source file:com.hubrick.vertx.kafka.consumer.KafkaConsumerVerticle.java

License:Apache License

@Override
public void start() throws Exception {
    super.start();

    final JsonObject config = vertx.getOrCreateContext().config();
    vertxAddress = getMandatoryStringConfig(config, KafkaConsumerProperties.KEY_VERTX_ADDRESS);

    configuration = KafkaConsumerConfiguration.create(
            getMandatoryStringConfig(config, KafkaConsumerProperties.KEY_GROUP_ID),
            getMandatoryStringConfig(config, KafkaConsumerProperties.KEY_KAFKA_TOPIC),
            getMandatoryStringConfig(config, KafkaConsumerProperties.KEY_ZOOKEEPER),
            config.getString(KafkaConsumerProperties.KEY_OFFSET_RESET, "largest"),
            config.getInteger(KafkaConsumerProperties.KEY_ZOOKEPER_TIMEOUT_MS, 100000),
            config.getInteger(KafkaConsumerProperties.KEY_MAX_UNACKNOWLEDGED, 100),
            config.getLong(KafkaConsumerProperties.KEY_MAX_UNCOMMITTED_OFFSETS, 1000L),
            config.getLong(KafkaConsumerProperties.KEY_ACK_TIMEOUT_SECONDS, 600L),
            config.getLong(KafkaConsumerProperties.KEY_COMMIT_TIMEOUT_MS, 5 * 60 * 1000L),
            config.getInteger(KafkaConsumerProperties.KEY_MAX_RETRIES, Integer.MAX_VALUE),
            config.getInteger(KafkaConsumerProperties.KEY_INITIAL_RETRY_DELAY_SECONDS, 1),
            config.getInteger(KafkaConsumerProperties.KEY_MAX_RETRY_DELAY_SECONDS, 10),
            config.getLong(KafkaConsumerProperties.EVENT_BUS_SEND_TIMEOUT, DeliveryOptions.DEFAULT_TIMEOUT));

    consumer = KafkaConsumer.create(vertx, configuration, this::handler);
    consumer.start();
}

From source file:com.hubrick.vertx.kafka.producer.KafkaProducerServiceVerticle.java

License:Apache License

@Override
public void start() {
    // Get the address of EventBus where the message was published
    final String address = config().getString(ADDRESS);
    if (Strings.isNullOrEmpty(address)) {
        throw new IllegalStateException("address must be specified in config");
    }

    // Get the default topic to which messages will be sent
    final String topic = config().getString(DEFAULT_TOPIC);
    if (Strings.isNullOrEmpty(topic)) {
        throw new IllegalStateException("topic must be specified in config");
    }

    final JsonObject statsDConfig = config().getJsonObject(STATSD);

    StatsDConfiguration statsDConfiguration = null;
    if (statsDConfig != null) {
        final String prefix = statsDConfig.getString(PREFIX, PREFIX_DEFAULT);
        final String host = statsDConfig.getString(HOST, HOST_DEFAULT);
        final int port = statsDConfig.getInteger(PORT, PORT_DEFAULT);
        statsDConfiguration = new StatsDConfiguration(host, port, prefix);
    }

    final KafkaProducerConfiguration kafkaProducerConfiguration = new KafkaProducerConfiguration(topic,
            config().getString(BROKER_LIST, BROKER_LIST_DEFAULT),
            config().getInteger(REQUEST_ACKS, REQUEST_ACKS_DEFAULT));
    kafkaProducerConfiguration.setStatsDConfiguration(statsDConfiguration);

    final String type = config().getString(TYPE);
    if (!Strings.isNullOrEmpty(type)) {
        kafkaProducerConfiguration.setType(ProducerType.valueOf(type));
    }

    final Integer maxRetries = config().getInteger(MAX_RETRIES);
    if (maxRetries != null) {
        kafkaProducerConfiguration.setMaxRetries(maxRetries);
    }

    final Integer retryBackoffMs = config().getInteger(RETRY_BACKOFF_MS);
    if (retryBackoffMs != null) {
        kafkaProducerConfiguration.setRetryBackoffMs(retryBackoffMs);
    }

    final Integer bufferingMaxMs = config().getInteger(BUFFERING_MAX_MS);
    if (bufferingMaxMs != null) {
        kafkaProducerConfiguration.setBufferingMaxMs(bufferingMaxMs);
    }

    final Integer bufferingMaxMessages = config().getInteger(BUFFERING_MAX_MESSAGES);
    if (bufferingMaxMessages != null) {
        kafkaProducerConfiguration.setBufferingMaxMessages(bufferingMaxMessages);
    }

    final Integer enqueueTimeout = config().getInteger(ENQUEUE_TIMEOUT);
    if (enqueueTimeout != null) {
        kafkaProducerConfiguration.setEnqueueTimeout(enqueueTimeout);
    }

    final Integer batchMessageNum = config().getInteger(BATCH_MESSAGE_NUM);
    if (batchMessageNum != null) {
        kafkaProducerConfiguration.setBatchMessageNum(batchMessageNum);
    }

    kafkaProducerService = new DefaultKafkaProducerService(kafkaProducerConfiguration);
    ProxyHelper.registerService(KafkaProducerService.class, vertx, kafkaProducerService, address);

    kafkaProducerService.start();
}

From source file:eu.rethink.mn.component.AllocationManager.java

License:Apache License

@Override
public void handle(PipeContext ctx) {
    final PipeMessage msg = ctx.getMessage();
    final JsonObject body = msg.getBody();

    if (msg.getType().equals("create")) {
        //process JSON msg requesting a number of available addresses
        final JsonObject msgBodyValue = body.getJsonObject("value");
        final String scheme = body.getString("scheme");

        int number = msgBodyValue.getInteger("number", 5);
        final List<String> allocated = allocate(ctx, scheme, number);

        final PipeMessage reply = new PipeMessage();
        reply.setId(msg.getId());
        reply.setFrom(name);
        reply.setTo(msg.getFrom());
        reply.setReplyCode(ReplyCode.OK);

        final JsonObject value = new JsonObject();
        value.put("allocated", new JsonArray(allocated));

        reply.getBody().put("value", value);

        ctx.reply(reply);

    } else if (msg.getType().equals("delete")) {
        //process JSON msg releasing an address
        final String resource = body.getString("resource");
        final JsonArray childrenResourcesList = body.getJsonArray("childrenResources");
        if (resource != null) {
            deallocate(ctx, resource);
        } else {
            for (Object childrenResource : childrenResourcesList) {
                deallocate(ctx, childrenResource.toString());
            }
        }

        ctx.replyOK(name);
    }
}

From source file:eu.rethink.mn.component.HypertyAllocationManager.java

License:Apache License

@Override
public void handle(PipeContext ctx) {
    final PipeMessage msg = ctx.getMessage();

    if (msg.getType().equals("create")) {
        //process JSON msg requesting a number of available addresses
        final JsonObject msgBodyValue = msg.getBody().getJsonObject("value");

        int number = msgBodyValue.getInteger("number", 5);
        final List<String> allocated = allocate(ctx, number);

        final PipeMessage reply = new PipeMessage();
        reply.setId(msg.getId());
        reply.setFrom(name);
        reply.setTo(msg.getFrom());
        reply.setReplyCode(ReplyCode.OK);

        final JsonObject value = new JsonObject();
        value.put("allocated", new JsonArray(allocated));

        reply.getBody().put("value", value);

        ctx.reply(reply);
    } else {
        //TODO: deallocate !?
    }
}

From source file:eu.rethink.mn.component.ObjectAllocationManager.java

License:Apache License

@Override
public void handle(PipeContext ctx) {
    final PipeMessage msg = ctx.getMessage();
    final JsonObject body = msg.getBody();

    if (msg.getType().equals("create")) {
        //process JSON msg requesting a number of available addresses
        final String scheme = body.getString("scheme");

        //on value
        final JsonObject msgBodyValue = body.getJsonObject("value");
        final int number = msgBodyValue.getInteger("number", 5);

        final List<String> allocated = allocate(ctx, scheme, number);

        final PipeMessage reply = new PipeMessage();
        reply.setId(msg.getId());
        reply.setFrom(name);
        reply.setTo(msg.getFrom());
        reply.setReplyCode(ReplyCode.OK);

        final JsonObject value = new JsonObject();
        value.put("allocated", new JsonArray(allocated));

        reply.getBody().put("value", value);

        ctx.reply(reply);
    } else if (msg.getType().equals("delete")) {
        //process JSON msg releasing an address
        final String resource = body.getString("resource");

        deallocate(ctx, resource);

        ctx.replyOK(name);
    }
}