List of usage examples for java.util.ArrayList.add()
public boolean add(E e)
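Before the full-size examples below, here is a minimal, self-contained sketch of add(E e) on its own (the class name and the added values are illustrative, not taken from the examples): the method appends the element to the end of the list, grows the backing array as needed, and always returns true for ArrayList.

import java.util.ArrayList;
import java.util.List;

public class ArrayListAddExample {
    public static void main(String[] args) {
        // The capacity hint is optional; the backing array grows automatically.
        List<String> command = new ArrayList<>(4);

        boolean changed = command.add("php");   // add(E e) returns true: the list always changes
        command.add("app/console");
        command.add("--env=prod");

        System.out.println(changed);  // true
        System.out.println(command);  // [php, app/console, --env=prod]
    }
}

The same pattern, building a command line element by element with add() and addAll(), appears throughout the topology examples that follow.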
From source file: deck36.storm.plan9.php.PrimeCatBadgeTopology.java
public static void main(String[] args) throws Exception {

    String env = null;
    if (args != null && args.length > 0) {
        env = args[0];
    }

    if (!"dev".equals(env) && !"prod".equals(env)) {
        System.out.println("Usage: $0 (dev|prod)\n");
        System.exit(1);
    }

    // Topology config
    Config conf = new Config();

    // Load parameters and add them to the Config
    Map configMap = YamlLoader.loadYamlFromResource("config_" + env + ".yml");
    conf.putAll(configMap);

    log.info(JSONValue.toJSONString(conf));

    // Set topology loglevel to DEBUG
    conf.put(Config.TOPOLOGY_DEBUG, JsonPath.read(conf, "$.deck36_storm.debug"));

    // Create Topology builder
    TopologyBuilder builder = new TopologyBuilder();

    // If there are no special reasons, start with a parallelism hint of 1
    // and multiple tasks. That way, you can scale dynamically later on.
    int parallelism_hint = JsonPath.read(conf, "$.deck36_storm.default_parallelism_hint");
    int num_tasks = JsonPath.read(conf, "$.deck36_storm.default_num_tasks");

    // Create a stream from RabbitMQ messages: bind a new queue named after the topology
    // to the main plan9 exchange (from the properties config), consuming only
    // POINTS-related events by using the routing key 'points.#'
    String badgeName = PrimeCatBadgeTopology.class.getSimpleName();

    String rabbitQueueName = badgeName; // use topology class name as name for the queue
    String rabbitExchangeName = JsonPath.read(conf, "$.deck36_storm.PrimeCatBolt.rabbitmq.exchange");
    String rabbitRoutingKey = JsonPath.read(conf, "$.deck36_storm.PrimeCatBolt.rabbitmq.routing_key");

    // Get JSON deserialization scheme
    Scheme rabbitScheme = new SimpleJSONScheme();

    // Set up a Declarator to configure exchange/queue/routing key
    RabbitMQDeclarator rabbitDeclarator = new RabbitMQDeclarator(rabbitExchangeName, rabbitQueueName,
            rabbitRoutingKey);

    // Create configuration for the spout
    ConnectionConfig connectionConfig = new ConnectionConfig(
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.host"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.port"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.user"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.pass"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.vhost"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.heartbeat"));

    ConsumerConfig spoutConfig = new ConsumerConfigBuilder().connection(connectionConfig).queue(rabbitQueueName)
            .prefetch((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch")).requeueOnFail()
            .build();

    // Add global parameters to the topology config - the RabbitMQSpout will read them from there
    conf.putAll(spoutConfig.asMap());

    // For production, set the spout pending value to the same value as the RabbitMQ pre-fetch
    // see: https://github.com/ppat/storm-rabbitmq/blob/master/README.md
    if ("prod".equals(env)) {
        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING,
                (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch"));
    }

    // Add RabbitMQ spout to topology
    builder.setSpout("incoming", new RabbitMQSpout(rabbitScheme, rabbitDeclarator), parallelism_hint)
            .setNumTasks((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.spout_tasks"));

    // Construct the command to invoke the external bolt implementation
    ArrayList<String> command = new ArrayList<>(15);

    // Add main execution program (php, hhvm, zend, ...) and parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.executor"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.executor_params"));

    // Add main command to be executed (app/console, the phar file, etc.) and global context parameters (environment etc.)
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.main"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.main_params"));

    // Add main route to be invoked and its parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.PrimeCatBolt.main"));
    List<String> boltParams = (List<String>) JsonPath.read(conf, "$.deck36_storm.PrimeCatBolt.params");
    if (boltParams != null)
        command.addAll(boltParams);

    // Log the final command
    log.info("Command to start bolt for PrimeCat badge: " + Arrays.toString(command.toArray()));

    // Add constructed external bolt command to topology using MultilangAdapterTickTupleBolt
    builder.setBolt("badge",
            new MultilangAdapterTickTupleBolt(command,
                    (Integer) JsonPath.read(conf, "$.deck36_storm.PrimeCatBolt.primecat_frequency"),
                    "badge"),
            parallelism_hint).setNumTasks(num_tasks).shuffleGrouping("incoming");

    builder.setBolt("rabbitmq_router",
            new Plan9RabbitMQRouterBolt(
                    (String) JsonPath.read(conf, "$.deck36_storm.PrimeCatBolt.rabbitmq.target_exchange"),
                    "PrimeCat" // RabbitMQ routing key
            ), parallelism_hint).setNumTasks(num_tasks).shuffleGrouping("badge");

    builder.setBolt("rabbitmq_producer", new Plan9RabbitMQPushBolt(), parallelism_hint).setNumTasks(num_tasks)
            .shuffleGrouping("rabbitmq_router");

    if ("dev".equals(env)) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(badgeName + System.currentTimeMillis(), conf, builder.createTopology());
        Thread.sleep(2000000);
    }

    if ("prod".equals(env)) {
        StormSubmitter.submitTopology(badgeName + "-" + System.currentTimeMillis(), conf,
                builder.createTopology());
    }
}
From source file: deck36.storm.plan9.php.StatusLevelTopology.java
public static void main(String[] args) throws Exception {

    String env = null;
    if (args != null && args.length > 0) {
        env = args[0];
    }

    if (!"dev".equals(env) && !"prod".equals(env)) {
        System.out.println("Usage: $0 (dev|prod)\n");
        System.exit(1);
    }

    // Topology config
    Config conf = new Config();

    // Load parameters and add them to the Config
    Map configMap = YamlLoader.loadYamlFromResource("config_" + env + ".yml");
    conf.putAll(configMap);

    log.info(JSONValue.toJSONString(conf));

    // Set topology loglevel to DEBUG
    conf.put(Config.TOPOLOGY_DEBUG, JsonPath.read(conf, "$.deck36_storm.debug"));

    // Create Topology builder
    TopologyBuilder builder = new TopologyBuilder();

    // If there are no special reasons, start with a parallelism hint of 1
    // and multiple tasks. That way, you can scale dynamically later on.
    int parallelism_hint = JsonPath.read(conf, "$.deck36_storm.default_parallelism_hint");
    int num_tasks = JsonPath.read(conf, "$.deck36_storm.default_num_tasks");

    // Create a stream from RabbitMQ messages: bind a new queue named after the topology
    // to the main plan9 exchange (from the properties config), consuming only
    // POINTS-related events by using the routing key 'points.#'
    String badgeName = StatusLevelTopology.class.getSimpleName();

    String rabbitQueueName = badgeName; // use topology class name as name for the queue
    String rabbitExchangeName = JsonPath.read(conf, "$.deck36_storm.StatusLevelBolt.rabbitmq.exchange");
    String rabbitRoutingKey = JsonPath.read(conf, "$.deck36_storm.StatusLevelBolt.rabbitmq.routing_key");

    // Get JSON deserialization scheme
    Scheme rabbitScheme = new SimpleJSONScheme();

    // Set up a Declarator to configure exchange/queue/routing key
    RabbitMQDeclarator rabbitDeclarator = new RabbitMQDeclarator(rabbitExchangeName, rabbitQueueName,
            rabbitRoutingKey);

    // Create configuration for the spout
    ConnectionConfig connectionConfig = new ConnectionConfig(
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.host"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.port"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.user"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.pass"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.vhost"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.heartbeat"));

    ConsumerConfig spoutConfig = new ConsumerConfigBuilder().connection(connectionConfig).queue(rabbitQueueName)
            .prefetch((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch")).requeueOnFail()
            .build();

    // Add global parameters to the topology config - the RabbitMQSpout will read them from there
    conf.putAll(spoutConfig.asMap());

    // For production, set the spout pending value to the same value as the RabbitMQ pre-fetch
    // see: https://github.com/ppat/storm-rabbitmq/blob/master/README.md
    if ("prod".equals(env)) {
        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING,
                (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch"));
    }

    // Add RabbitMQ spout to topology
    builder.setSpout("incoming", new RabbitMQSpout(rabbitScheme, rabbitDeclarator), parallelism_hint)
            .setNumTasks((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.spout_tasks"));

    // Construct the command to invoke the external bolt implementation
    ArrayList<String> command = new ArrayList<>(15);

    // Add main execution program (php, hhvm, zend, ...) and parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.executor"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.executor_params"));

    // Add main command to be executed (app/console, the phar file, etc.) and global context parameters (environment etc.)
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.main"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.main_params"));

    // Add main route to be invoked and its parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.StatusLevelBolt.main"));
    List<String> boltParams = (List<String>) JsonPath.read(conf, "$.deck36_storm.StatusLevelBolt.params");
    if (boltParams != null)
        command.addAll(boltParams);

    // Log the final command
    log.info("Command to start bolt for StatusLevel badges: " + Arrays.toString(command.toArray()));

    // Add constructed external bolt command to topology using MultilangAdapterBolt
    builder.setBolt("badge", new MultilangAdapterBolt(command, "badge"), 1).setNumTasks(1)
            .shuffleGrouping("incoming");

    builder.setBolt("rabbitmq_router",
            new Plan9RabbitMQRouterBolt(
                    (String) JsonPath.read(conf, "$.deck36_storm.StatusLevelBolt.rabbitmq.target_exchange"),
                    "StatusLevel" // RabbitMQ routing key
            ), parallelism_hint).setNumTasks(num_tasks).shuffleGrouping("badge");

    builder.setBolt("rabbitmq_producer", new Plan9RabbitMQPushBolt(), parallelism_hint).setNumTasks(num_tasks)
            .shuffleGrouping("rabbitmq_router");

    if ("dev".equals(env)) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(badgeName + System.currentTimeMillis(), conf, builder.createTopology());
        Thread.sleep(2000000);
    }

    if ("prod".equals(env)) {
        StormSubmitter.submitTopology(badgeName + "-" + System.currentTimeMillis(), conf,
                builder.createTopology());
    }
}
From source file: deck36.storm.plan9.php.StumbleBlunderBadgeTopology.java
public static void main(String[] args) throws Exception {

    String env = null;
    if (args != null && args.length > 0) {
        env = args[0];
    }

    if (!"dev".equals(env) && !"prod".equals(env)) {
        System.out.println("Usage: $0 (dev|prod)\n");
        System.exit(1);
    }

    // Topology config
    Config conf = new Config();

    // Load parameters and add them to the Config
    Map configMap = YamlLoader.loadYamlFromResource("config_" + env + ".yml");
    conf.putAll(configMap);

    log.info(JSONValue.toJSONString(conf));

    // Set topology loglevel to DEBUG
    conf.put(Config.TOPOLOGY_DEBUG, JsonPath.read(conf, "$.deck36_storm.debug"));

    // Create Topology builder
    TopologyBuilder builder = new TopologyBuilder();

    // If there are no special reasons, start with a parallelism hint of 1
    // and multiple tasks. That way, you can scale dynamically later on.
    int parallelism_hint = JsonPath.read(conf, "$.deck36_storm.default_parallelism_hint");
    int num_tasks = JsonPath.read(conf, "$.deck36_storm.default_num_tasks");

    // Create a stream from RabbitMQ messages: bind a new queue named after the topology
    // to the main plan9 exchange (from the properties config), consuming only
    // CBT-related events by using the routing key 'cbt.#'
    String badgeName = StumbleBlunderBadgeTopology.class.getSimpleName();

    String rabbitQueueName = badgeName; // use topology class name as name for the queue
    String rabbitExchangeName = JsonPath.read(conf, "$.deck36_storm.StumbleBlunderBolt.rabbitmq.exchange");
    String rabbitRoutingKey = JsonPath.read(conf, "$.deck36_storm.StumbleBlunderBolt.rabbitmq.routing_key");

    // Get JSON deserialization scheme
    Scheme rabbitScheme = new SimpleJSONScheme();

    // Set up a Declarator to configure exchange/queue/routing key
    RabbitMQDeclarator rabbitDeclarator = new RabbitMQDeclarator(rabbitExchangeName, rabbitQueueName,
            rabbitRoutingKey);

    // Create configuration for the spout
    ConnectionConfig connectionConfig = new ConnectionConfig(
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.host"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.port"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.user"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.pass"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.vhost"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.heartbeat"));

    ConsumerConfig spoutConfig = new ConsumerConfigBuilder().connection(connectionConfig).queue(rabbitQueueName)
            .prefetch((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch")).requeueOnFail()
            .build();

    // Add global parameters to the topology config - the RabbitMQSpout will read them from there
    conf.putAll(spoutConfig.asMap());

    // For production, set the spout pending value to the same value as the RabbitMQ pre-fetch
    // see: https://github.com/ppat/storm-rabbitmq/blob/master/README.md
    if ("prod".equals(env)) {
        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING,
                (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch"));
    }

    // Add RabbitMQ spout to topology
    builder.setSpout("incoming", new RabbitMQSpout(rabbitScheme, rabbitDeclarator), parallelism_hint)
            .setNumTasks((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.spout_tasks"));

    // Construct the command to invoke the external bolt implementation
    ArrayList<String> command = new ArrayList<>(15);

    // Add main execution program (php, hhvm, zend, ...) and parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.executor"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.executor_params"));

    // Add main command to be executed (app/console, the phar file, etc.) and global context parameters (environment etc.)
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.main"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.main_params"));

    // Add main route to be invoked and its parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.StumbleBlunderBolt.main"));
    List<String> boltParams = (List<String>) JsonPath.read(conf, "$.deck36_storm.StumbleBlunderBolt.params");
    if (boltParams != null)
        command.addAll(boltParams);

    // Log the final command
    log.info("Command to start bolt for StumbleBlunder badge: " + Arrays.toString(command.toArray()));

    // Add constructed external bolt command to topology using MultilangAdapterBolt
    builder.setBolt("badge", new MultilangAdapterBolt(command, "badge"), parallelism_hint)
            .setNumTasks(num_tasks).shuffleGrouping("incoming");

    builder.setBolt("rabbitmq_router",
            new Plan9RabbitMQRouterBolt(
                    (String) JsonPath.read(conf, "$.deck36_storm.StumbleBlunderBolt.rabbitmq.target_exchange"),
                    "StumbleBlunder" // RabbitMQ routing key
            ), parallelism_hint).setNumTasks(num_tasks).shuffleGrouping("badge");

    builder.setBolt("rabbitmq_producer", new Plan9RabbitMQPushBolt(), parallelism_hint).setNumTasks(num_tasks)
            .shuffleGrouping("rabbitmq_router");

    if ("dev".equals(env)) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(badgeName + System.currentTimeMillis(), conf, builder.createTopology());
        Thread.sleep(2000000);
    }

    if ("prod".equals(env)) {
        StormSubmitter.submitTopology(badgeName + "-" + System.currentTimeMillis(), conf,
                builder.createTopology());
    }
}
From source file: deck36.storm.plan9.php.DeludedKittenRobbersTopology.java
public static void main(String[] args) throws Exception {

    String env = null;
    if (args != null && args.length > 0) {
        env = args[0];
    }

    if (!"dev".equals(env) && !"prod".equals(env)) {
        System.out.println("Usage: $0 (dev|prod)\n");
        System.exit(1);
    }

    // Topology config
    Config conf = new Config();

    // Load parameters and add them to the Config
    Map configMap = YamlLoader.loadYamlFromResource("config_" + env + ".yml");
    conf.putAll(configMap);

    log.info(JSONValue.toJSONString(conf));

    // Set topology loglevel to DEBUG
    conf.put(Config.TOPOLOGY_DEBUG, JsonPath.read(conf, "$.deck36_storm.debug"));

    // Create Topology builder
    TopologyBuilder builder = new TopologyBuilder();

    // If there are no special reasons, start with a parallelism hint of 1
    // and multiple tasks. That way, you can scale dynamically later on.
    int parallelism_hint = JsonPath.read(conf, "$.deck36_storm.default_parallelism_hint");
    int num_tasks = JsonPath.read(conf, "$.deck36_storm.default_num_tasks");

    // Create a stream from RabbitMQ messages: bind a new queue named after the topology
    // to the main plan9 exchange (from the properties config), consuming only
    // CBT-related events by using the routing key 'cbt.#'
    String badgeName = DeludedKittenRobbersTopology.class.getSimpleName();

    String rabbitQueueName = badgeName; // use topology class name as name for the queue
    String rabbitExchangeName = JsonPath.read(conf, "$.deck36_storm.DeludedKittenRobbersBolt.rabbitmq.exchange");
    String rabbitRoutingKey = JsonPath.read(conf, "$.deck36_storm.DeludedKittenRobbersBolt.rabbitmq.routing_key");

    // Get JSON deserialization scheme
    Scheme rabbitScheme = new SimpleJSONScheme();

    // Set up a Declarator to configure exchange/queue/routing key
    RabbitMQDeclarator rabbitDeclarator = new RabbitMQDeclarator(rabbitExchangeName, rabbitQueueName,
            rabbitRoutingKey);

    // Create configuration for the spout
    ConnectionConfig connectionConfig = new ConnectionConfig(
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.host"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.port"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.user"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.pass"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.vhost"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.heartbeat"));

    ConsumerConfig spoutConfig = new ConsumerConfigBuilder().connection(connectionConfig).queue(rabbitQueueName)
            .prefetch((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch")).requeueOnFail()
            .build();

    // Add global parameters to the topology config - the RabbitMQSpout will read them from there
    conf.putAll(spoutConfig.asMap());

    // For production, set the spout pending value to the same value as the RabbitMQ pre-fetch
    // see: https://github.com/ppat/storm-rabbitmq/blob/master/README.md
    if ("prod".equals(env)) {
        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING,
                (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch"));
    }

    // Add RabbitMQ spout to topology
    builder.setSpout("incoming", new RabbitMQSpout(rabbitScheme, rabbitDeclarator), parallelism_hint)
            .setNumTasks((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.spout_tasks"));

    // Construct the command to invoke the external bolt implementation
    ArrayList<String> command = new ArrayList<>(15);

    // Add main execution program (php, hhvm, zend, ...) and parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.executor"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.executor_params"));

    // Add main command to be executed (app/console, the phar file, etc.) and global context parameters (environment etc.)
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.main"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.main_params"));

    // Add main route to be invoked and its parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.DeludedKittenRobbersBolt.main"));
    List<String> boltParams = (List<String>) JsonPath.read(conf, "$.deck36_storm.DeludedKittenRobbersBolt.params");
    if (boltParams != null)
        command.addAll(boltParams);

    // Log the final command
    log.info("Command to start bolt for Deluded Kitten Robbers: " + Arrays.toString(command.toArray()));

    // Add constructed external bolt command to topology using MultilangAdapterTickTupleBolt
    builder.setBolt("badge",
            new MultilangAdapterTickTupleBolt(command,
                    (Integer) JsonPath.read(conf, "$.deck36_storm.DeludedKittenRobbersBolt.attack_frequency_secs"),
                    "badge"),
            parallelism_hint).setNumTasks(num_tasks).shuffleGrouping("incoming");

    builder.setBolt("rabbitmq_router",
            new Plan9RabbitMQRouterBolt(
                    (String) JsonPath.read(conf, "$.deck36_storm.DeludedKittenRobbersBolt.rabbitmq.target_exchange"),
                    "DeludedKittenRobbers" // RabbitMQ routing key
            ), parallelism_hint).setNumTasks(num_tasks).shuffleGrouping("badge");

    builder.setBolt("rabbitmq_producer", new Plan9RabbitMQPushBolt(), parallelism_hint).setNumTasks(num_tasks)
            .shuffleGrouping("rabbitmq_router");

    if ("dev".equals(env)) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(badgeName + System.currentTimeMillis(), conf, builder.createTopology());
        Thread.sleep(2000000);
    }

    if ("prod".equals(env)) {
        StormSubmitter.submitTopology(badgeName + "-" + System.currentTimeMillis(), conf,
                builder.createTopology());
    }
}
From source file: deck36.storm.plan9.php.RaiderOfTheKittenRobbersTopology.java
public static void main(String[] args) throws Exception {

    String env = null;
    if (args != null && args.length > 0) {
        env = args[0];
    }

    if (!"dev".equals(env) && !"prod".equals(env)) {
        System.out.println("Usage: $0 (dev|prod)\n");
        System.exit(1);
    }

    // Topology config
    Config conf = new Config();

    // Load parameters and add them to the Config
    Map configMap = YamlLoader.loadYamlFromResource("config_" + env + ".yml");
    conf.putAll(configMap);

    log.info(JSONValue.toJSONString(conf));

    // Set topology loglevel to DEBUG
    conf.put(Config.TOPOLOGY_DEBUG, JsonPath.read(conf, "$.deck36_storm.debug"));

    // Create Topology builder
    TopologyBuilder builder = new TopologyBuilder();

    // If there are no special reasons, start with a parallelism hint of 1
    // and multiple tasks. That way, you can scale dynamically later on.
    int parallelism_hint = JsonPath.read(conf, "$.deck36_storm.default_parallelism_hint");
    int num_tasks = JsonPath.read(conf, "$.deck36_storm.default_num_tasks");

    // Create a stream from RabbitMQ messages: bind a new queue named after the topology
    // to the main plan9 exchange (from the properties config), consuming only
    // CBT-related events by using the routing key 'cbt.#'
    String badgeName = RaiderOfTheKittenRobbersTopology.class.getSimpleName();

    String rabbitQueueName = badgeName; // use topology class name as name for the queue
    String rabbitExchangeName = (String) JsonPath.read(conf,
            "$.deck36_storm.RaiderOfTheKittenRobbersBolt.rabbitmq.target_exchange");
    String rabbitRoutingKey = "#";

    // Get JSON deserialization scheme
    Scheme rabbitScheme = new SimpleJSONScheme();

    // Set up a Declarator to configure exchange/queue/routing key
    RabbitMQDeclarator rabbitDeclarator = new RabbitMQDeclarator(rabbitExchangeName, rabbitQueueName,
            rabbitRoutingKey);

    // Create configuration for the spout
    ConnectionConfig connectionConfig = new ConnectionConfig(
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.host"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.port"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.user"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.pass"),
            (String) JsonPath.read(conf, "$.deck36_storm.rabbitmq.vhost"),
            (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.heartbeat"));

    ConsumerConfig spoutConfig = new ConsumerConfigBuilder().connection(connectionConfig).queue(rabbitQueueName)
            .prefetch((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch")).requeueOnFail()
            .build();

    // Add global parameters to the topology config - the RabbitMQSpout will read them from there
    conf.putAll(spoutConfig.asMap());

    // For production, set the spout pending value to the same value as the RabbitMQ pre-fetch
    // see: https://github.com/ppat/storm-rabbitmq/blob/master/README.md
    if ("prod".equals(env)) {
        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING,
                (Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.prefetch"));
    }

    // Add RabbitMQ spout to topology
    builder.setSpout("incoming", new RabbitMQSpout(rabbitScheme, rabbitDeclarator), parallelism_hint)
            .setNumTasks((Integer) JsonPath.read(conf, "$.deck36_storm.rabbitmq.spout_tasks"));

    // Construct the command to invoke the external bolt implementation
    ArrayList<String> command = new ArrayList<>(15);

    // Add main execution program (php, hhvm, zend, ...) and parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.executor"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.executor_params"));

    // Add main command to be executed (app/console, the phar file, etc.) and global context parameters (environment etc.)
    command.add((String) JsonPath.read(conf, "$.deck36_storm.php.main"));
    command.addAll((List<String>) JsonPath.read(conf, "$.deck36_storm.php.main_params"));

    // Add main route to be invoked and its parameters
    command.add((String) JsonPath.read(conf, "$.deck36_storm.RaiderOfTheKittenRobbersBolt.main"));
    List<String> boltParams = (List<String>) JsonPath.read(conf,
            "$.deck36_storm.RaiderOfTheKittenRobbersBolt.params");
    if (boltParams != null)
        command.addAll(boltParams);

    // Log the final command
    log.info("Command to start bolt for RaiderOfTheKittenRobbers badge: " + Arrays.toString(command.toArray()));

    // Add constructed external bolt command to topology using MultilangAdapterBolt
    builder.setBolt("badge", new MultilangAdapterBolt(command, "badge"), parallelism_hint)
            .setNumTasks(num_tasks).shuffleGrouping("incoming");

    builder.setBolt("rabbitmq_router",
            new Plan9RabbitMQRouterBolt(
                    (String) JsonPath.read(conf,
                            "$.deck36_storm.RaiderOfTheKittenRobbersBolt.rabbitmq.target_exchange"),
                    "RaiderOfTheKittenRobbers" // RabbitMQ routing key
            ), parallelism_hint).setNumTasks(num_tasks).shuffleGrouping("badge");

    builder.setBolt("rabbitmq_producer", new Plan9RabbitMQPushBolt(), parallelism_hint).setNumTasks(num_tasks)
            .shuffleGrouping("rabbitmq_router");

    if ("dev".equals(env)) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(badgeName + System.currentTimeMillis(), conf, builder.createTopology());
        Thread.sleep(2000000);
    }

    if ("prod".equals(env)) {
        StormSubmitter.submitTopology(badgeName + "-" + System.currentTimeMillis(), conf,
                builder.createTopology());
    }
}
From source file: de.prozesskraft.pkraft.Wrap.java
public static void main(String[] args) throws org.apache.commons.cli.ParseException, IOException {

    // try
    // {
    //     if (args.length != 3)
    //     {
    //         System.out.println("Please specify processdefinition file (xml) and an outputfilename");
    //     }
    // }
    // catch (ArrayIndexOutOfBoundsException e)
    // {
    //     System.out.println("***ArrayIndexOutOfBoundsException: Please specify processdefinition.xml, openoffice_template.od*, newfile_for_processdefinitions.odt\n" + e.toString());
    // }

    /*---------------------------- get options from ini-file ----------------------------*/
    File inifile = new java.io.File(
            WhereAmI.getInstallDirectoryAbsolutePath(Wrap.class) + "/" + "../etc/pkraft-wrap.ini");

    if (inifile.exists()) {
        try {
            ini = new Ini(inifile);
        } catch (InvalidFileFormatException e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
        } catch (IOException e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
        }
    } else {
        System.err.println("ini file does not exist: " + inifile.getAbsolutePath());
        System.exit(1);
    }

    /*---------------------------- create boolean options ----------------------------*/
    Option ohelp = new Option("help", "print this message");
    Option ov = new Option("v", "prints version and build-date");

    /*---------------------------- create argument options ----------------------------*/
    Option ooutput = OptionBuilder.withArgName("FILE").hasArg()
            .withDescription("[mandatory; default: .] file for generated wrapper process.")
            // .isRequired()
            .create("output");

    Option odefinition = OptionBuilder.withArgName("FILE").hasArg()
            .withDescription("[mandatory] process definition file.")
            // .isRequired()
            .create("definition");

    /*---------------------------- create options object ----------------------------*/
    Options options = new Options();
    options.addOption(ohelp);
    options.addOption(ov);
    options.addOption(ooutput);
    options.addOption(odefinition);

    /*---------------------------- create the parser ----------------------------*/
    CommandLineParser parser = new GnuParser();
    try {
        // parse the command line arguments
        commandline = parser.parse(options, args);
    } catch (Exception exp) {
        // oops, something went wrong
        System.err.println("Parsing failed. Reason: " + exp.getMessage());
        exiter();
    }

    /*---------------------------- usage/help ----------------------------*/
    if (commandline.hasOption("help")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("wrap", options);
        System.exit(0);
    } else if (commandline.hasOption("v")) {
        System.out.println("web: www.prozesskraft.de");
        System.out.println("version: [% version %]");
        System.out.println("date: [% date %]");
        System.exit(0);
    }

    /*---------------------------- check whether a bad combination of parameters was given ----------------------------*/
    if (!(commandline.hasOption("definition"))) {
        System.err.println("option -definition is mandatory.");
        exiter();
    }

    if (!(commandline.hasOption("output"))) {
        System.err.println("option -output is mandatory.");
        exiter();
    }

    /*---------------------------- check the license and abort if necessary ----------------------------*/
    // check for valid license
    ArrayList<String> allPortAtHost = new ArrayList<String>();
    allPortAtHost.add(ini.get("license-server", "license-server-1"));
    allPortAtHost.add(ini.get("license-server", "license-server-2"));
    allPortAtHost.add(ini.get("license-server", "license-server-3"));

    MyLicense lic = new MyLicense(allPortAtHost, "1", "user-edition", "0.1");

    // print the license log
    for (String actLine : (ArrayList<String>) lic.getLog()) {
        System.err.println(actLine);
    }

    // abort if the license is not valid
    if (!lic.isValid()) {
        System.exit(1);
    }

    /*---------------------------- the actual business logic ----------------------------*/
    Process p1 = new Process();

    java.io.File output = new java.io.File(commandline.getOptionValue("output"));
    if (output.exists()) {
        System.err.println("warn: already exists: " + output.getCanonicalPath());
        exiter();
    }

    p1.setInfilexml(commandline.getOptionValue("definition"));
    System.err.println("info: reading process definition " + commandline.getOptionValue("definition"));

    // dummy process
    Process p2 = null;
    try {
        p2 = p1.readXml();
    } catch (JAXBException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        System.err.println("error");
        exiter();
    }

    // generate the wrapper process
    Process p3 = p2.getProcessAsWrapper();
    p3.setOutfilexml(output.getAbsolutePath());

    // write out the new wrap process
    p3.writeXml();
}
From source file: com.chimpler.example.FacetLuceneIndexer.java
public static void main(String args[]) throws Exception {

    // if (args.length != 3) {
    //     System.err.println("Parameters: [index directory] [taxonomy directory] [json file]");
    //     System.exit(1);
    // }

    String indexDirectory = "index";
    String taxonomyDirectory = "taxonomy";
    String jsonFileName = "/home/qiuqiang/workspace/facet-lucene-example/books.json";

    IndexWriterConfig writerConfig = new IndexWriterConfig(LUCENE_VERSION,
            new WhitespaceAnalyzer(LUCENE_VERSION));
    writerConfig.setOpenMode(OpenMode.APPEND);
    IndexWriter indexWriter = new IndexWriter(FSDirectory.open(new File(indexDirectory)), writerConfig);

    TaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(MMapDirectory.open(new File(taxonomyDirectory)),
            OpenMode.APPEND);
    TaxonomyReader taxonomyReader = new DirectoryTaxonomyReader(FSDirectory.open(new File(taxonomyDirectory)));

    String content = IOUtils.toString(new FileInputStream(jsonFileName));
    JSONArray bookArray = new JSONArray(content);

    Field idField = new IntField("id", 0, Store.YES);
    Field titleField = new TextField("title", "", Store.YES);
    Field authorsField = new TextField("authors", "", Store.YES);
    Field bookCategoryField = new TextField("book_category", "", Store.YES);

    indexWriter.deleteAll();

    FacetFields facetFields = new FacetFields(taxonomyWriter);

    for (int i = 0; i < bookArray.length(); i++) {
        Document document = new Document();

        JSONObject book = bookArray.getJSONObject(i);
        int id = book.getInt("id");
        String title = book.getString("title");
        String bookCategory = book.getString("book_category");

        List<CategoryPath> categoryPaths = new ArrayList<CategoryPath>();

        String authorsString = "";
        JSONArray authors = book.getJSONArray("authors");
        for (int j = 0; j < authors.length(); j++) {
            String author = authors.getString(j);
            if (j > 0) {
                authorsString += ", ";
            }
            categoryPaths.add(new CategoryPath("author", author));
            authorsString += author;
        }
        categoryPaths.add(new CategoryPath("book_category" + bookCategory, '/'));

        idField.setIntValue(id);
        titleField.setStringValue(title);
        authorsField.setStringValue(authorsString);
        bookCategoryField.setStringValue(bookCategory);

        facetFields.addFields(document, categoryPaths);
        document.add(idField);
        document.add(titleField);
        document.add(authorsField);
        document.add(bookCategoryField);

        indexWriter.addDocument(document);

        System.out.printf("Book: id=%d, title=%s, book_category=%s, authors=%s\n", id, title, bookCategory,
                authors);
    }

    taxonomyWriter.prepareCommit();
    try {
        taxonomyWriter.commit();
    } catch (Exception e) {
        taxonomyWriter.rollback();
    }

    // taxonomyWriter.close();
    //
    // indexWriter.commit();
    // indexWriter.close();

    String query = "story";

    IndexReader indexReader = DirectoryReader.open(indexWriter, false);
    IndexReader indexReader2 = DirectoryReader.open(indexWriter, false);
    System.out.println(indexReader == indexReader2);

    IndexSearcher indexSearcher = new IndexSearcher(indexReader);

    TaxonomyReader newTaxonomyReader = DirectoryTaxonomyReader.openIfChanged(taxonomyReader);
    if (newTaxonomyReader != null) {
        TaxonomyReader tmp = taxonomyReader;
        taxonomyReader = newTaxonomyReader;
        tmp.close();
    } else {
        System.out.println("null");
    }

    ArrayList<FacetRequest> facetRequests = new ArrayList<FacetRequest>();
    facetRequests.add(new CountFacetRequest(new CategoryPath("author"), 100));
    facetRequests.add(new CountFacetRequest(new CategoryPath("book_category"), 100));

    FacetSearchParams searchParams = new FacetSearchParams(facetRequests);

    ComplexPhraseQueryParser queryParser = new ComplexPhraseQueryParser(LUCENE_VERSION, "title",
            new StandardAnalyzer(LUCENE_VERSION));
    Query luceneQuery = queryParser.parse(query);

    // Collectors to get top results and facets
    TopScoreDocCollector topScoreDocCollector = TopScoreDocCollector.create(10, true);
    FacetsCollector facetsCollector = FacetsCollector.create(searchParams, indexReader, taxonomyReader);
    indexSearcher.search(luceneQuery, MultiCollector.wrap(topScoreDocCollector, facetsCollector));

    System.out.println("Found:");
    for (ScoreDoc scoreDoc : topScoreDocCollector.topDocs().scoreDocs) {
        Document document = indexReader.document(scoreDoc.doc);
        System.out.printf("- book: id=%s, title=%s, book_category=%s, authors=%s, score=%f\n",
                document.get("id"), document.get("title"), document.get("book_category"),
                document.get("authors"), scoreDoc.score);
    }

    System.out.println("Facets:");
    for (FacetResult facetResult : facetsCollector.getFacetResults()) {
        System.out.println("- " + facetResult.getFacetResultNode().label);
        for (FacetResultNode facetResultNode : facetResult.getFacetResultNode().subResults) {
            System.out.printf(" - %s (%f)\n", facetResultNode.label.toString(), facetResultNode.value);
            for (FacetResultNode subFacetResultNode : facetResultNode.subResults) {
                System.out.printf(" - %s (%f)\n", subFacetResultNode.label.toString(),
                        subFacetResultNode.value);
            }
        }
    }

    taxonomyReader.close();
    indexReader.close();

    taxonomyWriter.commit();
    taxonomyWriter.close();

    indexWriter.commit();
    indexWriter.close();
}
From source file: com.cloudera.hts.utils.math.MyFunc2.java
public static void main(String[] args) {

    double[] x = { -5.0, -4.0, -3.0, -2.0, 0, 2, 3, 4 };
    double[] y = { 21, 13, 7, 3, 1, 7, 13, 21 };

    FunctionFitTest fitter = new FunctionFitTest();

    ArrayList<WeightedObservedPoint> points = new ArrayList<WeightedObservedPoint>();

    // Add points here; for instance,
    int i = 0;
    for (double xc : x) {
        if (i < 1) {
            WeightedObservedPoint point = new WeightedObservedPoint(xc, y[i], 1.0);
            points.add(point);
            System.out.println(xc + " " + y[i]);
        }
        i++;
    }

    final double coeffs[] = fitter.fit(points);

    System.out.println(Arrays.toString(coeffs));
}
From source file: edu.oregonstate.eecs.mcplan.domains.fuelworld.FuelWorldMDP.java
public static void main(final String[] argv) {

    final RandomGenerator rng = new MersenneTwister(42);
    final double discount = 0.99;
    final boolean choices = true;
    final FuelWorldState template;
    if (choices) {
        template = FuelWorldState.createDefaultWithChoices(rng);
    } else {
        template = FuelWorldState.createDefault(rng);
    }

    for (int i = 0; i < template.adjacency.size(); ++i) {
        System.out.print(i);
        System.out.print(" -> {");
        final TIntList succ = template.adjacency.get(i);
        for (int j = 0; j < succ.size(); ++j) {
            System.out.print(" " + succ.get(j));
        }
        System.out.println(" }");
    }

    final FuelWorldMDP mdp = new FuelWorldMDP(template);
    final int Nfeatures = new PrimitiveFuelWorldRepresentation(template).phi().length;

    final SparseValueIterationSolver<FuelWorldState, FuelWorldAction> vi = new SparseValueIterationSolver<FuelWorldState, FuelWorldAction>(
            mdp, discount);
    vi.run();

    final PrimitiveFuelWorldRepresenter repr = new PrimitiveFuelWorldRepresenter();
    final ArrayList<Attribute> attr = new ArrayList<Attribute>();
    attr.addAll(repr.attributes());
    attr.add(WekaUtil.createNominalAttribute("__label__", mdp.A().cardinality()));
    final Instances instances = WekaUtil
            .createEmptyInstances("fuelworld" + (choices ? "_choices" : "") + "_pistar", attr);

    final Policy<FuelWorldState, FuelWorldAction> pistar = vi.pistar();

    final Generator<FuelWorldState> g = mdp.S().generator();
    while (g.hasNext()) {
        final FuelWorldState s = g.next();
        if (s.location == s.goal) {
            continue;
        }
        pistar.setState(s, 0L);
        final FuelWorldAction astar = pistar.getAction();
        System.out.println("" + s + " -> " + astar);
        final double[] phi = new double[Nfeatures + 1];
        Fn.memcpy_as_double(phi, new PrimitiveFuelWorldRepresentation(s).phi(), Nfeatures);
        phi[Nfeatures] = mdp.A().index(astar);
        WekaUtil.addInstance(instances, new DenseInstance(1.0, phi));
    }

    WekaUtil.writeDataset(new File("."), instances);

    final MeanVarianceAccumulator ret = new MeanVarianceAccumulator();
    final MeanVarianceAccumulator steps = new MeanVarianceAccumulator();
    final int Ngames = 100000;
    for (int i = 0; i < Ngames; ++i) {
        final FuelWorldState s0;
        if (choices) {
            s0 = FuelWorldState.createDefaultWithChoices(rng);
        } else {
            s0 = FuelWorldState.createDefault(rng);
        }
        final FuelWorldSimulator sim = new FuelWorldSimulator(s0);

        final Episode<FuelWorldState, FuelWorldAction> episode = new Episode<FuelWorldState, FuelWorldAction>(
                sim, JointPolicy.create(pistar));
        final RewardAccumulator<FuelWorldState, FuelWorldAction> racc = new RewardAccumulator<FuelWorldState, FuelWorldAction>(
                sim.nagents(), discount);
        episode.addListener(racc);

        final long tstart = System.nanoTime();
        episode.run();
        final long tend = System.nanoTime();
        final double elapsed_ms = (tend - tstart) * 1e-6;

        ret.add(racc.v()[0]);
        steps.add(racc.steps());
    }

    System.out.println("****************************************");
    System.out.println("Average return: " + ret.mean());
    System.out.println("Return variance: " + ret.variance());
    System.out.println("Confidence: " + ret.confidence());
    System.out.println("Steps (mean): " + steps.mean());
    System.out.println("Steps (var): " + steps.variance());
}
From source file: edu.oregonstate.eecs.mcplan.domains.voyager.Main.java
/**
 * @param args
 */
public static void main(final String[] args) {

    System.out.println(args.toString());
    final String batch_name = args[0];
    final String[] instance_args = args[1].split(",");
    final String[] pi_args = args[2].split(",");
    final String[] phi_args = args[3].split(",");

    final File root_directory = createDirectory(args);
    final int Nplanets = Integer.parseInt(instance_args[0]);
    final int policy_epoch = Integer.parseInt(instance_args[1]);
    final int Nworlds = Integer.parseInt(instance_args[2]);
    final int max_time = Integer.parseInt(instance_args[3]);
    final int Nanytime = Integer.parseInt(instance_args[4]);

    // FIXME: This default_params thing is too error-prone! There's no
    // easy way to know whether you need to set a parameter in
    // 1) default_params
    // 2) an element of ps
    // 3) both places
    final VoyagerParameters default_params = new VoyagerParameters.Builder().Nplanets(Nplanets)
            .policy_epoch(policy_epoch).finish();

    final Environment default_environment = new Environment.Builder().root_directory(root_directory)
            .rng(new MersenneTwister(default_params.master_seed)).finish();

    final int[] anytime_times = new int[Nanytime];
    anytime_times[Nanytime - 1] = max_time;
    for (int i = Nanytime - 2; i >= 0; --i) {
        anytime_times[i] = anytime_times[i + 1] / 2;
    }

    final List<VoyagerParameters> ps = new ArrayList<VoyagerParameters>(Nanytime);
    for (final int t : anytime_times) {
        ps.add(new VoyagerParameters.Builder().max_time(t).Nplanets(Nplanets).policy_epoch(policy_epoch)
                .finish());
    }

    final List<VoyagerInstance> ws = new ArrayList<VoyagerInstance>(Nworlds);
    for (int i = 0; i < Nworlds; ++i) {
        // FIXME: Why default_params and not ps.get( i ) ?
        ws.add(new VoyagerInstance(default_params, default_environment.rng.nextInt()));
    }

    final MultipleInstanceMultipleWorldGenerator<VoyagerParameters, VoyagerInstance> experimental_setups = new MultipleInstanceMultipleWorldGenerator<VoyagerParameters, VoyagerInstance>(
            default_environment, ps, ws);

    final ArrayList<EpisodeListener<VoyagerState, VoyagerEvent>> extra_listeners = new ArrayList<EpisodeListener<VoyagerState, VoyagerEvent>>();
    if (default_params.use_monitor) {
        // TODO: Actually create the viewport!
        extra_listeners.add(new VisualizationUpdater());
    }

    final Experiment<VoyagerParameters, VoyagerInstance> experiment = new PolicyComparison<VoyagerState, VoyagerEvent, VoyagerParameters, VoyagerInstance>(
            createPolicy(pi_args), createPolicy(phi_args), extra_listeners);

    while (experimental_setups.hasNext()) {
        final ExperimentalSetup<VoyagerParameters, VoyagerInstance> setup = experimental_setups.next();
        experiment.setup(setup.environment, setup.parameters, setup.world);
        experiment.run();
        experiment.finish();
    }

    System.exit(0);
}