List of usage examples for java.util.concurrent ExecutorService execute
void execute(Runnable command);
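Before the project examples below, here is a minimal, self-contained sketch of the pattern they all follow: create a pool, hand it Runnable tasks via execute(), then shut the pool down. The task body, pool size, and timeout are illustrative only and are not taken from any of the source files.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecuteExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4); // illustrative pool size

        for (int i = 1; i <= 10; i++) {
            final int taskId = i; // copy the loop variable so the lambda can capture it
            // execute() schedules the Runnable for asynchronous execution and returns immediately
            executor.execute(() -> System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }

        executor.shutdown(); // stop accepting new tasks
        if (!executor.awaitTermination(1, TimeUnit.MINUTES)) { // wait for submitted tasks to finish
            executor.shutdownNow();
        }
    }
}

Unlike submit(), execute() returns no Future, so an exception thrown by the Runnable goes to the worker thread's uncaught-exception handler instead of being captured for later inspection.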
From source file:com.netcore.hsmart.smsconsumers.SmsConsumer561Multipart.java
public static void main(String args[]) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    for (int i = 1; i <= COUNTERS; i++) {
        Runnable worker = new SmsConsumer561MultipartRunnable(Integer.toString(i));
        executor.execute(worker);
    }
}
From source file:fr.inria.edelweiss.kgdqp.core.CentralizedInferrencing.java
public static void main(String args[])
        throws ParseException, EngineException, InterruptedException, IOException {

    List<String> endpoints = new ArrayList<String>();
    String queryPath = null;
    boolean rulesSelection = false;
    File rulesDir = null;
    File ontDir = null;

    /////////////////
    Graph graph = Graph.create();
    QueryProcess exec = QueryProcess.create(graph);

    Options options = new Options();
    Option helpOpt = new Option("h", "help", false, "print this message");
    // Option queryOpt = new Option("q", "query", true, "specify the sparql query file");
    // Option endpointOpt = new Option("e", "endpoint", true, "a federated sparql endpoint URL");
    Option versionOpt = new Option("v", "version", false, "print the version information and exit");
    Option rulesOpt = new Option("r", "rulesDir", true, "directory containing the inference rules");
    Option ontOpt = new Option("o", "ontologiesDir", true,
            "directory containing the ontologies for rules selection");
    // Option locOpt = new Option("c", "centralized", false, "performs centralized inferences");
    Option dataOpt = new Option("l", "load", true, "data file or directory to be loaded");
    // Option selOpt = new Option("s", "rulesSelection", false, "if set to true, only the applicable rules are run");
    // options.addOption(queryOpt);
    // options.addOption(endpointOpt);
    options.addOption(helpOpt);
    options.addOption(versionOpt);
    options.addOption(rulesOpt);
    options.addOption(ontOpt);
    // options.addOption(selOpt);
    // options.addOption(locOpt);
    options.addOption(dataOpt);

    String header = "Corese/KGRAM rule engine experiment command line interface";
    String footer = "\nPlease report any issue to alban.gaignard@cnrs.fr, olivier.corby@inria.fr";

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("kgdqp", header, options, footer, true);
        System.exit(0);
    }
    if (cmd.hasOption("o")) {
        rulesSelection = true;
        String ontDirPath = cmd.getOptionValue("o");
        ontDir = new File(ontDirPath);
        if (!ontDir.isDirectory()) {
            logger.warn(ontDirPath + " is not a valid directory path.");
            System.exit(0);
        }
    }
    if (!cmd.hasOption("r")) {
        logger.info("You must specify a path for inference rules directory !");
        System.exit(0);
    }
    if (cmd.hasOption("l")) {
        String[] dataPaths = cmd.getOptionValues("l");
        for (String path : dataPaths) {
            Load ld = Load.create(graph);
            ld.load(path);
            logger.info("Loaded " + path);
        }
    }
    if (cmd.hasOption("v")) {
        logger.info("version 3.0.4-SNAPSHOT");
        System.exit(0);
    }

    String rulesDirPath = cmd.getOptionValue("r");
    rulesDir = new File(rulesDirPath);
    if (!rulesDir.isDirectory()) {
        logger.warn(rulesDirPath + " is not a valid directory path.");
        System.exit(0);
    }

    // Local rules graph initialization
    Graph rulesG = Graph.create();
    Load ld = Load.create(rulesG);

    if (rulesSelection) {
        // Ontology loading
        if (ontDir.isDirectory()) {
            for (File o : ontDir.listFiles()) {
                logger.info("Loading " + o.getAbsolutePath());
                ld.load(o.getAbsolutePath());
            }
        }
    }

    // Rules loading
    if (rulesDir.isDirectory()) {
        for (File r : rulesDir.listFiles()) {
            logger.info("Loading " + r.getAbsolutePath());
            ld.load(r.getAbsolutePath());
        }
    }

    // Rule engine initialization
    RuleEngine ruleEngine = RuleEngine.create(graph);
    ruleEngine.set(exec);
    ruleEngine.setOptimize(true);
    ruleEngine.setConstructResult(true);
    ruleEngine.setTrace(true);

    StopWatch sw = new StopWatch();
    logger.info("Federated graph size : " + graph.size());
    logger.info("Rules graph size : " + rulesG.size());

    // Rule selection
    logger.info("Rules selection");
    QueryProcess localKgram = QueryProcess.create(rulesG);
    ArrayList<String> applicableRules = new ArrayList<String>();
    sw.start();
    String rulesSelQuery = "";
    if (rulesSelection) {
        rulesSelQuery = pertinentRulesQuery;
    } else {
        rulesSelQuery = allRulesQuery;
    }
    Mappings maps = localKgram.query(rulesSelQuery);
    logger.info("Rules selected in " + sw.getTime() + " ms");
    logger.info("Applicable rules : " + maps.size());

    // Selected rule loading
    for (Mapping map : maps) {
        IDatatype dt = (IDatatype) map.getValue("?res");
        String rule = dt.getLabel();
        // loading rule in the rule engine
        // logger.info("Adding rule : ");
        // System.out.println("-------");
        // System.out.println(rule);
        // System.out.println("");
        // if (! rule.toLowerCase().contains("sameas")) {
        applicableRules.add(rule);
        ruleEngine.addRule(rule);
        // }
    }

    // Rules application on distributed sparql endpoints
    logger.info("Rules application (" + applicableRules.size() + " rules)");
    ExecutorService threadPool = Executors.newCachedThreadPool();
    RuleEngineThread ruleThread = new RuleEngineThread(ruleEngine);
    sw.reset();
    sw.start();

    // ruleEngine.process();
    threadPool.execute(ruleThread);
    threadPool.shutdown();

    // monitoring loop
    while (!threadPool.isTerminated()) {
        // System.out.println("******************************");
        // System.out.println(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));
        // System.out.println("Rule engine running for " + sw.getTime() + " ms");
        // System.out.println("Federated graph size : " + graph.size());
        System.out.println(sw.getTime() + " , " + graph.size());
        Thread.sleep(5000);
    }
    logger.info("Federated graph size : " + graph.size());
    // logger.info(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));

    // TripleFormat f = TripleFormat.create(graph, true);
    // f.write("/tmp/gAll.ttl");
}
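The example above runs the rule engine on a cached thread pool via execute(), calls shutdown(), and then polls isTerminated() every five seconds to print progress. When no periodic reporting is needed, the same wait can be written without a polling loop. A minimal sketch, assuming the same RuleEngineThread runnable as the example, a java.util.concurrent.TimeUnit import, and an arbitrary illustrative timeout:

ExecutorService threadPool = Executors.newCachedThreadPool();
threadPool.execute(ruleThread);   // ruleThread wraps the rule engine, as in the example
threadPool.shutdown();            // no further tasks will be accepted
// block until the rule engine finishes or the timeout elapses
if (!threadPool.awaitTermination(24, TimeUnit.HOURS)) {
    threadPool.shutdownNow();     // give up and interrupt the worker
}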
From source file:com.netcore.hsmart.dlrconsumers.DlrConsumer562.java
public static void main(String args[]) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    for (int i = 1; i <= COUNTERS; i++) {
        Runnable worker = new DlrConsumer562Runnable(Integer.toString(i));
        executor.execute(worker);
    }
}
From source file:com.netcore.hsmart.dlrconsumers.DlrConsumer563.java
public static void main(String args[]) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    for (int i = 1; i <= COUNTERS; i++) {
        Runnable worker = new DlrConsumer563Runnable(Integer.toString(i));
        executor.execute(worker);
    }
}
From source file:com.netcore.hsmart.dlrconsumers.DlrConsumer564.java
public static void main(String args[]) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    for (int i = 1; i <= COUNTERS; i++) {
        Runnable worker = new DlrConsumer564Runnable(Integer.toString(i));
        executor.execute(worker);
    }
}
From source file:com.netcore.hsmart.dlrconsumers.DlrConsumer565.java
public static void main(String args[]) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    for (int i = 1; i <= COUNTERS; i++) {
        Runnable worker = new DlrConsumer565Runnable(Integer.toString(i));
        executor.execute(worker);
    }
}
From source file:fr.inria.edelweiss.kgdqp.core.CentralizedInferrencingNoSpin.java
public static void main(String args[])
        throws ParseException, EngineException, InterruptedException, IOException, LoadException {

    List<String> endpoints = new ArrayList<String>();
    String queryPath = null;
    boolean rulesSelection = false;
    File rulesDir = null;
    File ontDir = null;

    /////////////////
    Graph graph = Graph.create();
    QueryProcess exec = QueryProcess.create(graph);

    Options options = new Options();
    Option helpOpt = new Option("h", "help", false, "print this message");
    // Option queryOpt = new Option("q", "query", true, "specify the sparql query file");
    // Option endpointOpt = new Option("e", "endpoint", true, "a federated sparql endpoint URL");
    Option versionOpt = new Option("v", "version", false, "print the version information and exit");
    Option rulesOpt = new Option("r", "rulesDir", true, "directory containing the inference rules");
    Option ontOpt = new Option("o", "ontologiesDir", true,
            "directory containing the ontologies for rules selection");
    // Option locOpt = new Option("c", "centralized", false, "performs centralized inferences");
    Option dataOpt = new Option("l", "load", true, "data file or directory to be loaded");
    // Option selOpt = new Option("s", "rulesSelection", false, "if set to true, only the applicable rules are run");
    // options.addOption(queryOpt);
    // options.addOption(endpointOpt);
    options.addOption(helpOpt);
    options.addOption(versionOpt);
    options.addOption(rulesOpt);
    options.addOption(ontOpt);
    // options.addOption(selOpt);
    // options.addOption(locOpt);
    options.addOption(dataOpt);

    String header = "Corese/KGRAM rule engine experiment command line interface";
    String footer = "\nPlease report any issue to alban.gaignard@cnrs.fr, olivier.corby@inria.fr";

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("kgdqp", header, options, footer, true);
        System.exit(0);
    }
    if (cmd.hasOption("o")) {
        rulesSelection = true;
        String ontDirPath = cmd.getOptionValue("o");
        ontDir = new File(ontDirPath);
        if (!ontDir.isDirectory()) {
            logger.warn(ontDirPath + " is not a valid directory path.");
            System.exit(0);
        }
    }
    if (!cmd.hasOption("r")) {
        logger.info("You must specify a path for inference rules directory !");
        System.exit(0);
    }
    if (cmd.hasOption("l")) {
        String[] dataPaths = cmd.getOptionValues("l");
        for (String path : dataPaths) {
            Load ld = Load.create(graph);
            ld.load(path);
            logger.info("Loaded " + path);
        }
    }
    if (cmd.hasOption("v")) {
        logger.info("version 3.0.4-SNAPSHOT");
        System.exit(0);
    }

    String rulesDirPath = cmd.getOptionValue("r");
    rulesDir = new File(rulesDirPath);
    if (!rulesDir.isDirectory()) {
        logger.warn(rulesDirPath + " is not a valid directory path.");
        System.exit(0);
    }

    // Local rules graph initialization
    Graph rulesG = Graph.create();
    Load ld = Load.create(rulesG);

    if (rulesSelection) {
        // Ontology loading
        if (ontDir.isDirectory()) {
            for (File o : ontDir.listFiles()) {
                logger.info("Loading " + o.getAbsolutePath());
                ld.load(o.getAbsolutePath());
            }
        }
    }

    // Rules loading
    if (rulesDir.isDirectory()) {
        for (File r : rulesDir.listFiles()) {
            if (r.getAbsolutePath().endsWith(".rq")) {
                logger.info("Loading " + r.getAbsolutePath());
                // ld.load(r.getAbsolutePath());
                // byte[] encoded = Files.readAllBytes(Paths.get(r.getAbsolutePath()));
                // String construct = new String(encoded, "UTF-8"); //StandardCharsets.UTF_8);
                FileInputStream f = new FileInputStream(r);
                QueryLoad ql = QueryLoad.create();
                String construct = ql.read(f);
                f.close();
                SPINProcess sp = SPINProcess.create();
                String spinConstruct = sp.toSpin(construct);
                ld.load(new ByteArrayInputStream(spinConstruct.getBytes()), Load.TURTLE_FORMAT);
                logger.info("Rules graph size : " + rulesG.size());
            }
        }
    }

    // Rule engine initialization
    RuleEngine ruleEngine = RuleEngine.create(graph);
    ruleEngine.set(exec);
    ruleEngine.setOptimize(true);
    ruleEngine.setConstructResult(true);
    ruleEngine.setTrace(true);

    StopWatch sw = new StopWatch();
    logger.info("Federated graph size : " + graph.size());
    logger.info("Rules graph size : " + rulesG.size());

    // Rule selection
    logger.info("Rules selection");
    QueryProcess localKgram = QueryProcess.create(rulesG);
    ArrayList<String> applicableRules = new ArrayList<String>();
    sw.start();
    String rulesSelQuery = "";
    if (rulesSelection) {
        rulesSelQuery = pertinentRulesQuery;
    } else {
        rulesSelQuery = allRulesQuery;
    }
    Mappings maps = localKgram.query(rulesSelQuery);
    logger.info("Rules selected in " + sw.getTime() + " ms");
    logger.info("Applicable rules : " + maps.size());

    // Selected rule loading
    for (Mapping map : maps) {
        IDatatype dt = (IDatatype) map.getValue("?res");
        String rule = dt.getLabel();
        // loading rule in the rule engine
        // logger.info("Adding rule : ");
        // System.out.println("-------");
        // System.out.println(rule);
        // System.out.println("");
        // if (! rule.toLowerCase().contains("sameas")) {
        applicableRules.add(rule);
        ruleEngine.addRule(rule);
        // }
    }

    // Rules application on distributed sparql endpoints
    logger.info("Rules application (" + applicableRules.size() + " rules)");
    ExecutorService threadPool = Executors.newCachedThreadPool();
    RuleEngineThread ruleThread = new RuleEngineThread(ruleEngine);
    sw.reset();
    sw.start();

    // ruleEngine.process();
    threadPool.execute(ruleThread);
    threadPool.shutdown();

    // monitoring loop
    while (!threadPool.isTerminated()) {
        // System.out.println("******************************");
        // System.out.println(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));
        // System.out.println("Rule engine running for " + sw.getTime() + " ms");
        // System.out.println("Federated graph size : " + graph.size());
        System.out.println(sw.getTime() + " , " + graph.size());
        Thread.sleep(5000);
    }
    logger.info("Federated graph size : " + graph.size());
    // logger.info(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter, QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));

    // TripleFormat f = TripleFormat.create(graph, true);
    // f.write("/tmp/gAll.ttl");
}
From source file:com.netcore.hsmart.dlrconsumers.DlrConsumer561.java
public static void main(String args[]) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    for (int i = 1; i <= COUNTERS; i++) {
        Runnable worker = new DlrConsumer561Runnable(Integer.toString(i));
        executor.execute(worker);
    }
}
From source file:org.timconrad.vmstats.Main.java
public static void main(String[] args) {

    Logger logger = LoggerFactory.getLogger(Main.class);
    Properties config = new Properties();
    Boolean showPerfMgr = false;
    Boolean showEstimate = false;
    Boolean noThreads = false;
    Boolean noGraphite = false;
    File configFile = new File("vmstats.properties");
    Hashtable<String, String> appConfig = new Hashtable<String, String>();

    CommandLineParser parser = new PosixParser();
    Options options = new Options();
    options.addOption("P", "perfMgr", false, "Display Performance Manager Counters and exit");
    options.addOption("E", "estimate", false, "Estimate the # of counters written to graphite and exit");
    options.addOption("N", "noThreads", false,
            "Don't start any threads, just run the main part (helpful for troubleshooting initial issues");
    options.addOption("g", "noGraphite", false, "Don't send anything to graphite");
    options.addOption("c", "configFile", true,
            "Configuration file for vmstats - defaults to 'vmstats.properties' in the .jar directory");
    options.addOption("O", "runOnce", false, "Run the stats gatherer one time - useful for debugging");
    options.addOption("D", "debugOutput", false, "Dump the output to a thread-named file.");
    options.addOption("h", "help", false, "show help");

    try {
        CommandLine line = parser.parse(options, args);
        if (line.hasOption("help")) {
            System.out.println("vmstats.jar -Dlog4j.configuration=file:/path/to/log4j.properties [options]");
            HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp("vmstats.jar", options);
            System.exit(0);
        }
        if (line.hasOption("perfMgr")) {
            showPerfMgr = true;
        }
        if (line.hasOption("estimate")) {
            showEstimate = true;
        }
        if (line.hasOption("noThreads")) {
            noThreads = true;
        }
        if (line.hasOption("noGraphite")) {
            noGraphite = true;
        }
        if (line.hasOption("runOnce")) {
            appConfig.put("runOnce", "true");
        } else {
            appConfig.put("runOnce", "false");
        }
        if (line.hasOption("debugOutput")) {
            appConfig.put("debugOutput", "true");
        } else {
            appConfig.put("debugOutput", "false");
        }
        if (line.hasOption("configFile")) {
            // if the user adds a custom config flag, use it. Otherwise they'll get the default.
            String file = line.getOptionValue("configFile");
            File optFile = new File(file);
            boolean exists = optFile.exists();
            // check to make sure the file exists.
            if (!exists) {
                System.out.println("The configuration file doesn't seem to exist in path: " + file);
                System.exit(0);
            } else {
                configFile = optFile;
            }
        }
    } catch (org.apache.commons.cli.ParseException e) {
        System.out.println("CLI options exception: " + e.getMessage());
        e.printStackTrace();
    }

    try {
        config.load(new FileInputStream(configFile));
    } catch (FileNotFoundException e) {
        logger.info("Configuration file not found!\n\tException: " + e);
        System.exit(-1);
    } catch (IOException e) {
        logger.info("Configuration file not found!\n\tException: " + e);
        System.exit(-1);
    }

    Enumeration configOpts = config.propertyNames();
    // this will have to be manually updated.
    String[] expectedOptions = { "VCS_TAG", "VCS_USER", "GRAPHITE_PORT", "GRAPHITE_TAG", "VCS_HOST", "VCS_PASS",
            "MAX_VMSTAT_THREADS", "GRAPHITE_HOST", "ESX_STATS", "USE_FQDN", "SLEEP_TIME", "SEND_ALL_ABSOLUTE",
            "SEND_ALL_DELTA", "SEND_ALL_PERIODS" };
    ArrayList<String> matchedOptions = new ArrayList<String>();
    while (configOpts.hasMoreElements()) {
        String optTmp = (String) configOpts.nextElement();
        for (int i = 0; i < expectedOptions.length; i++) {
            if (optTmp.equals(expectedOptions[i])) {
                matchedOptions.add(optTmp);
            }
        }
    }

    if (expectedOptions.length != matchedOptions.size()) {
        // this kinda blows, but better than throwing a null pointer exception
        // or doing try/catch for each possible option below.
        System.out.println("Configuration file options are missing");
        System.exit(-1);
    }

    // Get settings from config file
    String vcsHostRaw = config.getProperty("VCS_HOST");
    String vcsUser = config.getProperty("VCS_USER");
    String vcsPass = config.getProperty("VCS_PASS");
    String vcsTag = config.getProperty("VCS_TAG");

    String vcsFilter = config.getProperty("FILTERFILE");
    if (vcsFilter != null) {
        String filterfile = vcsFilter;
        File FilterFile = new File(filterfile);
        List<String> FilterList = null;
        try {
            FilterList = FileUtils.readLines(FilterFile);
        } catch (IOException e) {
            e.printStackTrace();
        }
        FilterArrays = FilterList.toArray(new String[] {});
    }

    appConfig.put("vcsTag", vcsTag);
    // vcs information
    // this needs to be https://host/sdk
    String vcsHost = "https://" + vcsHostRaw + "/sdk";
    String graphEsx = config.getProperty("ESX_STATS");
    appConfig.put("USE_FQDN", config.getProperty("USE_FQDN"));
    appConfig.put("graphEsx", graphEsx);

    // graphite information
    String graphiteHost = config.getProperty("GRAPHITE_HOST");
    int graphitePort = Integer.parseInt(config.getProperty("GRAPHITE_PORT"));
    String graphiteTag = config.getProperty("GRAPHITE_TAG");
    try {
        appConfig.put("graphiteTag", graphiteTag);
    } catch (NullPointerException e) {
        System.out.println("Issue with configuration file - Missing GRAPHITE_TAG");
        System.exit(-1);
    }

    // TODO: make this dynamic. maybe.
    int MAX_VMSTAT_THREADS = Integer.parseInt(config.getProperty("MAX_VMSTAT_THREADS"));
    int MAX_ESXSTAT_THREADS = Integer.parseInt(config.getProperty("MAX_ESXSTAT_THREADS"));
    int MAX_GRAPHITE_THREADS = Integer.parseInt(config.getProperty("MAX_GRAPHITE_THREADS"));
    appConfig.put("MAX_VMSTAT_THREADS", String.valueOf(MAX_VMSTAT_THREADS));
    appConfig.put("MAX_ESXSTAT_THREADS", String.valueOf(MAX_ESXSTAT_THREADS));

    String SLEEP_TIME = config.getProperty("SLEEP_TIME");
    appConfig.put("SLEEP_TIME", SLEEP_TIME);
    String SEND_ALL_PERIODS = config.getProperty("SEND_ALL_PERIODS");
    appConfig.put("SEND_ALL_PERIODS", SEND_ALL_PERIODS);

    int sleep_time = Integer.parseInt(SLEEP_TIME);
    if ((sleep_time % 20) == 0) {
        int periods = sleep_time / 20;
        appConfig.put("PERIODS", String.valueOf(periods));
    } else {
        System.out.println("SLEEP_TIME needs to be divisible by 20, please fix");
        System.exit(-1);
    }

    appConfig.put("SEND_ALL_ABSOLUTE", config.getProperty("SEND_ALL_ABSOLUTE"));
    appConfig.put("SEND_ALL_DELTA", config.getProperty("SEND_ALL_DELTA"));

    // Build internal data structures.
    // use a hashtable to store performance id information
    Hashtable<String, Hashtable<String, String>> perfKeys = new Hashtable<String, Hashtable<String, String>>();
    // BlockingQueue to store managed objects - basically anything that vmware knows about
    BlockingQueue<Object> vm_mob_queue = new ArrayBlockingQueue<Object>(10000);
    BlockingQueue<Object> esx_mob_queue = new ArrayBlockingQueue<Object>(10000);
    // BlockingQueue to store arrays of stats - each managed object generates a bunch of strings that are stored in
    BlockingQueue<Object> sender = new ArrayBlockingQueue<Object>(60000);

    // Initialize these vmware types as nulls so we can see if things work properly
    ServiceInstance si = null;
    PerformanceManager perfMgr = null;

    try {
        // TODO: this doesn't handle some ASCII characters well, not sure why.
        si = new ServiceInstance(new URL(vcsHost), vcsUser, vcsPass, true);
    } catch (InvalidLogin e) {
        logger.info("Invalid login vCenter: " + vcsHost + " User: " + vcsUser);
        System.exit(-1);
    } catch (RemoteException e) {
        logger.info("Remote exception: " + e);
        e.printStackTrace();
    } catch (MalformedURLException e) {
        logger.info("MalformedURLexception: " + e);
        e.printStackTrace();
    }

    if (si != null) {
        perfMgr = si.getPerformanceManager();
        PerfCounterInfo[] counters = perfMgr.getPerfCounter();
        // build a hash lookup to turn the counter 23 into 'disk.this.that.the.other'
        // These are not sequential.
        for (int i = 0; i < counters.length; i++) {
            // create a temp hash to push onto the big hash
            Hashtable<String, String> temp_hash = new Hashtable<String, String>();
            String path = counters[i].getGroupInfo().getKey() + "." + counters[i].getNameInfo().getKey();
            // this is a key like cpu.run.0.summation
            temp_hash.put("key", path);
            // one of average, latest, maximum, minimum, none, summation
            temp_hash.put("rollup", counters[i].getRollupType().toString());
            // one of absolute, delta, rate
            temp_hash.put("statstype", counters[i].getStatsType().toString());
            // it's important to understand that the counters aren't sequential, so they have their own id.
            perfKeys.put("" + counters[i].getKey(), temp_hash);
        }
    } else {
        logger.info("Issues with the service instance that wasn't properly handled");
        System.exit(-1);
    }

    if (showPerfMgr) {
        // show the performance keys that are available to the user
        System.out.println("Showing Performance Counter Entities available:");
        System.out.println("Read the following link for more information:");
        System.out.println(
                "http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vim.PerformanceManager.html");
        Enumeration<String> keys = perfKeys.keys();
        System.out.println("ID|Tag|Rollup");
        while (keys.hasMoreElements()) {
            String key = (String) keys.nextElement();
            System.out.println(key + "|" + perfKeys.get(key).get("key") + "|" + perfKeys.get(key).get("rollup"));
        }
        System.exit(0);
    }

    if (showEstimate) {
        // estimate the number of keys that will be updated/written to Graphite per minute
        System.out.println("Currently Disabled");
    }

    // this gets the lists of vm's from vCenter
    if (!noThreads) {
        if (si != null && perfMgr != null) {
            logger.info("ServiceInstance: " + si);
            logger.info("PerformanceManager: " + perfMgr);
            meGrabber me_grabber = new meGrabber(si, vm_mob_queue, esx_mob_queue, appConfig, sender);
            ExecutorService grab_exe = Executors.newCachedThreadPool();
            grab_exe.execute(me_grabber);
            // it's easier sometimes to debug things without stats being sent to graphite.
            // make noGraphite = true; to change this.
            if (!noGraphite) {
                for (int i = 1; i <= MAX_GRAPHITE_THREADS; i++) {
                    GraphiteWriter graphite = new GraphiteWriter(graphiteHost, graphitePort, sender, appConfig);
                    ExecutorService graph_exe = Executors.newCachedThreadPool();
                    graph_exe.execute(graphite);
                }
            } else {
                System.out.println("Graphite output has been disabled via the -g flag.");
            }
            for (int i = 1; i <= MAX_VMSTAT_THREADS; i++) {
                statsGrabber vm_stats_grabber = new statsGrabber(perfMgr, perfKeys, vm_mob_queue, sender,
                        appConfig, "vm", FilterArrays);
                ExecutorService vm_stat_exe = Executors.newCachedThreadPool();
                vm_stat_exe.execute(vm_stats_grabber);
            }
            if (graphEsx.contains("true")) {
                for (int i = 1; i <= MAX_ESXSTAT_THREADS; i++) {
                    statsGrabber esx_stats_grabber = new statsGrabber(perfMgr, perfKeys, esx_mob_queue, sender,
                            appConfig, "ESX", FilterArrays);
                    ExecutorService esx_stat_exe = Executors.newCachedThreadPool();
                    esx_stat_exe.execute(esx_stats_grabber);
                }
            }
        } else {
            logger.info("Either ServiceInstance or PerformanceManager is null, bailing.");
        }
    } else {
        System.out.println("Not running any of the main threads");
        System.exit(0);
    }
}
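In the example above a fresh cached thread pool is created for every GraphiteWriter and statsGrabber worker, so each pool ends up running exactly one long-lived task. A single shared pool gives the same threads with less bookkeeping. A minimal sketch reusing the names from the example (everything else is standard java.util.concurrent); this is only an alternative arrangement, not a statement about the project's actual design:

ExecutorService workers = Executors.newCachedThreadPool();
for (int i = 1; i <= MAX_GRAPHITE_THREADS; i++) {
    // each execute() call gets its own thread from the cached pool
    workers.execute(new GraphiteWriter(graphiteHost, graphitePort, sender, appConfig));
}
for (int i = 1; i <= MAX_VMSTAT_THREADS; i++) {
    workers.execute(new statsGrabber(perfMgr, perfKeys, vm_mob_queue, sender, appConfig, "vm", FilterArrays));
}
// workers.shutdown() would let the already-submitted long-running workers keep going
// while refusing any new tasks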
From source file:fr.inria.edelweiss.kgdqp.core.FedInferrencingCLI.java
public static void main(String args[]) throws ParseException, EngineException, InterruptedException {

    List<String> endpoints = new ArrayList<String>();
    String queryPath = null;
    boolean rulesSelection = false;
    File rulesDir = null;
    File ontDir = null;

    Options options = new Options();
    Option helpOpt = new Option("h", "help", false, "print this message");
    Option queryOpt = new Option("q", "query", true, "specify the sparql query file");
    Option endpointOpt = new Option("e", "endpoint", true, "a federated sparql endpoint URL");
    Option versionOpt = new Option("v", "version", false, "print the version information and exit");
    Option rulesOpt = new Option("r", "rulesDir", true, "directory containing the inference rules");
    Option ontOpt = new Option("o", "ontologiesDir", true,
            "directory containing the ontologies for rules selection");
    // Option selOpt = new Option("s", "rulesSelection", false, "if set to true, only the applicable rules are run");
    options.addOption(queryOpt);
    options.addOption(endpointOpt);
    options.addOption(helpOpt);
    options.addOption(versionOpt);
    options.addOption(rulesOpt);
    options.addOption(ontOpt);
    // options.addOption(selOpt);

    String header = "Corese/KGRAM distributed rule engine command line interface";
    String footer = "\nPlease report any issue to alban.gaignard@cnrs.fr, olivier.corby@inria.fr";

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("kgdqp", header, options, footer, true);
        System.exit(0);
    }
    if (!cmd.hasOption("e")) {
        logger.info("You must specify at least the URL of one sparql endpoint !");
        System.exit(0);
    } else {
        endpoints = new ArrayList<String>(Arrays.asList(cmd.getOptionValues("e")));
    }
    if (cmd.hasOption("o")) {
        rulesSelection = true;
        String ontDirPath = cmd.getOptionValue("o");
        ontDir = new File(ontDirPath);
        if (!ontDir.isDirectory()) {
            logger.warn(ontDirPath + " is not a valid directory path.");
            System.exit(0);
        }
    }
    if (!cmd.hasOption("r")) {
        logger.info("You must specify a path for inference rules directory !");
        System.exit(0);
    } else if (rulesSelection) {
    }
    if (cmd.hasOption("v")) {
        logger.info("version 3.0.4-SNAPSHOT");
        System.exit(0);
    }

    String rulesDirPath = cmd.getOptionValue("r");
    rulesDir = new File(rulesDirPath);
    if (!rulesDir.isDirectory()) {
        logger.warn(rulesDirPath + " is not a valid directory path.");
        System.exit(0);
    }

    /////////////////
    Graph graph = Graph.create();
    QueryProcessDQP execDQP = QueryProcessDQP.create(graph);
    for (String url : endpoints) {
        try {
            execDQP.addRemote(new URL(url), WSImplem.REST);
        } catch (MalformedURLException ex) {
            logger.error(url + " is not a well-formed URL");
            System.exit(1);
        }
    }

    // Local rules graph initialization
    Graph rulesG = Graph.create();
    Load ld = Load.create(rulesG);

    if (rulesSelection) {
        // Ontology loading
        if (ontDir.isDirectory()) {
            for (File o : ontDir.listFiles()) {
                logger.info("Loading " + o.getAbsolutePath());
                ld.load(o.getAbsolutePath());
            }
        }
    }

    // Rules loading
    if (rulesDir.isDirectory()) {
        for (File r : rulesDir.listFiles()) {
            logger.info("Loading " + r.getAbsolutePath());
            ld.load(r.getAbsolutePath());
        }
    }

    // Rule engine initialization
    RuleEngine ruleEngine = RuleEngine.create(graph);
    ruleEngine.set(execDQP);

    StopWatch sw = new StopWatch();
    logger.info("Federated graph size : " + graph.size());
    logger.info("Rules graph size : " + rulesG.size());

    // Rule selection
    logger.info("Rules selection");
    QueryProcess localKgram = QueryProcess.create(rulesG);
    ArrayList<String> applicableRules = new ArrayList<String>();
    sw.start();
    String rulesSelQuery = "";
    if (rulesSelection) {
        rulesSelQuery = pertinentRulesQuery;
    } else {
        rulesSelQuery = allRulesQuery;
    }
    Mappings maps = localKgram.query(rulesSelQuery);
    logger.info("Rules selected in " + sw.getTime() + " ms");
    logger.info("Applicable rules : " + maps.size());

    // Selected rule loading
    for (Mapping map : maps) {
        IDatatype dt = (IDatatype) map.getValue("?res");
        String rule = dt.getLabel();
        // loading rule in the rule engine
        // logger.info("Adding rule : " + rule);
        applicableRules.add(rule);
        ruleEngine.addRule(rule);
    }

    // Rules application on distributed sparql endpoints
    logger.info("Rules application (" + applicableRules.size() + " rules)");
    ExecutorService threadPool = Executors.newCachedThreadPool();
    RuleEngineThread ruleThread = new RuleEngineThread(ruleEngine);
    sw.reset();
    sw.start();

    // ruleEngine.process();
    threadPool.execute(ruleThread);
    threadPool.shutdown();

    // monitoring loop
    while (!threadPool.isTerminated()) {
        System.out.println("******************************");
        System.out.println(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter,
                QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));
        System.out.println("Rule engine running for " + sw.getTime() + " ms");
        System.out.println("Federated graph size : " + graph.size());
        Thread.sleep(10000);
    }
    logger.info("Federated graph size : " + graph.size());
    logger.info(Util.jsonDqpCost(QueryProcessDQP.queryCounter, QueryProcessDQP.queryVolumeCounter,
            QueryProcessDQP.sourceCounter, QueryProcessDQP.sourceVolumeCounter));

    ///////////// Query file processing
    // StringBuffer fileData = new StringBuffer(1000);
    // BufferedReader reader = null;
    // try {
    //     reader = new BufferedReader(new FileReader(queryPath));
    // } catch (FileNotFoundException ex) {
    //     logger.error("Query file " + queryPath + " not found !");
    //     System.exit(1);
    // }
    // char[] buf = new char[1024];
    // int numRead = 0;
    // try {
    //     while ((numRead = reader.read(buf)) != -1) {
    //         String readData = String.valueOf(buf, 0, numRead);
    //         fileData.append(readData);
    //         buf = new char[1024];
    //     }
    //     reader.close();
    // } catch (IOException ex) {
    //     logger.error("Error while reading query file " + queryPath);
    //     System.exit(1);
    // }
    //
    // String sparqlQuery = fileData.toString();
    //
    // Query q = exec.compile(sparqlQuery, null);
    // System.out.println(q);
    //
    // StopWatch sw = new StopWatch();
    // sw.start();
    // Mappings map = exec.query(sparqlQuery);
    // int dqpSize = map.size();
    // System.out.println("--------");
    // long time = sw.getTime();
    // System.out.println(time + " " + dqpSize);
}