List of usage examples for java.util.Map.get
V get(Object key);
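Before the longer real-world examples, here is a minimal, self-contained sketch of the basic contract: get returns the value mapped to the key, or null when the key is absent (or, in maps that permit null values, when the key is mapped to null). The map contents and variable names below are illustrative only, not taken from the examples that follow.

import java.util.HashMap;
import java.util.Map;

public class MapGetDemo {
    public static void main(String[] args) {
        Map<String, Integer> ages = new HashMap<>();
        ages.put("alice", 30);

        // get returns the mapped value, or null when the key is absent
        Integer alice = ages.get("alice");   // 30
        Integer bob = ages.get("bob");       // null

        // guard against null before using the result
        if (bob == null) {
            System.out.println("bob is not in the map");
        }

        // getOrDefault (Java 8+) avoids the explicit null check
        int bobAge = ages.getOrDefault("bob", -1);
        System.out.println(alice + " " + bobAge);  // prints: 30 -1
    }
}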
From source file: net.itransformers.idiscover.v2.core.Main.java

public static void main(String[] args) throws MalformedURLException {
    logger.debug("iDiscover v2. gearing up");
    Map<String, String> params = CmdLineParser.parseCmdLine(args);
    // String connectionDetailsFileName = params.get("-f");
    // if (connectionDetailsFileName == null) {
    //     printUsage("fileName"); return;
    // }
    String depthCmdArg = params.get("-d");
    // if (depthCmdArg == null) {
    //     printUsage("depth"); return;
    // }
    String projectPath = params.get("-p");
    if (projectPath == null) {
        File cwd = new File(".");
        System.out.println("Project path is not specified. Will use current dir: " + cwd.getAbsolutePath());
        projectPath = cwd.getAbsolutePath();
    }
    File workingDir = new File(projectPath);
    if (!workingDir.exists()) {
        System.out.println("Invalid project path!");
        return;
    }
    System.out.println("Loading beans!!");
    File conDetails = new File(projectPath, "iDiscover/conf/txt/connection-details.txt");
    File generic = new File(projectPath, "iDiscover/conf/xml/generic.xml");
    String genericContextPath = generic.toURI().toURL().toString();
    File snmpDiscovery = new File(projectPath, "iDiscover/conf/xml/snmpNetworkDiscovery.xml");
    String snmpDiscoveryContextPath = snmpDiscovery.toURI().toURL().toString();
    File connectionsDetails = new File(projectPath, "iDiscover/conf/xml/connectionsDetails.xml");
    String connectionsDetailsContextPath = connectionsDetails.toURI().toURL().toString();
    DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory();
    BeanDefinition beanDefinition = BeanDefinitionBuilder.rootBeanDefinition(String.class)
            .addConstructorArgValue(projectPath).getBeanDefinition();
    String labelDirName = autolabel(projectPath);
    BeanDefinition beanDefinition2 = BeanDefinitionBuilder.rootBeanDefinition(String.class)
            .addConstructorArgValue(labelDirName).getBeanDefinition();
    beanFactory.registerBeanDefinition("projectPath", beanDefinition);
    beanFactory.registerBeanDefinition("labelDirName", beanDefinition2);
    GenericApplicationContext cmdArgCxt = new GenericApplicationContext(beanFactory);
    // Must call refresh to initialize context
    cmdArgCxt.refresh();
    String[] paths = new String[] { genericContextPath, snmpDiscoveryContextPath, connectionsDetailsContextPath };
    // ,project.getAbsolutePath()+project.getAbsolutePath()+File.separator+"iDiscover/conf/xml/snmpNetworkDiscovery.xml", project.getAbsolutePath()+File.separator+"iDiscover/src/main/resources/connectionsDetails.xml"
    FileSystemXmlApplicationContext applicationContext = new FileSystemXmlApplicationContext(paths, cmdArgCxt);
    // ClassPathXmlApplicationContext applicationContext = new ClassPathXmlApplicationContext(workingDir+File.separator+"iDiscover/conf/xml/generic.xml",workingDir+File.separator+"/iDiscover/conf/xml/snmpNetworkDiscovery.xml","connectionsDetails.xml");
    // NetworkDiscoverer discoverer = fileApplicationContext.getBean("bgpPeeringMapDiscovery", NetworkDiscoverer.class);
    // NetworkDiscoverer discoverer = fileApplicationContext.getBean("floodLightNodeDiscoverer", NetworkDiscoverer.class);
    NetworkDiscoverer discoverer = applicationContext.getBean("snmpDiscovery", NetworkDiscoverer.class);
    LinkedHashMap<String, ConnectionDetails> connectionList = (LinkedHashMap) applicationContext
            .getBean("connectionList", conDetails);
    int depth = (Integer) applicationContext.getBean("discoveryDepth", depthCmdArg == null ? "-1" : depthCmdArg);
    NetworkDiscoveryResult result = discoverer
            .discoverNetwork(new ArrayList<ConnectionDetails>(connectionList.values()), depth);
    if (result != null) {
        for (String s : result.getNodes().keySet()) {
            System.out.println("\nNode: " + s);
        }
    }
}
From source file: com.act.analysis.similarity.SubstructureSearch.java

public static void main(String[] args) throws Exception {
    Options opts = new Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }
    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        System.err.format("Argument parsing failed: %s\n", e.getMessage());
        HELP_FORMATTER.printHelp(SubstructureSearch.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }
    if (cl.hasOption("help")) {
        HELP_FORMATTER.printHelp(SubstructureSearch.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        return;
    }
    if (cl.hasOption(OPTION_LICENSE_FILE)) {
        LicenseManager.setLicenseFile(cl.getOptionValue(OPTION_LICENSE_FILE));
    }
    List<String> searchOpts = Collections.emptyList();
    if (cl.hasOption(OPTION_SEARCH_OPTIONS)) {
        searchOpts = Arrays.asList(cl.getOptionValues(OPTION_SEARCH_OPTIONS));
    }
    // Make sure we can initialize correctly before opening any file handles for writing.
    SubstructureSearch matcher = new SubstructureSearch();
    try {
        matcher.init(cl.getOptionValue(OPTION_QUERY), searchOpts);
    } catch (IllegalArgumentException e) {
        System.err.format("Unable to initialize substructure search. %s\n", e.getMessage());
        HELP_FORMATTER.printHelp(SubstructureSearch.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    } catch (MolFormatException e) {
        System.err.format("Invalid SMILES structure query. %s\n", e.getMessage());
        HELP_FORMATTER.printHelp(SubstructureSearch.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }
    Pair<List<String>, Iterator<Map<String, String>>> iterPair = null;
    if (cl.hasOption(OPTION_INPUT_FILE)) {
        File inFile = new File(cl.getOptionValue(OPTION_INPUT_FILE));
        if (!inFile.exists()) {
            System.err.format("File at %s does not exist", inFile.getAbsolutePath());
            HELP_FORMATTER.printHelp(SubstructureSearch.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
            System.exit(1);
        }
        iterPair = iterateOverTSV(inFile);
    } else if (cl.hasOption(OPTION_INPUT_DB)) {
        iterPair = iterateOverDB(cl.getOptionValue(OPTION_INPUT_DB_HOST, DEFAULT_HOST),
                Integer.parseInt(cl.getOptionValue(OPTION_INPUT_DB_HOST, DEFAULT_PORT)),
                cl.getOptionValue(OPTION_INPUT_DB));
    } else {
        System.err.format("Must specify either input TSV file or input DB from which to read.\n");
        HELP_FORMATTER.printHelp(SubstructureSearch.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
        System.exit(1);
    }
    TSVWriter<String, String> writer = new TSVWriter<>(iterPair.getLeft());
    writer.open(new File(cl.getOptionValue(OPTION_OUTPUT_FILE)));
    LOGGER.info("Seaching for substructure '%s'", cl.getOptionValue(OPTION_QUERY));
    try {
        int rowNum = 0;
        while (iterPair.getRight().hasNext()) {
            Map<String, String> row = iterPair.getRight().next();
            rowNum++;
            try {
                String inchi = row.get(FIELD_INCHI);
                Molecule target = null;
                try {
                    target = MolImporter.importMol(inchi);
                } catch (Exception e) {
                    LOGGER.warn("Skipping molecule %d due to exception: %s\n", rowNum, e.getMessage());
                    continue;
                }
                if (matcher.matchSubstructure(target)) {
                    writer.append(row);
                    writer.flush();
                } else {
                    // Don't output if not a match.
                    LOGGER.debug("Found non-matching molecule: %s", inchi);
                }
            } catch (SearchException e) {
                LOGGER.error("Exception on input line %d: %s\n", rowNum, e.getMessage());
                throw e;
            }
        }
    } finally {
        writer.close();
    }
    LOGGER.info("Done with substructure search");
}
From source file: edu.illinois.cs.cogcomp.datalessclassification.ta.ESADatalessAnnotator.java

/**
 * @param args config: config file path; testFile: test file
 */
public static void main(String[] args) {
    CommandLine cmd = getCMDOpts(args);
    ResourceManager rm;
    try {
        String configFile = cmd.getOptionValue("config", "config/project.properties");
        ResourceManager nonDefaultRm = new ResourceManager(configFile);
        rm = new ESADatalessConfigurator().getConfig(nonDefaultRm);
    } catch (IOException e) {
        rm = new ESADatalessConfigurator().getDefaultConfig();
    }
    String testFile = cmd.getOptionValue("testFile", "data/graphicsTestDocument.txt");
    StringBuilder sb = new StringBuilder();
    String line;
    try (BufferedReader br = new BufferedReader(new FileReader(new File(testFile)))) {
        while ((line = br.readLine()) != null) {
            sb.append(line);
            sb.append(" ");
        }
        String text = sb.toString().trim();
        TokenizerTextAnnotationBuilder taBuilder = new TokenizerTextAnnotationBuilder(new StatefulTokenizer());
        TextAnnotation ta = taBuilder.createTextAnnotation(text);
        ESADatalessAnnotator datalessAnnotator = new ESADatalessAnnotator(rm);
        datalessAnnotator.addView(ta);
        List<Constituent> annots = ta.getView(ViewNames.DATALESS_ESA).getConstituents();
        System.out.println("Predicted LabelIDs:");
        for (Constituent annot : annots) {
            System.out.println(annot.getLabel());
        }
        Map<String, String> labelNameMap = DatalessAnnotatorUtils
                .getLabelNameMap(rm.getString(DatalessConfigurator.LabelName_Path.key));
        System.out.println("Predicted Labels:");
        for (Constituent annot : annots) {
            System.out.println(labelNameMap.get(annot.getLabel()));
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
        logger.error("Test File not found at " + testFile + " ... exiting");
        System.exit(-1);
    } catch (IOException e) {
        e.printStackTrace();
        logger.error("IO Error while reading the test file ... exiting");
        System.exit(-1);
    } catch (AnnotatorException e) {
        e.printStackTrace();
        logger.error("Error Annotating the Test Document with the Dataless View ... exiting");
        System.exit(-1);
    }
}
From source file: eu.europeana.solr.SolrServerTester.java

public static void main(String[] args) throws SolrServerException, IOException {
    SolrServerTester tester = new SolrServerTester();
    tester.setSolrdir(new File(new File(new File(new File("src"), "test"), "resources"), "solr/" + CORE1));
    SolrServerIndexer indexer = new SolrServerIndexer();
    indexer.index(tester);
    tester.commit();
    SolrQuery q = new SolrQuery("Watermark");
    q.set("debugQuery", "on");
    q.set("defType", "bm25f");
    q.set("qf", "title text");
    q.setRows(10); // don't actually request any data
    QueryResponse qr = tester.query(q);
    Map<String, String> explainmap = qr.getExplainMap();
    System.out.println("results " + qr.getResults().getNumFound());
    for (SolrDocument doc : qr.getResults()) {
        System.out.println("Title: " + doc.getFieldValue("title"));
        System.out.println("Expl: " + explainmap.get(doc.getFieldValue("europeana_id")));
    }
    tester.close();
}
From source file: de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step7CollectMTurkResults.java

public static void main(String[] args) throws Exception {
    // input dir - list of xml query containers
    // /home/user-ukp/research/data/dip/wp1-documents/step4-boiler-plate/
    File inputDir = new File(args[0] + "/");

    // MTurk result file
    // output dir
    File outputDir = new File(args[2]);
    if (!outputDir.exists()) {
        outputDir.mkdirs();
    }

    // Folder with success files
    File mturkSuccessDir = new File(args[1]);
    Collection<File> files = FileUtils.listFiles(mturkSuccessDir, new String[] { "result" }, false);
    if (files.isEmpty()) {
        throw new IllegalArgumentException("Input folder is empty. " + mturkSuccessDir);
    }

    HashMap<String, List<MTurkAnnotation>> mturkAnnotations = new HashMap<>();

    // parsing all CSV files
    for (File mturkCSVResultFile : files) {
        System.out.println("Parsing " + mturkCSVResultFile.getName());
        MTurkOutputReader outputReader = new MTurkOutputReader(
                new HashSet<>(Arrays.asList("annotation", "workerid")), mturkCSVResultFile);

        // for fixing broken data input: for each hit, collect all sentence IDs
        Map<String, SortedSet<String>> hitSentences = new HashMap<>();

        // first iteration: collect the sentences
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            if (!hitSentences.containsKey(hitID)) {
                hitSentences.put(hitID, new TreeSet<>());
            }
            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");
            if (relevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(relevantSentences.split(",")));
            }
            if (irrelevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(irrelevantSentences.split(",")));
            }
        }

        // and now second iteration
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            String annotatorID = record.get("workerid");
            String acceptTime = record.get("assignmentaccepttime");
            String submitTime = record.get("assignmentsubmittime");
            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");
            String reject = record.get("reject");
            String[] filename;
            String comment;
            String clueWeb;
            String[] relevant = {};
            String[] irrelevant = {};

            filename = record.get("annotation").split("_");
            String fileXml = filename[0];
            clueWeb = filename[1].trim();
            comment = record.get("Answer.comment");

            if (relevantSentences != null) {
                relevant = relevantSentences.split(",");
            }
            if (irrelevantSentences != null) {
                irrelevant = irrelevantSentences.split(",");
            }

            // sanitizing data: if both relevant and irrelevant are empty, that's a bug
            // we're gonna look up all sentences from this HIT and treat this assignment
            // as if there were only irrelevant ones
            if (relevant.length == 0 && irrelevant.length == 0) {
                SortedSet<String> strings = hitSentences.get(hitID);
                irrelevant = new String[strings.size()];
                strings.toArray(irrelevant);
            }

            if (reject != null) {
                System.out.println(" HIT " + hitID + " annotated by " + annotatorID + " was rejected ");
            } else {
                /*
                // relevant sentences is a comma-delimited string,
                // this regular expression is rather strange
                // it must contain digits, it might be that there is only one space or a comma or some other char
                // digits are the sentence ids. if relevant sentences do not contain digits then it is wrong
                if (relevantSentences.matches("^\\D*$") && irrelevantSentences.matches("^\\D*$")) {
                    try {
                        throw new IllegalStateException("No annotations found for HIT " + hitID + " in "
                                + fileXml + " for document " + clueWeb);
                    } catch (IllegalStateException ex) {
                        ex.printStackTrace();
                    }
                }
                */
                MTurkAnnotation mturkAnnotation;
                try {
                    mturkAnnotation = new MTurkAnnotation(hitID, annotatorID, acceptTime, submitTime, comment,
                            clueWeb, relevant, irrelevant);
                } catch (IllegalArgumentException ex) {
                    throw new IllegalArgumentException("Record: " + record, ex);
                }
                List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileXml);
                if (listOfAnnotations == null) {
                    listOfAnnotations = new ArrayList<>();
                }
                listOfAnnotations.add(mturkAnnotation);
                mturkAnnotations.put(fileXml, listOfAnnotations);
            }
        }
        // parser.close();
    }

    // Debugging: output number of HITs of a query
    System.out.println("Accepted HITs for a query:");
    for (Map.Entry e : mturkAnnotations.entrySet()) {
        ArrayList<MTurkAnnotation> a = (ArrayList<MTurkAnnotation>) e.getValue();
        System.out.println(e.getKey() + " " + a.size());
    }

    for (File f : FileUtils.listFiles(inputDir, new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));
        String fileName = f.getName();
        List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileName);
        if (listOfAnnotations == null || listOfAnnotations.isEmpty()) {
            throw new IllegalStateException("No annotations for " + f.getName());
        }

        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            for (MTurkAnnotation mtAnnotation : listOfAnnotations) {
                String clueWeb = mtAnnotation.clueWeb;
                if (rankedResults.clueWebID.equals(clueWeb)) {
                    List<QueryResultContainer.MTurkRelevanceVote> mTurkRelevanceVotes = rankedResults.mTurkRelevanceVotes;
                    QueryResultContainer.MTurkRelevanceVote relevanceVote = new QueryResultContainer.MTurkRelevanceVote();
                    String annotatorID = mtAnnotation.annotatorID;
                    String hitID = mtAnnotation.hitID;
                    String acceptTime = mtAnnotation.acceptTime;
                    String submitTime = mtAnnotation.submitTime;
                    String comment = mtAnnotation.comment;
                    String[] relevant = mtAnnotation.relevant;
                    String[] irrelevant = mtAnnotation.irrelevant;
                    relevanceVote.turkID = annotatorID.trim();
                    relevanceVote.hitID = hitID.trim();
                    relevanceVote.acceptTime = acceptTime.trim();
                    relevanceVote.submitTime = submitTime.trim();
                    relevanceVote.comment = comment != null ? comment.trim() : null;
                    if (relevant.length == 0 && irrelevant.length == 0) {
                        try {
                            throw new IllegalStateException("the length of the annotations is 0"
                                    + rankedResults.clueWebID + " for HIT " + relevanceVote.hitID);
                        } catch (IllegalStateException e) {
                            e.printStackTrace();
                        }
                    }
                    for (String r : relevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "true";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    for (String r : irrelevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "false";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    mTurkRelevanceVotes.add(relevanceVote);
                }
            }
        }

        File outputFile = new File(outputDir, f.getName());
        FileUtils.writeStringToFile(outputFile, queryResultContainer.toXML(), "utf-8");
        System.out.println("Finished " + outputFile);
    }
}
From source file: org.jetbrains.webdemo.executors.JunitExecutor.java

public static void main(String[] args) {
    try {
        JUnitCore jUnitCore = new JUnitCore();
        jUnitCore.addListener(new MyRunListener());
        List<Class> classes = getAllClassesFromTheDir(new File(args[0]));
        for (Class cl : classes) {
            boolean hasTestMethods = false;
            for (Method method : cl.getMethods()) {
                if (method.isAnnotationPresent(Test.class)) {
                    hasTestMethods = true;
                    break;
                }
            }
            if (!hasTestMethods) continue;
            Request request = Request.aClass(cl);
            jUnitCore.run(request);
        }
        try {
            ObjectMapper objectMapper = new ObjectMapper();
            SimpleModule module = new SimpleModule();
            module.addSerializer(Throwable.class, new ThrowableSerializer());
            module.addSerializer(junit.framework.ComparisonFailure.class,
                    new JunitFrameworkComparisonFailureSerializer());
            module.addSerializer(org.junit.ComparisonFailure.class, new OrgJunitComparisonFailureSerializer());
            objectMapper.registerModule(module);
            System.setOut(standardOutput);
            Map<String, List<TestRunInfo>> groupedTestResults = new HashMap<>();
            for (TestRunInfo testRunInfo : output) {
                if (!groupedTestResults.containsKey(testRunInfo.className)) {
                    groupedTestResults.put(testRunInfo.className, new ArrayList<TestRunInfo>());
                }
                groupedTestResults.get(testRunInfo.className).add(testRunInfo);
            }
            System.out.print(objectMapper.writeValueAsString(groupedTestResults));
        } catch (IOException e) {
            e.printStackTrace();
        }
    } catch (Throwable e) {
        System.setOut(standardOutput);
        System.out.print("[\"");
        e.printStackTrace();
        System.out.print("\"]");
    }
}
From source file: com.oneops.search.msg.processor.CIMessageProcessor.java

public static void main(String[] args) {
    Map<String, String> map = new HashMap<>();
    map.put("test", "Nov 5 21:08:38 2019 GMT");
    map.put("test1", "Nov 5 21:08:38 2019 GMT");
    convertIllegalDateFormat(map, "test");
    convertIllegalDateFormat(map, "test1");
    System.out.println(map);
    System.out.println(ISODateTimeFormat.dateOptionalTimeParser().parseDateTime(map.get("test")));
    System.out.println(ISODateTimeFormat.dateOptionalTimeParser().parseDateTime(map.get("test1")));
}
From source file: WordCount.java

public static void main(String args[]) throws Exception {
    String filename = "WordCount.java";
    // Map file from filename to byte buffer
    FileInputStream input = new FileInputStream(filename);
    FileChannel channel = input.getChannel();
    int fileLength = (int) channel.size();
    MappedByteBuffer buffer = channel.map(FileChannel.MapMode.READ_ONLY, 0, fileLength);
    // Convert to character buffer
    Charset charset = Charset.forName("ISO-8859-1");
    CharsetDecoder decoder = charset.newDecoder();
    CharBuffer charBuffer = decoder.decode(buffer);
    // Create line pattern
    Pattern linePattern = Pattern.compile(".*$", Pattern.MULTILINE);
    // Create word pattern
    Pattern wordBreakPattern = Pattern.compile("[\\p{Punct}\\s}]");
    // Match line pattern to buffer
    Matcher lineMatcher = linePattern.matcher(charBuffer);
    Map map = new TreeMap();
    Integer ONE = new Integer(1);
    // For each line
    while (lineMatcher.find()) {
        // Get line
        CharSequence line = lineMatcher.group();
        // Get array of words on line
        String words[] = wordBreakPattern.split(line);
        // For each word
        for (int i = 0, n = words.length; i < n; i++) {
            if (words[i].length() > 0) {
                Integer frequency = (Integer) map.get(words[i]);
                if (frequency == null) {
                    frequency = ONE;
                } else {
                    int value = frequency.intValue();
                    frequency = new Integer(value + 1);
                }
                map.put(words[i], frequency);
            }
        }
    }
    System.out.println(map);
}
From source file: com.joliciel.jochre.search.JochreSearch.java

/**
 * @param args
 */
public static void main(String[] args) {
    try {
        Map<String, String> argMap = new HashMap<String, String>();
        for (String arg : args) {
            int equalsPos = arg.indexOf('=');
            String argName = arg.substring(0, equalsPos);
            String argValue = arg.substring(equalsPos + 1);
            argMap.put(argName, argValue);
        }
        String command = argMap.get("command");
        argMap.remove("command");
        String logConfigPath = argMap.get("logConfigFile");
        if (logConfigPath != null) {
            argMap.remove("logConfigFile");
            Properties props = new Properties();
            props.load(new FileInputStream(logConfigPath));
            PropertyConfigurator.configure(props);
        }
        LOG.debug("##### Arguments:");
        for (Entry<String, String> arg : argMap.entrySet()) {
            LOG.debug(arg.getKey() + ": " + arg.getValue());
        }
        SearchServiceLocator locator = SearchServiceLocator.getInstance();
        SearchService searchService = locator.getSearchService();
        if (command.equals("buildIndex")) {
            String indexDirPath = argMap.get("indexDir");
            String documentDirPath = argMap.get("documentDir");
            File indexDir = new File(indexDirPath);
            indexDir.mkdirs();
            File documentDir = new File(documentDirPath);
            JochreIndexBuilder builder = searchService.getJochreIndexBuilder(indexDir);
            builder.updateDocument(documentDir);
        } else if (command.equals("updateIndex")) {
            String indexDirPath = argMap.get("indexDir");
            String documentDirPath = argMap.get("documentDir");
            boolean forceUpdate = false;
            if (argMap.containsKey("forceUpdate")) {
                forceUpdate = argMap.get("forceUpdate").equals("true");
            }
            File indexDir = new File(indexDirPath);
            indexDir.mkdirs();
            File documentDir = new File(documentDirPath);
            JochreIndexBuilder builder = searchService.getJochreIndexBuilder(indexDir);
            builder.updateIndex(documentDir, forceUpdate);
        } else if (command.equals("search")) {
            HighlightServiceLocator highlightServiceLocator = HighlightServiceLocator.getInstance(locator);
            HighlightService highlightService = highlightServiceLocator.getHighlightService();
            String indexDirPath = argMap.get("indexDir");
            File indexDir = new File(indexDirPath);
            JochreQuery query = searchService.getJochreQuery(argMap);
            JochreIndexSearcher searcher = searchService.getJochreIndexSearcher(indexDir);
            TopDocs topDocs = searcher.search(query);
            Set<Integer> docIds = new LinkedHashSet<Integer>();
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                docIds.add(scoreDoc.doc);
            }
            Set<String> fields = new HashSet<String>();
            fields.add("text");
            Highlighter highlighter = highlightService.getHighlighter(query, searcher.getIndexSearcher());
            HighlightManager highlightManager = highlightService.getHighlightManager(searcher.getIndexSearcher());
            highlightManager.setDecimalPlaces(query.getDecimalPlaces());
            highlightManager.setMinWeight(0.0);
            highlightManager.setIncludeText(true);
            highlightManager.setIncludeGraphics(true);
            Writer out = new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8));
            if (command.equals("highlight")) {
                highlightManager.highlight(highlighter, docIds, fields, out);
            } else {
                highlightManager.findSnippets(highlighter, docIds, fields, out);
            }
        } else {
            throw new RuntimeException("Unknown command: " + command);
        }
    } catch (RuntimeException e) {
        LogUtils.logError(LOG, e);
        throw e;
    } catch (IOException e) {
        LogUtils.logError(LOG, e);
        throw new RuntimeException(e);
    }
}
From source file: com.edgenius.wiki.ext.textnut.NutParser.java

public static void main(String[] args) throws IOException {
    NutParser parser = new NutParser();
    // parser.parseHTML(FileUtils.readFileToString(new File("c:/temp/a.html")));
    Map<String, File> map = parser.parseBPlist(
            new FileInputStream(new File("C:/Dapeng/Future/webarchive/TextNut.nut/20110312/P1.webarchive")));
    if (map != null) {
        for (Entry<String, File> entry : map.entrySet()) {
            System.out.println(entry.getKey() + ":" + entry.getValue());
        }
        File file = map.get(MAIN_RESOURCE_URL);
        String content = parser.convertNutHTMLToPageHTML(FileUtils.readFileToString(file));
        System.out.println("=======");
        System.out.println(content);
        System.out.println("=======");
    }
}