List of usage examples for java.lang.System.setErr
public static void setErr(PrintStream err)
From source file:com.intel.ssg.dcst.panthera.parse.sql.SqlParseDriver.java
/** * Translate from SQL Command string to Hive AST. * This comes in two stages:/* www . j av a 2 s.c om*/ * 1. SQL command -> SQL AST (SqlLexer & SqlParser)(using antlr) * 2. SQL AST -> Hive AST (SqlASTTranslator) * * @param command * input SQL string * @param ctx * - pass context * @return result Hive AST * @throws SqlParseException * exception thrown during lexer & parser stage * @throws SqlXlateException * exception thrown during translation from SQL AST to Hive AST */ public ASTNode sqlParse(String command, Context ctx) throws HiveParseException, SqlParseException, SqlXlateException { LOG.info("Input SQL Command :" + command); // pre-parse phase command = preparse(command); LOG.info("Pre-Parsing Completed"); // Lexing phase SqlLexer lexer = new SqlLexer(new ANTLRStringStream(command)); TokenRewriteStream tokens = new TokenRewriteStream(lexer); //TODO as TokenRewriteStream is under antlr33 while Hive tokenstream is under antlr //we can not set the token stream into ctx. This may lead to error when creating view //Fix this later. //if (ctx != null) { // ctx.setTokenRewriteStream(tokens); //} // Parsing phase SqlParser parser = new SqlParser(tokens); parser.setTreeAdaptor(adaptor); // //the root grammar is "seq_of_statements" PantheraParser_PLSQLParser.seq_of_statements_return r = null; PrintStream sysErr = System.err; try { System.setErr(null); r = parser.seq_of_statements(); } catch (org.antlr.runtime.RecognitionException e) { // TODO, throw more readable information LOG.error("SQL parse Error :" + e.toString()); throw new SqlParseException(e); } catch (Exception e) { throw new SqlParseException("Unknown parse Error, check your input please."); } finally { System.setErr(sysErr); } // check if the query is a SELECT Statement or EXPLAIN Statement, only Select and Explain are supported. 
SqlXlateUtil.checkPantheraSupportQueries(r.getTree()); //check the tree SqlASTChecker checker = new SqlASTChecker(); try { checker.checkSqlAST(r.getTree(), command); } catch (SqlXlateException e) { LOG.error("SQL parse Error :" + e.toString()); e.outputException(command); throw e; } LOG.info("Parsing Completed."); // Translate phase SqlASTNode sqlAST = (SqlASTNode) r.getTree(); LOG.info("SQL AST before translation : " + sqlAST.toStringTree().replace('(', '[').replace(')', ']')); SqlASTTranslator trans = null; ASTNode hiveAST = null; try { trans = new SqlASTTranslator(this.conf); hiveAST = trans.translate(sqlAST); } catch (SqlXlateException e) { LOG.error("SQL transform error: " + e.toString()); e.outputException(command); throw e; } catch (Exception e) { LOG.error("Panthera encountered a known bug"); throw new SqlParseException("Panthera encountered a known bug"); } LOG.info("Hive AST after translation : " + hiveAST.toStringTree()); LOG.info("Translation Completed."); return hiveAST; }
From source file:org.kepler.build.RunTestWorkflows.java
/** Load a workflow or actor and parse the output. */ private void _loadWorkflowOrActor(TestInfo test) throws Exception { if (_parseWorkflowMain == null) { Class<?> clazz = Class.forName("org.kepler.loader.util.ParseWorkflow"); _parseWorkflowMain = clazz.getMethod("main", String[].class); }//from w ww.ja va 2 s . c o m // redirect stdout and stderr PrintStream stdoutOrig = System.out; PrintStream stderrOrig = System.err; ByteArrayOutputStream stdoutByteStream = new ByteArrayOutputStream(); PrintStream stdoutPrintStream = new PrintStream(stdoutByteStream); System.setOut(stdoutPrintStream); ByteArrayOutputStream stderrByteStream = new ByteArrayOutputStream(); PrintStream stderrPrintStream = new PrintStream(stderrByteStream); System.setErr(stderrPrintStream); // call the method String[] args; // if test is for an actor, add -a argument for ParseWorkflow.main() if (test.getTestType() == TestType.Actor) { args = new String[] { "-a", test.getPath() }; } else { args = new String[] { test.getPath() }; } long startTime = System.nanoTime(); _parseWorkflowMain.invoke(null, (Object) args); long elapsed = System.nanoTime() - startTime; if (elapsed > 0) { elapsed = elapsed / 1000000000; } test.setTime(elapsed); // restore stdout and stderr System.setOut(stdoutOrig); System.setErr(stderrOrig); // parse stdout and stderr InputStream stream = new ByteArrayInputStream(stdoutByteStream.toByteArray()); _parseOutput(test, stream, true); stdoutByteStream.close(); stream.close(); stream = new ByteArrayInputStream(stderrByteStream.toByteArray()); _parseOutput(test, stream, false); stderrByteStream.close(); stream.close(); }
From source file:fr.eurecom.hybris.demogui.HybrisDemoGui.java
/**
 * Builds the Swing UI: the Hybris list with Put/Get/Delete buttons on the
 * left, one list per cloud provider on the right, and a text area at the
 * bottom that captures System.out and System.err through a CustomOutputStream.
 * Also wires key/action listeners and per-provider cell renderers.
 */
private void initializeGUI() {
    frame = new JFrame("Hybris Demo GUI");
    frame.setIconImage(new ImageIcon(getClass().getResource("/clouds.png")).getImage());
    frame.setBounds(100, 100, 650, 500);
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    frame.getContentPane().setLayout(new GridBagLayout());
    JPanel cloudParentPanel = new JPanel(new GridLayout(1, 2, 10, 10));
    JPanel hybrisPanel = new JPanel(new GridBagLayout());
    JPanel cloudsPanel = new JPanel(new GridBagLayout());
    // Shared constraints object, mutated between add() calls.
    GridBagConstraints gbc = new GridBagConstraints();
    gbc.weightx = 1.0;
    gbc.weighty = 1.0;
    gbc.insets = new Insets(5, 5, 5, 5);
    gbc.anchor = GridBagConstraints.NORTH;
    gbc.fill = GridBagConstraints.BOTH;
    gbc.gridwidth = 3;
    gbc.gridheight = 1;
    gbc.gridx = 0;
    gbc.gridy = 0;
    hybrisPanel.add(new JLabel("<html><b>Hybris</b></html>"), gbc);
    gbc.gridwidth = 3;
    gbc.gridheight = 3;
    gbc.gridx = 0;
    gbc.gridy = 1;
    // NOTE(review): preferred/minimum sizes are set directly on the JLists
    // (no JScrollPane), so long lists will not scroll — confirm intended.
    lstHybris = new JList<String>(lmHybris);
    lstHybris.setPreferredSize(new java.awt.Dimension(100, 500));
    lstHybris.setMinimumSize(new java.awt.Dimension(100, 440));
    hybrisPanel.add(lstHybris, gbc);
    // Put / Get / Delete buttons along the bottom of the Hybris panel.
    gbc.anchor = GridBagConstraints.SOUTH;
    gbc.fill = GridBagConstraints.HORIZONTAL;
    gbc.gridwidth = 1;
    gbc.gridx = 0;
    gbc.gridy = 4;
    btnPut = new JButton("Put");
    hybrisPanel.add(btnPut, gbc);
    gbc.gridx = 1;
    gbc.gridy = 4;
    btnGet = new JButton("Get");
    hybrisPanel.add(btnGet, gbc);
    gbc.gridx = 2;
    gbc.gridy = 4;
    btnDelete = new JButton("Delete");
    hybrisPanel.add(btnDelete, gbc);
    // Right-hand column: one label + list per cloud provider.
    gbc.fill = GridBagConstraints.BOTH;
    gbc.gridx = 0;
    gbc.gridy = 0;
    gbc.gridwidth = 1;
    gbc.gridheight = 1;
    cloudsPanel.add(new JLabel("<html><b>Amazon S3</b></html>"), gbc);
    gbc.gridheight = 2;
    gbc.gridy = 1;
    lstAmazon = new JList<String>(lmAmazon);
    lstAmazon.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
    lstAmazon.setPreferredSize(new java.awt.Dimension(100, 100));
    cloudsPanel.add(lstAmazon, gbc);
    gbc.gridy = 3;
    gbc.gridheight = 1;
    cloudsPanel.add(new JLabel("<html><b>Microsoft Azure</b></html>"), gbc);
    gbc.gridheight = 2;
    gbc.gridy = 4;
    lstAzure = new JList<String>(lmAzure);
    lstAzure.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
    lstAzure.setPreferredSize(new java.awt.Dimension(100, 100));
    cloudsPanel.add(lstAzure, gbc);
    gbc.gridy = 6;
    gbc.gridheight = 1;
    cloudsPanel.add(new JLabel("<html><b>Google Cloud Storage</b></html>"), gbc);
    gbc.gridheight = 2;
    gbc.gridy = 7;
    lstGoogle = new JList<String>(lmGoogle);
    lstGoogle.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
    lstGoogle.setPreferredSize(new java.awt.Dimension(100, 100));
    cloudsPanel.add(lstGoogle, gbc);
    gbc.gridy = 9;
    gbc.gridheight = 1;
    cloudsPanel.add(new JLabel("<html><b>Rackspace Cloud Files</b></html>"), gbc);
    gbc.gridheight = 2;
    gbc.gridy = 10;
    lstRackspace = new JList<String>(lmRackspace);
    lstRackspace.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
    lstRackspace.setPreferredSize(new java.awt.Dimension(100, 100));
    cloudsPanel.add(lstRackspace, gbc);
    // Assemble the two columns into the frame.
    cloudParentPanel.add(hybrisPanel);
    cloudParentPanel.add(cloudsPanel);
    gbc.gridx = 0;
    gbc.gridy = 0;
    gbc.gridwidth = 1;
    gbc.gridheight = 1;
    frame.add(cloudParentPanel, gbc);
    gbc.gridx = 0;
    gbc.gridy = 1;
    // Console area: both stdout and stderr are redirected into this text area.
    JTextArea jt = new JTextArea(10, 30);
    JScrollPane scrollPane = new JScrollPane(jt);
    frame.add(scrollPane, gbc);
    PrintStream printStream = new PrintStream(new CustomOutputStream(jt));
    System.setOut(printStream);
    System.setErr(printStream);
    frame.pack();
    frame.setSize(550, 800);
    frame.setResizable(false);
    // Keyboard and renderer wiring for every list, plus button actions.
    lstAmazon.addKeyListener(this);
    lstAzure.addKeyListener(this);
    lstGoogle.addKeyListener(this);
    lstRackspace.addKeyListener(this);
    lstHybris.addKeyListener(this);
    lstAmazon.setCellRenderer(this.new MyListRenderer("amazon"));
    lstGoogle.setCellRenderer(this.new MyListRenderer("google"));
    lstAzure.setCellRenderer(this.new MyListRenderer("azure"));
    lstRackspace.setCellRenderer(this.new MyListRenderer("rackspace"));
    btnGet.addActionListener(this);
    btnPut.addActionListener(this);
    btnDelete.addActionListener(this);
}
From source file:org.apache.tools.ant.listener.CommonsLoggingListener.java
private void realLog(final Log log, final String message, final int priority, final Throwable t) { final PrintStream tmpOut = System.out; final PrintStream tmpErr = System.err; System.setOut(out);//from w ww .ja v a2 s .c om System.setErr(err); switch (priority) { case Project.MSG_ERR: if (t == null) { log.error(message); } else { log.error(message, t); } break; case Project.MSG_WARN: if (t == null) { log.warn(message); } else { log.warn(message, t); } break; case Project.MSG_INFO: if (t == null) { log.info(message); } else { log.info(message, t); } break; case Project.MSG_VERBOSE: log.debug(message); break; case Project.MSG_DEBUG: log.debug(message); break; default: log.error(message); break; } System.setOut(tmpOut); System.setErr(tmpErr); }
From source file:pt.ua.tm.neji.cli.Main.java
/**
 * Command-line entry point for the neji batch annotator. Parses CLI options
 * (input/output folders and formats, ML models, dictionaries, parser tool/
 * language/level, custom modules, threading, filters), configures logging, and
 * then either generates a server or runs a FileBatchExecutor over the corpus.
 * Invalid options are reported via the logger/usage text and cause an early
 * return.
 */
public static void main(String[] args) {
    // installUncaughtExceptionHandler();
    // Default worker count: cores - 1, at least 1; overridable via -t.
    int NUM_THREADS = Runtime.getRuntime().availableProcessors() - 1;
    NUM_THREADS = NUM_THREADS > 0 ? NUM_THREADS : 1;
    CommandLineParser parser = new GnuParser();
    Options options = new Options();
    options.addOption("h", "help", false, "Print this usage information.");
    options.addOption("i", "input", true, "Folder with corpus files.");
    options.addOption("o", "output", true, "Folder to save the annotated corpus files.");
    options.addOption("f", "input-filter", true, "Wildcard to filter files in input folder");
    options.addOption("p", "parser", true, "Folder that contains the parsing tool.");
    Option o = new Option("m", "models", true, "Folder that contains the ML models.");
    o.setArgs(Integer.MAX_VALUE);
    options.addOption(o);
    options.addOption("d", "dictionaires", true, "Folder that contains the dictionaries.");
    options.addOption("if", "input-format", true, "BIOC, RAW or XML");
    o = new Option("of", "output-formats", true, "A1, B64, BIOC, CONLL, JSON, NEJI or XML");
    o.setArgs(Integer.MAX_VALUE);
    options.addOption(o);
    options.addOption("ptool", "parsing-tool", true, "GDEP or OPENNLP (GDEP is set by default)");
    options.addOption("plang", "parsing-language", true,
            "DANISH, DUTCH, ENGLISH, FRENCH, GERMAN, PORTUGUESE or SWEDISH (ENGLISH is set by default)");
    options.addOption("plvl", "parsing-level", true,
            "TOKENIZATION, POS, LEMMATIZATION, CHUNKING or DEPENDENCY (TOKENIZATION is set by default)");
    options.addOption("pcls", "processor-class", true, "Full name of pipeline processor class.");
    options.addOption("custom", "custom-modules", true,
            "Names of custom modules to be used in order, separated by pipes. If a specified module are not a reader or a writer, it will be executed after dictionary and model processing.");
    options.addOption("x", "xml-tags", true, "XML tags to be considered, separated by commas.");
    options.addOption("v", "verbose", false, "Verbose mode.");
    options.addOption("s", "server", false, "Generate server.");
    options.addOption("c", "compressed", false, "If files are compressed using GZip.");
    options.addOption("noids", "include-no-ids", false, "If annotations without IDs should be included.");
    options.addOption("t", "threads", true,
            "Number of threads. By default, if more than one core is available, it is the number of cores minus 1.");
    options.addOption("fp", "false-positives-filter", true, "File that contains the false positive terms.");
    options.addOption("gn", "semantic-groups-normalization", true,
            "File that contains the semantic groups normalization terms.");
    CommandLine commandLine = null;
    try {
        // Parse the program arguments
        commandLine = parser.parse(options, args);
    } catch (ParseException ex) {
        logger.error("There was a problem processing the input arguments.", ex);
        return;
    }
    // Show help text
    if (commandLine.hasOption('h')) {
        printHelp(options, "");
        return;
    }
    // No options
    if (commandLine.getOptions().length == 0) {
        printHelp(options, "");
        return;
    }
    // Generate server from dictionary, model and parser parameters
    boolean generateServer = false;
    if (commandLine.hasOption('s')) {
        generateServer = true;
    }
    // Get corpus folder for input
    String folderCorpusIn = null;
    if (commandLine.hasOption('i')) {
        folderCorpusIn = commandLine.getOptionValue('i');
        File test = new File(folderCorpusIn);
        if (!test.isDirectory() || !test.canRead()) {
            logger.error("The specified path is not a folder or is not readable.");
            return;
        }
        folderCorpusIn = test.getAbsolutePath();
        folderCorpusIn += File.separator;
    } else {
        printHelp(options, "Please specify the input corpus folder.");
        return;
    }
    String inputFolderWildcard = null;
    if (commandLine.hasOption("f")) {
        inputFolderWildcard = commandLine.getOptionValue("f");
    }
    // Get Input format
    InputFormat inputFormat;
    if (commandLine.hasOption("if")) {
        inputFormat = InputFormat.valueOf(commandLine.getOptionValue("if"));
    } else {
        printHelp(options, "Please specify the input format.");
        return;
    }
    // Get corpus folder for output
    String folderCorpusOut = null;
    if (commandLine.hasOption('o')) {
        folderCorpusOut = commandLine.getOptionValue('o');
        File test = new File(folderCorpusOut);
        if (!test.isDirectory() || !test.canWrite()) {
            logger.error("The specified path is not a folder or is not writable.");
            return;
        }
        folderCorpusOut = test.getAbsolutePath();
        folderCorpusOut += File.separator;
    } else {
        printHelp(options, "Please specify the output corpus folder.");
        return;
    }
    // Get Output format
    List<OutputFormat> outputFormats = new ArrayList<>();
    if (commandLine.hasOption("of")) {
        String[] command = commandLine.getOptionValues("of");
        for (String s : command) {
            OutputFormat f = OutputFormat.valueOf(s);
            // Position-based output formats are incompatible with XML input.
            if (f.equals(OutputFormat.A1) || f.equals(OutputFormat.JSON) || f.equals(OutputFormat.NEJI)) {
                if (inputFormat.equals(InputFormat.XML)) {
                    logger.error("XML input format only supports XML and CoNLL output formats, "
                            + "since other formats are based on character positions.");
                    return;
                }
            }
            outputFormats.add(f);
        }
    } else {
        printHelp(options, "Please specify the output formats (in case of multiple formats, "
                + "separate them with a \"\\|\").");
        return;
    }
    // Get XML tags
    String[] xmlTags = null;
    if (inputFormat.equals(InputFormat.XML)) {
        if (commandLine.hasOption("x")) {
            xmlTags = commandLine.getOptionValue("x").split(",");
        } else {
            printHelp(options, "Please specify XML tags to be used.");
            return;
        }
    }
    // Get models folder
    String modelsFolder = null;
    if (commandLine.hasOption('m')) {
        modelsFolder = commandLine.getOptionValue('m');
        File test = new File(modelsFolder);
        if (!test.isDirectory() || !test.canRead()) {
            logger.error("The specified models path is not a folder or is not readable.");
            return;
        }
        modelsFolder = test.getAbsolutePath();
        modelsFolder += File.separator;
    }
    // Get dictionaries folder
    String dictionariesFolder = null;
    if (commandLine.hasOption('d')) {
        dictionariesFolder = commandLine.getOptionValue('d');
        File test = new File(dictionariesFolder);
        if (!test.isDirectory() || !test.canRead()) {
            logger.error("The specified dictionaries path is not a folder or is not readable.");
            return;
        }
        dictionariesFolder = test.getAbsolutePath();
        dictionariesFolder += File.separator;
    }
    // Get parser folder
    String parserFolder = null;
    if (commandLine.hasOption("p")) {
        parserFolder = commandLine.getOptionValue("p");
        File test = new File(parserFolder);
        if (!test.isDirectory() || !test.canRead()) {
            logger.error("The specified parser path is not a folder or is not readable.");
            return;
        }
        parserFolder = test.getAbsolutePath();
        parserFolder += File.separator;
    }
    // Get processing modules
    String modulesCommandLine = "";
    if (commandLine.hasOption("custom")) {
        modulesCommandLine = commandLine.getOptionValue("custom");
    }
    // Get verbose mode
    boolean verbose = commandLine.hasOption('v');
    Constants.verbose = verbose;
    if (Constants.verbose) {
        MalletLogger.getGlobal().setLevel(Level.INFO);
        // Redirect sout
        LoggingOutputStream los = new LoggingOutputStream(LoggerFactory.getLogger("stdout"), false);
        System.setOut(new PrintStream(los, true));
        // Redirect serr
        los = new LoggingOutputStream(LoggerFactory.getLogger("sterr"), true);
        System.setErr(new PrintStream(los, true));
    } else {
        MalletLogger.getGlobal().setLevel(Level.OFF);
    }
    // Redirect JUL to SLF4
    SLF4JBridgeHandler.removeHandlersForRootLogger();
    SLF4JBridgeHandler.install();
    // Get compressed mode
    boolean compressed = false;
    if (commandLine.hasOption('c')) {
        compressed = true;
    }
    // Get threads
    String threadsText = null;
    if (commandLine.hasOption('t')) {
        threadsText = commandLine.getOptionValue('t');
        NUM_THREADS = Integer.parseInt(threadsText);
        if (NUM_THREADS <= 0 || NUM_THREADS > 32) {
            logger.error("Illegal number of threads. Must be between 1 and 32.");
            return;
        }
    }
    // Load pipeline processor
    Class processor = FileProcessor.class;
    if (commandLine.hasOption("pcls")) {
        String processorName = commandLine.getOptionValue("pcls");
        try {
            processor = Class.forName(processorName);
        } catch (ClassNotFoundException ex) {
            logger.error("Could not load pipeline processor \"" + processorName + "\"");
            return;
        }
    }
    // Load parsing tool
    ParserTool parsingTool = ParserTool.GDEP;
    if (commandLine.hasOption("ptool")) {
        String parsingToolName = commandLine.getOptionValue("ptool");
        try {
            parsingTool = ParserTool.valueOf(parsingToolName);
        } catch (IllegalArgumentException ex) {
            logger.error("Invalid parsing tool \"" + parsingToolName + "\". " + "Must be one of "
                    + StringUtils.join(ParserTool.values(), ", "));
            return;
        }
    }
    // Load parsing language
    ParserLanguage parsingLanguage = ParserLanguage.ENGLISH;
    if (commandLine.hasOption("plang")) {
        String parsingLanguageName = commandLine.getOptionValue("plang");
        try {
            parsingLanguage = ParserLanguage.valueOf(parsingLanguageName);
        } catch (IllegalArgumentException ex) {
            logger.error("Invalid parsing language \"" + parsingLanguageName + "\". " + "Must be one of "
                    + StringUtils.join(ParserLanguage.values(), ", "));
            return;
        }
    }
    // Load parsing level
    ParserLevel parsingLevel = ParserLevel.TOKENIZATION;
    if (commandLine.hasOption("plvl")) {
        String parsingLevelName = commandLine.getOptionValue("plvl");
        try {
            parsingLevel = ParserLevel.valueOf(parsingLevelName);
        } catch (IllegalArgumentException ex) {
            logger.error("Invalid parsing level \"" + parsingLevelName + "\". " + "Must be one of "
                    + StringUtils.join(ParserLevel.values(), ", "));
            return;
        }
    } else {
        // Set model parsing level if ML will be used to annotate and no parsing level has been setted
        if (modelsFolder != null) {
            try {
                parsingLevel = getModelsParsingLevel(modelsFolder);
            } catch (NejiException ex) {
                logger.error("Could not load models parsing level.");
                return;
            }
        }
    }
    // Get if annotations without ids should be included
    boolean includeAnnotationsWithoutIDs = false;
    if (commandLine.hasOption("noids")) {
        includeAnnotationsWithoutIDs = true;
    }
    // Get false positives filter
    byte[] fpByteArray = null;
    if (commandLine.hasOption("fp")) {
        String fpPath = commandLine.getOptionValue("fp");
        File test = new File(fpPath);
        if (!test.isFile() || !test.canRead()) {
            logger.error("The specified false positives path is not a file or is not readable.");
            return;
        }
        // NOTE(review): File.separator is appended to a *file* path here (and in
        // the "gn" branch below) before opening it — looks suspicious; confirm.
        fpPath = test.getAbsolutePath();
        fpPath += File.separator;
        try {
            fpByteArray = IOUtils.toByteArray(new FileInputStream(new File(fpPath)));
        } catch (IOException ex) {
            logger.error("There was a problem loading the false positives " + "file.", ex);
            return;
        }
    }
    // Get semantic groups normalization
    byte[] groupsNormByteArray = null;
    if (commandLine.hasOption("gn")) {
        String gnPath = commandLine.getOptionValue("gn");
        File test = new File(gnPath);
        if (!test.isFile() || !test.canRead()) {
            logger.error(
                    "The specified semantic groups normalization path " + "is not a file or is not readable.");
            return;
        }
        gnPath = test.getAbsolutePath();
        gnPath += File.separator;
        try {
            groupsNormByteArray = IOUtils.toByteArray(new FileInputStream(new File(gnPath)));
        } catch (IOException ex) {
            logger.error("There was a problem loading the semantic groups " + "normalization file.", ex);
            return;
        }
    }
    // Context is built through a descriptor first, so that the pipeline can be validated before any processing
    ContextConfiguration descriptor = null;
    try {
        descriptor = new ContextConfiguration.Builder().withInputFormat(inputFormat)
                .withOutputFormats(outputFormats).withParserTool(parsingTool)
                .withParserLanguage(parsingLanguage).withParserLevel(parsingLevel).parseCLI(modulesCommandLine)
                .build();
        descriptor.setFalsePositives(fpByteArray);
        descriptor.setSemanticGroupsNormalization(groupsNormByteArray);
    } catch (NejiException ex) {
        ex.printStackTrace();
        System.exit(1);
    }
    if (generateServer) {
        try {
            generateServer(descriptor, modelsFolder, dictionariesFolder, includeAnnotationsWithoutIDs);
        } catch (IOException ex) {
            ex.printStackTrace();
            System.exit(1);
        }
    } else {
        boolean storeDocuments = false;
        Context context = new Context(descriptor, modelsFolder, // Models
                dictionariesFolder, // Dictionaries folder
                parserFolder // Parser folder
        );
        try {
            BatchExecutor batchExecutor = new FileBatchExecutor(folderCorpusIn, folderCorpusOut, compressed,
                    NUM_THREADS, inputFolderWildcard, storeDocuments, includeAnnotationsWithoutIDs);
            if (xmlTags == null) {
                batchExecutor.run(processor, context);
            } else {
                batchExecutor.run(processor, context, new Object[] { xmlTags });
            }
        } catch (Exception ex) {
            logger.error("There was a problem running the batch.", ex);
        }
    }
}
From source file:de.tudarmstadt.ukp.dariah.pipeline.RunPipeline.java
/**
 * Entry point for the DARIAH NLP pipeline. Redirects stderr to error.log,
 * parses CLI arguments and layered config files (defaults, language defaults,
 * user-supplied), assembles the UIMA/DKPro analysis engines according to the
 * opt* flags, and runs the pipeline over every queued input file, reporting
 * progress and errors to stdout.
 */
public static void main(String[] args) {
    Date startDate = new Date();
    PrintStream ps;
    try {
        // All stack traces and library error output go to error.log.
        ps = new PrintStream("error.log");
        System.setErr(ps);
    } catch (FileNotFoundException e) {
        System.out.println("Errors cannot be redirected");
    }
    try {
        if (!parseArgs(args)) {
            System.out.println("Usage: java -jar pipeline.jar -help");
            System.out.println("Usage: java -jar pipeline.jar -input <Input File> -output <Output Folder>");
            System.out.println(
                    "Usage: java -jar pipeline.jar -config <Config File> -input <Input File> -output <Output Folder>");
            return;
        }
    } catch (ParseException e) {
        e.printStackTrace();
        System.out.println(
                "Error when parsing command line arguments. Use\njava -jar pipeline.jar -help\n to get further information");
        System.out.println("See error.log for further details");
        return;
    }
    // Config files are applied in order: defaults, then language defaults,
    // then any files given via -config.
    LinkedList<String> configFiles = new LinkedList<>();
    String configFolder = "configs/";
    configFiles.add(configFolder + "default.properties");
    //Language dependent properties file
    String path = configFolder + "default_" + optLanguage + ".properties";
    File f = new File(path);
    if (f.exists()) {
        configFiles.add(path);
    } else {
        System.out.println("Language config file: " + path + " not found");
    }
    String[] configFileArg = new String[0];
    for (int i = 0; i < args.length - 1; i++) {
        if (args[i].equals("-config")) {
            configFileArg = args[i + 1].split("[,;]");
            break;
        }
    }
    for (String configFile : configFileArg) {
        f = new File(configFile);
        if (f.exists()) {
            configFiles.add(configFile);
        } else {
            //Check in configs folder
            path = configFolder + configFile;
            f = new File(path);
            if (f.exists()) {
                configFiles.add(path);
            } else {
                System.out.println("Config file: " + configFile + " not found");
                return;
            }
        }
    }
    for (String configFile : configFiles) {
        try {
            parseConfig(configFile);
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("Exception when parsing config file: " + configFile);
            System.out.println("See error.log for further details");
        }
    }
    printConfiguration(configFiles.toArray(new String[0]));
    try {
        // Read in the input files
        String defaultFileExtension = (optReader == ReaderType.XML) ? ".xml" : ".txt";
        GlobalFileStorage.getInstance().readFilePaths(optInput, defaultFileExtension, optOutput, optResume);
        System.out.println("Process " + GlobalFileStorage.getInstance().size() + " files");
        CollectionReaderDescription reader;
        if (optReader == ReaderType.XML) {
            reader = createReaderDescription(XmlReader.class, XmlReader.PARAM_LANGUAGE, optLanguage);
        } else {
            reader = createReaderDescription(TextReaderWithInfo.class, TextReaderWithInfo.PARAM_LANGUAGE,
                    optLanguage);
        }
        // Each analysis step is created up front; disabled steps are replaced
        // by a NoOpAnnotator when the pipeline is run.
        AnalysisEngineDescription paragraph = createEngineDescription(ParagraphSplitter.class,
                ParagraphSplitter.PARAM_SPLIT_PATTERN,
                (optParagraphSingleLineBreak) ? ParagraphSplitter.SINGLE_LINE_BREAKS_PATTERN
                        : ParagraphSplitter.DOUBLE_LINE_BREAKS_PATTERN);
        AnalysisEngineDescription seg = createEngineDescription(optSegmenterCls, optSegmenterArguments);
        AnalysisEngineDescription paragraphSentenceCorrector = createEngineDescription(
                ParagraphSentenceCorrector.class);
        AnalysisEngineDescription frenchQuotesSeg = createEngineDescription(PatternBasedTokenSegmenter.class,
                PatternBasedTokenSegmenter.PARAM_PATTERNS, "+|[]");
        AnalysisEngineDescription quotesSeg = createEngineDescription(PatternBasedTokenSegmenter.class,
                PatternBasedTokenSegmenter.PARAM_PATTERNS, "+|[\"\"]");
        AnalysisEngineDescription posTagger = createEngineDescription(optPOSTaggerCls, optPOSTaggerArguments);
        AnalysisEngineDescription lemma = createEngineDescription(optLemmatizerCls, optLemmatizerArguments);
        AnalysisEngineDescription chunker = createEngineDescription(optChunkerCls, optChunkerArguments);
        AnalysisEngineDescription morph = createEngineDescription(optMorphTaggerCls, optMorphTaggerArguments);
        AnalysisEngineDescription hyphenation = createEngineDescription(optHyphenationCls,
                optHyphenationArguments);
        AnalysisEngineDescription depParser = createEngineDescription(optDependencyParserCls,
                optDependencyParserArguments);
        AnalysisEngineDescription constituencyParser = createEngineDescription(optConstituencyParserCls,
                optConstituencyParserArguments);
        AnalysisEngineDescription ner = createEngineDescription(optNERCls, optNERArguments);
        AnalysisEngineDescription directSpeech = createEngineDescription(DirectSpeechAnnotator.class,
                DirectSpeechAnnotator.PARAM_START_QUOTE, optStartQuote);
        AnalysisEngineDescription srl = createEngineDescription(optSRLCls, optSRLArguments); //Requires DKPro 1.8.0
        AnalysisEngineDescription coref = createEngineDescription(optCorefCls, optCorefArguments); //StanfordCoreferenceResolver.PARAM_POSTPROCESSING, true
        AnalysisEngineDescription writer = createEngineDescription(DARIAHWriter.class,
                DARIAHWriter.PARAM_TARGET_LOCATION, optOutput, DARIAHWriter.PARAM_OVERWRITE, true);
        AnalysisEngineDescription annWriter = createEngineDescription(AnnotationWriter.class);
        AnalysisEngineDescription noOp = createEngineDescription(NoOpAnnotator.class);
        System.out.println("\nStart running the pipeline (this may take a while)...");
        while (!GlobalFileStorage.getInstance().isEmpty()) {
            try {
                SimplePipeline.runPipeline(reader, paragraph, (optSegmenter) ? seg : noOp,
                        paragraphSentenceCorrector, frenchQuotesSeg, quotesSeg,
                        (optPOSTagger) ? posTagger : noOp, (optLemmatizer) ? lemma : noOp,
                        (optChunker) ? chunker : noOp, (optMorphTagger) ? morph : noOp,
                        (optHyphenation) ? hyphenation : noOp, directSpeech,
                        (optDependencyParser) ? depParser : noOp,
                        (optConstituencyParser) ? constituencyParser : noOp, (optNER) ? ner : noOp,
                        (optSRL) ? srl : noOp, //Requires DKPro 1.8.0
                        (optCoref) ? coref : noOp, writer
                // ,annWriter
                );
            } catch (OutOfMemoryError e) {
                // Skip the offending file and keep processing the rest.
                System.out.println("Out of Memory at file: "
                        + GlobalFileStorage.getInstance().getLastPolledFile().getAbsolutePath());
            }
        }
        Date enddate = new Date();
        double duration = (enddate.getTime() - startDate.getTime()) / (1000 * 60.0);
        System.out.println("---- DONE -----");
        System.out.printf("All files processed in %.2f minutes", duration);
    } catch (ResourceInitializationException e) {
        System.out.println("Error when initializing the pipeline.");
        if (e.getCause() instanceof FileNotFoundException) {
            System.out.println("File not found. Maybe the input / output path is incorrect?");
            System.out.println(e.getCause().getMessage());
        }
        e.printStackTrace();
        System.out.println("See error.log for further details");
    } catch (UIMAException e) {
        e.printStackTrace();
        System.out.println("Error in the pipeline.");
        System.out.println("See error.log for further details");
    } catch (IOException e) {
        e.printStackTrace();
        System.out
                .println("Error while reading or writing to the file system. Maybe some paths are incorrect?");
        System.out.println("See error.log for further details");
    }
}
From source file:org.apache.jasper.compiler.JspRuntimeContext.java
/** * Process a "destory" event for this web application context. */// w w w . ja v a 2s.co m public void destroy() { if (System.err instanceof SystemLogHandler) System.setErr(((SystemLogHandler) System.err).getWrapped()); threadStop(); Iterator servlets = jsps.values().iterator(); while (servlets.hasNext()) { ((JspServletWrapper) servlets.next()).destroy(); } }
From source file:org.apache.hive.beeline.cli.TestHiveCli.java
private void redirectOutputStream() { // Setup output stream to redirect output to os = new ByteArrayOutputStream(); ps = new PrintStream(os); errS = new ByteArrayOutputStream(); errPs = new PrintStream(errS); System.setOut(ps);//w w w .ja v a2s .c o m System.setErr(errPs); }
From source file:org.apache.tika.cli.TikaCLIIT.java
/** * Tears down the test. Returns the System.out *///w w w .j av a 2s. co m @After public void tearDown() throws Exception { System.setOut(stdout); System.setErr(errout); //System.out.println("Output: " + outContent.toString(UTF_8.name())); //System.out.println("Error: " + errContent.toString(UTF_8.name())); }
From source file:com.github.jessemull.microflex.math.mathdouble.AdditionDoubleTest.java
/**
 * Restores System.err to the stream saved before the test class ran.
 * NOTE(review): the field is named {@code originalOut} but is installed as
 * stderr here — confirm it was captured from System.err, not System.out.
 */
@AfterClass
public static void restoreErrorOut() {
    System.setErr(originalOut);
}