Example usage for opennlp.tools.tokenize.TokenizerME: the TokenizerME(TokenizerModel) constructor

List of usage examples for the opennlp.tools.tokenize.TokenizerME constructor, TokenizerME(TokenizerModel).

Introduction

This page collects example usages of the opennlp.tools.tokenize.TokenizerME constructor, which wraps a trained TokenizerModel in a maximum-entropy (ME) tokenizer.

Prototype

public TokenizerME(TokenizerModel model) 
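
A minimal sketch of the constructor in use, assuming a pretrained en-token.bin model file in the working directory (the same file several of the examples below load):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;

public class TokenizerMEConstructorExample {
    public static void main(String[] args) throws IOException {
        // load a pretrained tokenizer model (file name is an assumption)
        try (InputStream modelIn = new FileInputStream("en-token.bin")) {
            TokenizerModel model = new TokenizerModel(modelIn);
            TokenizerME tokenizer = new TokenizerME(model); // the constructor documented above
            for (String token : tokenizer.tokenize("Mr. Smith isn't here.")) {
                System.out.println(token);
            }
        }
    }
}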

Usage

From source file:com.civprod.writerstoolbox.OpenNLP.training.TokenizerTrainer.java

private void cmdTrainActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cmdTrainActionPerformed
    final TokenizerTrainer tempThis = this;
    new Thread(() -> {
        textTestResults.setText("");
        Charset charset = Charset.forName("UTF-8");
        //create TokenizerFactory part of the training context
        String alphaNumericRegex = txtAlphaNumericPattern.getText();
        alphaNumericRegex = alphaNumericRegex.trim();
        if (alphaNumericRegex.isEmpty()) {
            alphaNumericRegex = "^[A-Za-z0-9]+$";
        }
        Pattern alphaNumericPattern = Pattern.compile(alphaNumericRegex);
        TokenizerFactory myTokenizerFactory = new TokenizerFactory("EN", mAbbreviationDictionary,
                this.cbUseAlphaNumericOptimization.isSelected(), alphaNumericPattern);

        Tokenizer stdTokenizer = null;
        try {
            stdTokenizer = OpenNLPUtils.createTokenizer();
        } catch (IOException ex) {
            Logger.getLogger(TokenizerTrainer.class.getName()).log(Level.SEVERE, null, ex);
        }
        List<FileSplit> FileSplits = FileSplit.generateFileSplitsLOO(mFileCollectionListModel);
        File trainingFile = new File("en-token.train");
        File testFile = new File("en-token.test");
        SummaryStatistics curFStats = new SummaryStatistics();
        SummaryStatistics curRecallStats = new SummaryStatistics();
        SummaryStatistics curPrecisionStats = new SummaryStatistics();
        SummaryStatistics stdFStats = new SummaryStatistics();
        SummaryStatistics stdRecallStats = new SummaryStatistics();
        SummaryStatistics stdPrecisionStats = new SummaryStatistics();
        java.io.BufferedOutputStream trainingFileWriter = null;
        for (FileSplit curFileSplit : FileSplits) {
            try {
                //create training file
                trainingFileWriter = new java.io.BufferedOutputStream(
                        new java.io.FileOutputStream(trainingFile));
                for (File curTrainingFile : curFileSplit.getTrainingFiles()) {
                    java.io.BufferedInputStream curTrainingFileReader = null;
                    try {
                        curTrainingFileReader = new java.io.BufferedInputStream(
                                new java.io.FileInputStream(curTrainingFile));
                        while (curTrainingFileReader.available() > 0) {
                            trainingFileWriter.write(curTrainingFileReader.read());
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                trainingFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingFileWriter != null) {
                    try {
                        trainingFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create test file
            java.io.BufferedOutputStream testFileWriter = null;
            try {
                //create test file
                testFileWriter = new java.io.BufferedOutputStream(new java.io.FileOutputStream(testFile));
                for (File curTrainingFile : curFileSplit.getTestFiles()) {
                    String testingFileName = curTrainingFile.getCanonicalPath();
                    textTestResults
                            .setText(textTestResults.getText() + "testing with " + testingFileName + "\n");
                    java.io.BufferedInputStream curTrainingFileReader = null;
                    try {
                        curTrainingFileReader = new java.io.BufferedInputStream(
                                new java.io.FileInputStream(curTrainingFile));
                        while (curTrainingFileReader.available() > 0) {
                            int read = curTrainingFileReader.read();
                            testFileWriter.write(read);
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                testFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (testFileWriter != null) {
                    try {
                        testFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create and train model
            ObjectStream<String> trainingLineStream = null;
            TokenizerModel train = null;
            try {
                trainingLineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), charset);
                ObjectStream<TokenSample> sampleStream = null;
                try {
                    sampleStream = new TokenSampleStream(trainingLineStream);
                    train = TokenizerME.train(sampleStream, myTokenizerFactory,
                            TrainingParameters.defaultParams());
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (sampleStream != null) {
                        try {
                            sampleStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null,
                                    ex);
                        }
                    }
                }
            } catch (FileNotFoundException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingLineStream != null) {
                    try {
                        trainingLineStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            if (train != null) {
                ObjectStream<String> testingLineStream = null;
                try {
                    testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), charset);
                    ObjectStream<TokenSample> sampleStream = null;
                    try {
                        sampleStream = new TokenSampleStream(testingLineStream);
                        TokenizerME testDetector = new TokenizerME(train);
                        TokenizerEvaluator evaluator = new TokenizerEvaluator(testDetector);
                        evaluator.evaluate(sampleStream);
                        FMeasure testFMeasure = evaluator.getFMeasure();
                        curFStats.addValue(testFMeasure.getFMeasure());
                        curRecallStats.addValue(testFMeasure.getRecallScore());
                        curPrecisionStats.addValue(testFMeasure.getPrecisionScore());
                        textTestResults.setText(textTestResults.getText() + testFMeasure.getFMeasure() + " "
                                + testFMeasure.getPrecisionScore() + " " + testFMeasure.getRecallScore()
                                + "\n");
                        if (stdTokenizer != null) {
                            testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile),
                                    charset);
                            sampleStream = new TokenSampleStream(testingLineStream);
                            TokenizerEvaluator stdEvaluator = new TokenizerEvaluator(stdTokenizer);
                            stdEvaluator.evaluate(sampleStream);
                            FMeasure stdFMeasure = stdEvaluator.getFMeasure();
                            stdFStats.addValue(stdFMeasure.getFMeasure());
                            stdRecallStats.addValue(stdFMeasure.getRecallScore());
                            stdPrecisionStats.addValue(stdFMeasure.getPrecisionScore());
                            textTestResults.setText(textTestResults.getText() + " " + stdFMeasure.getFMeasure()
                                    + " " + stdFMeasure.getPrecisionScore() + " " + stdFMeasure.getRecallScore()
                                    + "\n");
                        }
                        textTestResults.setText(textTestResults.getText() + "\n");
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (sampleStream != null) {
                            try {
                                sampleStream.close();
                            } catch (IOException ex) {
                                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE,
                                        null, ex);
                            }
                        }
                    }
                } catch (FileNotFoundException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (testingLineStream != null) {
                        try {
                            testingLineStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null,
                                    ex);
                        }
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "\n");
        textTestResults.setText(textTestResults.getText() + "test model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + curFStats.getMean() + " stdDev "
                + curFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + curRecallStats.getMean()
                + " stdDev " + curRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean "
                + curPrecisionStats.getMean() + " stdDev " + curPrecisionStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "std model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + stdFStats.getMean() + " stdDev "
                + stdFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + stdRecallStats.getMean()
                + " stdDev " + stdRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean "
                + stdPrecisionStats.getMean() + " stdDev " + stdPrecisionStats.getStandardDeviation() + "\n");
        //create combined training file
        trainingFileWriter = null;
        try {
            trainingFileWriter = new java.io.BufferedOutputStream(new java.io.FileOutputStream(trainingFile));
            for (File curTrainingFile : mFileCollectionListModel) {
                java.io.BufferedInputStream curTrainingFileReader = null;
                try {
                    curTrainingFileReader = new java.io.BufferedInputStream(
                            new java.io.FileInputStream(curTrainingFile));
                    while (curTrainingFileReader.available() > 0) {
                        trainingFileWriter.write(curTrainingFileReader.read());
                    }
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (curTrainingFileReader != null) {
                        curTrainingFileReader.close();
                    }
                }
            }
            trainingFileWriter.write('\n');
        } catch (IOException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (trainingFileWriter != null) {
                try {
                    trainingFileWriter.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        //create and train model
        ObjectStream<String> lineStream = null;
        this.createdObject = null;
        try {
            lineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), charset);
            ObjectStream<TokenSample> sampleStream = null;
            try {
                sampleStream = new TokenSampleStream(lineStream);
                this.createdObject = TokenizerME.train(sampleStream, myTokenizerFactory,
                        TrainingParameters.defaultParams());
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (sampleStream != null) {
                    try {
                        sampleStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        } catch (FileNotFoundException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (lineStream != null) {
                try {
                    lineStream.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        if (createdObject != null) {
            OutputStream modelOut = null;
            File modelFile = new File("en-fiction-token.bin");
            try {
                modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
                createdObject.serialize(modelOut);
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (modelOut != null) {
                    try {
                        modelOut.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "done");
    }).start();
}
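
Stripped of the Swing plumbing and the leave-one-out bookkeeping, the handler above follows one core pattern: build a TokenizerFactory, train a TokenizerModel with TokenizerME.train(...), wrap the result in new TokenizerME(model), and score it with a TokenizerEvaluator. Below is a minimal sketch of that pattern against the newer OpenNLP API (1.6+, where ObjectStream is AutoCloseable and PlainTextByLineStream takes an InputStreamFactory; the examples on this page use the older 1.5.x constructors), reusing the en-token.train and en-token.test file names from above:

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.regex.Pattern;

import opennlp.tools.tokenize.TokenSample;
import opennlp.tools.tokenize.TokenSampleStream;
import opennlp.tools.tokenize.TokenizerEvaluator;
import opennlp.tools.tokenize.TokenizerFactory;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
import opennlp.tools.util.InputStreamFactory;
import opennlp.tools.util.MarkableFileInputStreamFactory;
import opennlp.tools.util.ObjectStream;
import opennlp.tools.util.PlainTextByLineStream;
import opennlp.tools.util.TrainingParameters;

public class TrainAndEvaluateSketch {
    public static void main(String[] args) throws Exception {
        // no abbreviation dictionary (null); same alphanumeric pattern as the default above
        TokenizerFactory factory = new TokenizerFactory("en", null, true,
                Pattern.compile("^[A-Za-z0-9]+$"));

        // train a model from the annotated training file
        TokenizerModel model;
        InputStreamFactory trainIn = new MarkableFileInputStreamFactory(new File("en-token.train"));
        try (ObjectStream<String> lines = new PlainTextByLineStream(trainIn, StandardCharsets.UTF_8);
                ObjectStream<TokenSample> samples = new TokenSampleStream(lines)) {
            model = TokenizerME.train(samples, factory, TrainingParameters.defaultParams());
        }

        // wrap the trained model in a TokenizerME and score it on the held-out file
        InputStreamFactory testIn = new MarkableFileInputStreamFactory(new File("en-token.test"));
        try (ObjectStream<String> lines = new PlainTextByLineStream(testIn, StandardCharsets.UTF_8);
                ObjectStream<TokenSample> samples = new TokenSampleStream(lines)) {
            TokenizerEvaluator evaluator = new TokenizerEvaluator(new TokenizerME(model));
            evaluator.evaluate(samples);
            System.out.println(evaluator.getFMeasure());
        }
    }
}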

From source file:com.civprod.writerstoolbox.OpenNLP.training.WordSplitingTokenizerTrainer.java

private void cmdTrainActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cmdTrainActionPerformed
    final WordSplitingTokenizerTrainer tempThis = this;
    final Charset utf8 = Charset.forName("UTF-8");
    new Thread(() -> {
        textTestResults.setText("");
        //create TokenizerFactory part of the training context
        WordSplittingTokenizerFactory myTokenizerFactory = new WordSplittingTokenizerFactory("EN",
                mAbbreviationDictionary, false, null, mSpellingDictionary,
                (TimeComplexity) comboTimeComplexity.getSelectedItem());

        Tokenizer stdTokenizer = null;
        try {
            stdTokenizer = OpenNLPUtils.createTokenizer();
        } catch (IOException ex) {
            Logger.getLogger(WordSplitingTokenizerTrainer.class.getName()).log(Level.SEVERE, null, ex);
        }
        Tokenizer myNonSplitingTokenizer = null;
        try {
            myNonSplitingTokenizer = OpenNLPUtils.createTokenizer(OpenNLPUtils.readTokenizerModel(
                    OpenNLPUtils.buildModelFileStream(".\\data\\OpenNLP\\en-fiction-token.bin")));
        } catch (IOException ex) {
            Logger.getLogger(WordSplitingTokenizerTrainer.class.getName()).log(Level.SEVERE, null, ex);
        }
        List<FileSplit> FileSplits = FileSplit.generateFileSplitsLOO(mFileCollectionListModel);
        File trainingFile = new File("en-token.train");
        File testFile = new File("en-token.test");
        SummaryStatistics curFStats = new SummaryStatistics();
        SummaryStatistics curRecallStats = new SummaryStatistics();
        SummaryStatistics curPrecisionStats = new SummaryStatistics();
        SummaryStatistics stdFStats = new SummaryStatistics();
        SummaryStatistics stdRecallStats = new SummaryStatistics();
        SummaryStatistics stdPrecisionStats = new SummaryStatistics();
        SummaryStatistics myNonSplitFStats = new SummaryStatistics();
        SummaryStatistics myNonSplitRecallStats = new SummaryStatistics();
        SummaryStatistics myNonSplitPrecisionStats = new SummaryStatistics();
        java.io.BufferedWriter trainingFileWriter = null;
        for (FileSplit curFileSplit : FileSplits) {
            try {
                //create training file
                trainingFileWriter = new java.io.BufferedWriter(
                        new java.io.OutputStreamWriter(new java.io.FileOutputStream(trainingFile), utf8));
                for (File curTrainingFile : curFileSplit.getTrainingFiles()) {
                    java.io.BufferedReader curTrainingFileReader = null;
                    try {
                        Charset fileCharset = FileUtils.determineCharset(curTrainingFile);
                        if (fileCharset == null) {
                            fileCharset = utf8;
                        }
                        curTrainingFileReader = new java.io.BufferedReader(new java.io.InputStreamReader(
                                new java.io.FileInputStream(curTrainingFile), fileCharset));
                        while (curTrainingFileReader.ready()) {
                            String curLine = curTrainingFileReader.readLine();
                            trainingFileWriter.append(curLine).append("\n");
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                trainingFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingFileWriter != null) {
                    try {
                        trainingFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create test file
            java.io.BufferedWriter testFileWriter = null;
            try {
                //create test file
                testFileWriter = new java.io.BufferedWriter(
                        new java.io.OutputStreamWriter(new java.io.FileOutputStream(testFile), utf8));
                for (File curTrainingFile : curFileSplit.getTestFiles()) {
                    String testingFileName = curTrainingFile.getCanonicalPath();
                    textTestResults
                            .setText(textTestResults.getText() + "testing with " + testingFileName + "\n");
                    java.io.BufferedReader curTrainingFileReader = null;
                    try {
                        Charset fileCharset = FileUtils.determineCharset(curTrainingFile);
                        if (fileCharset == null) {
                            fileCharset = utf8;
                        }
                        curTrainingFileReader = new java.io.BufferedReader(new java.io.InputStreamReader(
                                new java.io.FileInputStream(curTrainingFile), fileCharset));
                        while (curTrainingFileReader.ready()) {
                            String curLine = curTrainingFileReader.readLine();
                            testFileWriter.append(curLine).append("\n");
                        }
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (curTrainingFileReader != null) {
                            curTrainingFileReader.close();
                        }
                    }
                }
                testFileWriter.write('\n');
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (testFileWriter != null) {
                    try {
                        testFileWriter.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            //create and train model
            ObjectStream<String> trainingLineStream = null;
            TokenizerModel train = null;
            try {
                trainingLineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), utf8);
                ObjectStream<TokenSample> sampleStream = null;
                try {
                    sampleStream = new TokenSampleStream(trainingLineStream);
                    train = TokenizerME.train(sampleStream, myTokenizerFactory,
                            TrainingParameters.defaultParams());
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (sampleStream != null) {
                        try {
                            sampleStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null,
                                    ex);
                        }
                    }
                }
            } catch (FileNotFoundException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (trainingLineStream != null) {
                    try {
                        trainingLineStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
            if (train != null) {
                ObjectStream<String> testingLineStream = null;
                try {
                    testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), utf8);
                    ObjectStream<TokenSample> sampleStream = null;
                    try {
                        sampleStream = new TokenSampleStream(testingLineStream);
                        TokenizerME testDetector = new TokenizerME(train);
                        TokenizerEvaluator evaluator = new TokenizerEvaluator(testDetector);
                        evaluator.evaluate(sampleStream);
                        FMeasure testFMeasure = evaluator.getFMeasure();
                        curFStats.addValue(testFMeasure.getFMeasure());
                        curRecallStats.addValue(testFMeasure.getRecallScore());
                        curPrecisionStats.addValue(testFMeasure.getPrecisionScore());
                        textTestResults.setText(textTestResults.getText() + testFMeasure.getFMeasure() + " "
                                + testFMeasure.getPrecisionScore() + " " + testFMeasure.getRecallScore()
                                + "\n");
                        if (stdTokenizer != null) {
                            testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), utf8);
                            sampleStream = new TokenSampleStream(testingLineStream);
                            TokenizerEvaluator stdEvaluator = new TokenizerEvaluator(stdTokenizer);
                            stdEvaluator.evaluate(sampleStream);
                            FMeasure stdFMeasure = stdEvaluator.getFMeasure();
                            stdFStats.addValue(stdFMeasure.getFMeasure());
                            stdRecallStats.addValue(stdFMeasure.getRecallScore());
                            stdPrecisionStats.addValue(stdFMeasure.getPrecisionScore());
                            textTestResults.setText(textTestResults.getText() + " " + stdFMeasure.getFMeasure()
                                    + " " + stdFMeasure.getPrecisionScore() + " " + stdFMeasure.getRecallScore()
                                    + "\n");
                        }
                        if (myNonSplitingTokenizer != null) {
                            testingLineStream = new PlainTextByLineStream(new FileInputStream(testFile), utf8);
                            sampleStream = new TokenSampleStream(testingLineStream);
                            TokenizerEvaluator myNonSplitingEvaluator = new TokenizerEvaluator(
                                    myNonSplitingTokenizer);
                            myNonSplitingEvaluator.evaluate(sampleStream);
                            FMeasure myNonSplitFMeasure = myNonSplitingEvaluator.getFMeasure();
                            myNonSplitFStats.addValue(myNonSplitFMeasure.getFMeasure());
                            myNonSplitRecallStats.addValue(myNonSplitFMeasure.getRecallScore());
                            myNonSplitPrecisionStats.addValue(myNonSplitFMeasure.getPrecisionScore());
                            textTestResults
                                    .setText(textTestResults.getText() + " " + myNonSplitFMeasure.getFMeasure()
                                            + " " + myNonSplitFMeasure.getPrecisionScore() + " "
                                            + myNonSplitFMeasure.getRecallScore() + "\n");
                        }
                        textTestResults.setText(textTestResults.getText() + "\n");
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    } finally {
                        if (sampleStream != null) {
                            try {
                                sampleStream.close();
                            } catch (IOException ex) {
                                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE,
                                        null, ex);
                            }
                        }
                    }
                } catch (FileNotFoundException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (testingLineStream != null) {
                        try {
                            testingLineStream.close();
                        } catch (IOException ex) {
                            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null,
                                    ex);
                        }
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "\n");
        textTestResults.setText(textTestResults.getText() + "test model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + curFStats.getMean() + " stdDev "
                + curFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + curRecallStats.getMean()
                + " stdDev " + curRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean "
                + curPrecisionStats.getMean() + " stdDev " + curPrecisionStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "std model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + stdFStats.getMean() + " stdDev "
                + stdFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + stdRecallStats.getMean()
                + " stdDev " + stdRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "precision score mean "
                + stdPrecisionStats.getMean() + " stdDev " + stdPrecisionStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "my non spliting model\n");
        textTestResults.setText(textTestResults.getText() + "f score mean " + myNonSplitFStats.getMean()
                + " stdDev " + myNonSplitFStats.getStandardDeviation() + "\n");
        textTestResults.setText(textTestResults.getText() + "recall mean " + myNonSplitRecallStats.getMean()
                + " stdDev " + myNonSplitRecallStats.getStandardDeviation() + "\n");
        textTestResults.setText(
                textTestResults.getText() + "precision score mean " + myNonSplitPrecisionStats.getMean()
                        + " stdDev " + myNonSplitPrecisionStats.getStandardDeviation() + "\n");
        //create combined training file
        trainingFileWriter = null;
        try {
            trainingFileWriter = new java.io.BufferedWriter(
                    new java.io.OutputStreamWriter(new java.io.FileOutputStream(trainingFile), utf8));
            for (File curTrainingFile : mFileCollectionListModel) {
                java.io.BufferedReader curTrainingFileReader = null;
                try {
                    Charset fileCharset = FileUtils.determineCharset(curTrainingFile);
                    if (fileCharset == null) {
                        fileCharset = utf8;
                    }
                    curTrainingFileReader = new java.io.BufferedReader(new java.io.InputStreamReader(
                            new java.io.FileInputStream(curTrainingFile), fileCharset));
                    while (curTrainingFileReader.ready()) {
                        String curLine = curTrainingFileReader.readLine();
                        trainingFileWriter.append(curLine).append("\n");
                    }
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                } finally {
                    if (curTrainingFileReader != null) {
                        curTrainingFileReader.close();
                    }
                }
            }
            trainingFileWriter.write('\n');
        } catch (IOException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (trainingFileWriter != null) {
                try {
                    trainingFileWriter.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        //create and train model
        ObjectStream<String> lineStream = null;
        this.createdObject = null;
        try {
            lineStream = new PlainTextByLineStream(new FileInputStream(trainingFile), utf8);
            ObjectStream<TokenSample> sampleStream = null;
            try {
                sampleStream = new TokenSampleStream(lineStream);
                this.createdObject = TokenizerME.train(sampleStream, myTokenizerFactory,
                        TrainingParameters.defaultParams());
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (sampleStream != null) {
                    try {
                        sampleStream.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        } catch (FileNotFoundException ex) {
            Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
        } finally {
            if (lineStream != null) {
                try {
                    lineStream.close();
                } catch (IOException ex) {
                    Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        if (createdObject != null) {
            OutputStream modelOut = null;
            File modelFile = new File("en-fiction-token.bin");
            try {
                modelOut = new BufferedOutputStream(new FileOutputStream(modelFile));
                createdObject.serialize(modelOut);
            } catch (IOException ex) {
                Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
            } finally {
                if (modelOut != null) {
                    try {
                        modelOut.close();
                    } catch (IOException ex) {
                        Logger.getLogger(SentenceDetectorTrainer.class.getName()).log(Level.SEVERE, null, ex);
                    }
                }
            }
        }
        textTestResults.setText(textTestResults.getText() + "done");
    }).start();
}

From source file:edu.stanford.muse.index.NER.java

public static void testOpenNLP() {

    try {
        String s = Util.readFile("/tmp/in");
        /*
        List<Pair<String,Float>> pairs = NER.namesFromText(s);
        for (Pair<String,Float> p: pairs) {
           System.out.println (p);
        }
        System.out.println ("-----");
        */

        InputStream pis = Config.getResourceAsStream("en-ner-person.bin");
        TokenNameFinderModel pmodel = new TokenNameFinderModel(pis);
        InputStream lis = Config.getResourceAsStream("en-ner-location.bin");
        TokenNameFinderModel lmodel = new TokenNameFinderModel(lis);
        InputStream ois = Config.getResourceAsStream("en-ner-organization.bin");
        TokenNameFinderModel omodel = new TokenNameFinderModel(ois);
        InputStream tokenStream = Config.getResourceAsStream("en-token.bin");
        TokenizerModel modelTokenizer = new TokenizerModel(tokenStream);
        TokenizerME tokenizer = new TokenizerME(modelTokenizer);
        Span[] tokSpans = tokenizer.tokenizePos(s); // Util.tokenize(s).toArray(new String[0]);

        String tokens[] = new String[tokSpans.length];
        for (int i = 0; i < tokSpans.length; i++)
            tokens[i] = s.substring(tokSpans[i].getStart(), tokSpans[i].getEnd());

        NameFinderME pFinder = new NameFinderME(pmodel);
        Span[] pSpans = pFinder.find(tokens);
        NameFinderME lFinder = new NameFinderME(lmodel);
        Span[] lSpans = lFinder.find(tokens);
        NameFinderME oFinder = new NameFinderME(omodel);
        Span[] oSpans = oFinder.find(tokens);
        System.out.println("Names found:");
        for (Span span : pSpans) {
            for (int i = span.getStart(); i < span.getEnd(); i++)
                System.out.print(tokens[i] + " ");
            System.out.println();
        }

        System.out.println("Locations found:");
        for (Span span : lSpans) {
            for (int i = span.getStart(); i < span.getEnd(); i++)
                System.out.print(tokens[i] + " ");
            System.out.println();
        }

        System.out.println("Orgs found:");
        for (Span span : oSpans) {
            for (int i = span.getStart(); i < span.getEnd(); i++)
                System.out.print(tokens[i] + " ");
            System.out.println();
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
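
Besides tokenizePos(String), TokenizerME keeps the maximum-entropy probability of each tokenization decision from its most recent call, readable via getTokenProbabilities(). A short sketch continuing from the tokenizer above (the 0.9 threshold is an arbitrary value for illustration):

String[] tokens = tokenizer.tokenize(s);            // tokenizer and s as in the method above
double[] probs = tokenizer.getTokenProbabilities(); // one probability per token, same order
for (int i = 0; i < tokens.length; i++) {
    if (probs[i] < 0.9) {
        System.out.println("low-confidence token: " + tokens[i] + " (p=" + probs[i] + ")");
    }
}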

From source file:org.apache.stanbol.commons.opennlp.OpenNLP.java

/**
 * Getter for the Tokenizer of a given language. This first tries to
 * create a {@link TokenizerME} instance if the required
 * {@link TokenizerModel} for the parsed language is available. If such a
 * model is not available, it returns the {@link SimpleTokenizer} instance.
 * @param language the language, or <code>null</code> to use the
 * {@link SimpleTokenizer}
 * @return the {@link Tokenizer} for the parsed language.
 */
public Tokenizer getTokenizer(String language) {
    Tokenizer tokenizer = null;
    if (language != null) {
        try {
            TokenizerModel model = getTokenizerModel(language);
            if (model != null) {
                tokenizer = new TokenizerME(model);
            }
        } catch (InvalidFormatException e) {
            log.warn("Unable to load Tokenizer Model for " + language + ": "
                    + "Will use Simple Tokenizer instead", e);
        } catch (IOException e) {
            log.warn("Unable to load Tokenizer Model for " + language + ": "
                    + "Will use Simple Tokenizer instead", e);
        }
    }
    if (tokenizer == null) {
        log.debug("Use Simple Tokenizer for language {}", language);
        tokenizer = SimpleTokenizer.INSTANCE;
    } else {
        log.debug("Use ME Tokenizer for language {}", language);
    }
    return tokenizer;
}
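
Callers do not have to care which implementation they receive; a typical use, assuming openNLP is an injected instance of this component (a hypothetical setup for illustration):

// openNLP: an injected instance of the component above
Tokenizer tokenizer = openNLP.getTokenizer("en");
// with no "en" model available this is SimpleTokenizer.INSTANCE,
// otherwise a TokenizerME backed by the loaded TokenizerModel
for (String token : tokenizer.tokenize("Stanbol enhances unstructured text.")) {
    System.out.println(token);
}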

From source file:org.dbpedia.spotlight.spot.OpenNLPNGramSpotter.java

/** Extracts noun-phrase n-grams from the given piece of input text.
 * @param text a Text object containing the input from which to extract NP n-grams
 * @return a list of SurfaceFormOccurrence objects.
 */
protected List<SurfaceFormOccurrence> extractNPNGrams(Text text) {
    String intext = text.text();
    //System.out.println("\n\nRR- nextractNPNGrams(...) method called! with text: " + intext + "\n\n");
    List<SurfaceFormOccurrence> npNgramSFLst = new ArrayList<SurfaceFormOccurrence>();
    SentenceDetectorME sentenceDetector = new SentenceDetectorME((SentenceModel) sentenceModel);
    TokenizerME tokenizer = new TokenizerME((TokenizerModel) tokenModel);
    POSTaggerME posTagger = new POSTaggerME((POSModel) posModel);
    ChunkerME chunker = new ChunkerME((ChunkerModel) chunkModel);

    Span[] sentSpans = sentenceDetector.sentPosDetect(intext);
    for (Span sentSpan : sentSpans) {
        String sentence = sentSpan.getCoveredText(intext).toString();
        int start = sentSpan.getStart();
        Span[] tokSpans = tokenizer.tokenizePos(sentence);
        String[] tokens = new String[tokSpans.length];
        // System.out.println("\n\nTokens:");
        for (int i = 0; i < tokens.length; i++) {
            tokens[i] = tokSpans[i].getCoveredText(sentence).toString();
            // System.out.println(tokens[i]);
        }
        String[] tags = posTagger.tag(tokens);
        Span[] chunks = chunker.chunkAsSpans(tokens, tags);
        for (Span chunk : chunks) {
            if ("NP".equals(chunk.getType())) {
                //Note: getStart()/getEnd() on chunk spans give token indexes, not character offsets.
                //The actual start/end character positions of the chunk within the sentence come from the token spans;
                //those are offsets from the beginning of the sentence, so the sentence's own start position
                //must be added to get offsets from the beginning of the input text.
                int begin = tokSpans[chunk.getStart()].getStart();
                int end = tokSpans[chunk.getEnd() - 1].getEnd();
                List<Map<String, Integer>> ngrampos = extractNGramPos(chunk.getStart(), chunk.getEnd() - 1);
                extractNGrams(ngrampos, start, text, tokSpans, npNgramSFLst);
            }
        }
    }
    return npNgramSFLst;
}

From source file:org.wso2.uima.collectionProccesingEngine.analysisEngines.LocationIdentifier.java

@Override
public void initialize(UimaContext ctx) throws ResourceInitializationException {
    super.initialize(ctx);
    InputStream sentenceStream = null;
    InputStream tokenizerStream = null;
    InputStream nameFinderStream = null;
    try {
        sentenceStream = getContext().getResourceAsStream("SentenceModel");
        SentenceModel sentenceModel = new SentenceModel(sentenceStream);
        sentenceDetector = new SentenceDetectorME(sentenceModel);
        sentenceStream.close();
        tokenizerStream = getContext().getResourceAsStream("TokenizerModel");
        TokenizerModel tokenModel = new TokenizerModel(tokenizerStream);
        tokenizer = new TokenizerME(tokenModel);
        tokenizerStream.close();
        nameFinderStream = getContext().getResourceAsStream("TokenNameFinderModel");
        TokenNameFinderModel nameFinderModel = new TokenNameFinderModel(nameFinderStream);
        locationFinder = new NameFinderME(nameFinderModel);
        nameFinderStream.close();
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    } finally {
        IOUtils.closeQuietly(nameFinderStream);
        IOUtils.closeQuietly(tokenizerStream);
        IOUtils.closeQuietly(sentenceStream);
        logger.info(LocationIdentifier.class.getSimpleName() + " Analysis Engine initialized successfully");
    }
}

From source file:os.Controller.java

public String tokenize(String teks) throws InvalidFormatException, IOException {
    InputStream is = new FileInputStream("en-token.bin");

    TokenizerModel model = new TokenizerModel(is);

    Tokenizer tokenizer = new TokenizerME(model);

    String tokens[] = tokenizer.tokenize(teks);
    String result = "";

    for (String a : tokens) {
        result = result + " " + a;
    }

    is.close();

    return result;
}
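
One caveat with the method above: the stream is closed only on the success path, so an exception from the TokenizerModel constructor leaks the file handle, and the repeated string concatenation is quadratic in the number of tokens. A tighter variant of the same method (a sketch, preserving the leading space the original produces) using try-with-resources and a StringBuilder:

public String tokenize(String teks) throws IOException {
    // InvalidFormatException extends IOException, so one throws clause covers both
    try (InputStream is = new FileInputStream("en-token.bin")) {
        Tokenizer tokenizer = new TokenizerME(new TokenizerModel(is));
        StringBuilder result = new StringBuilder();
        for (String token : tokenizer.tokenize(teks)) {
            result.append(' ').append(token);
        }
        return result.toString();
    }
}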