Example usage for java.nio.file Files isReadable

List of usage examples for java.nio.file Files isReadable

Introduction

On this page you can find example usage of java.nio.file Files isReadable.

Prototype

public static boolean isReadable(Path path) 

Source Link

Document

Tests whether a file is readable.

Usage

From source file:org.wso2.carbon.event.template.manager.core.internal.util.TemplateManagerHelper.java

/**
 * Creates a JavaScript engine packed with the scripts declared in the given domain. If two scripts
 * define methods with the same name, the later method overrides the previous one.
 *
 * @param domain the domain whose scripts are evaluated into the engine; may be null
 * @return the JavaScript engine, or null if no JavaScript engine is available
 * @throws TemplateDeploymentException if there are any errors in JavaScript evaluation
 */
public static ScriptEngine createJavaScriptEngine(Domain domain) throws TemplateDeploymentException {

    ScriptEngineManager scriptEngineManager = new ScriptEngineManager();
    ScriptEngine scriptEngine = scriptEngineManager
            .getEngineByName(TemplateManagerConstants.JAVASCRIPT_ENGINE_NAME);

    if (scriptEngine == null) {
        // Exception will be thrown later, only if function calls are used in the template
        log.warn("JavaScript engine is not available. Function calls in the templates cannot be evaluated");
    } else {
        if (domain != null && domain.getScripts() != null && domain.getScripts().getScript() != null) {
            Path scriptDirectory = Paths.get(TemplateManagerConstants.TEMPLATE_SCRIPT_PATH);
            if (Files.exists(scriptDirectory, LinkOption.NOFOLLOW_LINKS)
                    && Files.isDirectory(scriptDirectory)) {
                for (Script script : domain.getScripts().getScript()) {
                    String src = script.getSrc();
                    String content = script.getContent();
                    if (src != null) {
                        // Evaluate JavaScript file
                        Path scriptFile = scriptDirectory.resolve(src).normalize();
                        // Path-traversal guard: reject paths outside the permitted directory
                        // BEFORE probing the filesystem for them.
                        if (!scriptFile.startsWith(scriptDirectory)) {
                            throw new TemplateDeploymentException("Script file "
                                    + scriptFile.toAbsolutePath() + " is not in the permitted directory "
                                    + scriptDirectory.toAbsolutePath());
                        }
                        if (Files.exists(scriptFile, LinkOption.NOFOLLOW_LINKS)
                                && Files.isReadable(scriptFile)) {
                            try {
                                scriptEngine
                                        .eval(Files.newBufferedReader(scriptFile, Charset.defaultCharset()));
                            } catch (ScriptException e) {
                                throw new TemplateDeploymentException("Error in JavaScript "
                                        + scriptFile.toAbsolutePath() + ": " + e.getMessage(), e);
                            } catch (IOException e) {
                                // Preserve the cause so the underlying I/O failure is not lost
                                throw new TemplateDeploymentException(
                                        "Error in reading JavaScript file: " + scriptFile.toAbsolutePath(), e);
                            }
                        } else {
                            throw new TemplateDeploymentException("JavaScript file not exist at: "
                                    + scriptFile.toAbsolutePath() + " or not readable.");
                        }
                    }
                    if (content != null) {
                        // Evaluate inline JavaScript content
                        try {
                            scriptEngine.eval(content);
                        } catch (ScriptException e) {
                            throw new TemplateDeploymentException(
                                    "JavaScript declared in " + domain.getName() + " has error", e);
                        }
                    }
                }
            } else {
                log.warn("Script directory not found at: " + scriptDirectory.toAbsolutePath());
            }
        }
    }

    return scriptEngine;
}

From source file:at.tfr.securefs.ui.CopyFilesServiceBean.java

/**
 * Validates that the given path name denotes a readable, traversable directory.
 *
 * @param fromPathName the directory path name to validate
 * @return the validated {@link Path}
 * @throws IOException if the path is not a directory, not readable, or not executable
 */
private Path validateFromPath(String fromPathName) throws IOException {
    final Path dir = Paths.get(fromPathName);

    // The path itself must be a directory (symlinks are not followed).
    final boolean isRealDirectory = Files.isDirectory(dir, LinkOption.NOFOLLOW_LINKS);
    if (!isRealDirectory) {
        throw new IOException("Path " + dir + " is no directory");
    }

    // Readable: directory entries can be listed.
    if (!Files.isReadable(dir)) {
        throw new IOException("Path " + dir + " is not readable");
    }

    // Executable: the directory can be traversed.
    if (!Files.isExecutable(dir)) {
        throw new IOException("Path " + dir + " is not executable");
    }

    return dir;
}

From source file:com.ontotext.s4.service.S4ServiceClient.java

/**
  * Classifies the contents of a single file returning an
  * {@link InputStream} from which the classification information can be read
  *// w  w w  .  j a  va2 s .  co  m
  * @param documentContent the file which will be classified
  * @param documentEncoding the encoding of the file which will be classified
  * @param documentMimeType the MIME type of the file which will be classified
  * 
  * @return Service response raw content
  *
  * @throws IOException if there are problems reading the contents of the file
  * @throws S4ServiceClientException
  */
public InputStream classifyFileContentsAsStream(File documentContent, Charset documentEncoding,
        SupportedMimeType documentMimeType) throws IOException, S4ServiceClientException {

    Path documentPath = documentContent.toPath();
    if (!Files.isReadable(documentPath)) {
        throw new IOException("File " + documentPath.toString() + " is not readable.");
    }
    ByteBuffer buff;
    buff = ByteBuffer.wrap(Files.readAllBytes(documentPath));
    String content = documentEncoding.decode(buff).toString();
    return classifyDocumentAsStream(content, documentMimeType);
}

From source file:org.apache.marmotta.platform.core.services.importer.ImportWatchServiceImpl.java

/**
 * Get the target context.
 * The algorithm is as follows:
 * <ol>
 * <li>check for a file "conf" (configurable, see {@link #CONFIG_KEY_CONF_FILE}) which specifies 
 * the target context using {@link Properties} syntax (key {@code context}), then use it; or
 * <li>check if the sub-directory is a url-encoded URI, then use it; or
 * <li>construct the context by using {@link ConfigurationService#getBaseContext()} and the relative sub-dirs and use it; or
 * <li>use the default context as a general fallback.
 * </ol>
 * 
 * @param file the file being imported
 * @return the context URI
 * @throws URISyntaxException if the constructed context is not a valid URI
 */
private URI getTargetContext(Path file) throws URISyntaxException {
    // Check for a config file in the same directory as the imported file
    final Path config = file.getParent()
            .resolve(configurationService.getStringConfiguration(CONFIG_KEY_CONF_FILE, "config"));
    if (Files.isReadable(config)) {
        // NOTE(review): readability is checked on 'config' but 'file' is passed to loadConfigFile();
        // verify that loadConfigFile resolves the config file itself, otherwise this should
        // probably be loadConfigFile(config).
        Properties prop = loadConfigFile(file);
        final String _c = prop.getProperty("context");
        if (_c != null) {
            try {
                URI context = contextService.createContext(_c);
                log.debug("using context {} from config file {}", context, config);
                return context;
            } catch (URISyntaxException e) {
                // An invalid context in the config file is not fatal; fall through to the other strategies.
                log.warn("invalid context {} in config file {}, ignoring", _c, config);
            }
        } else {
            log.trace("no context defined in config file {}", config);
        }
    }

    // Check for url-encoded directory
    Path subDir = getImportRoot().relativize(file.getParent());
    if (StringUtils.isBlank(subDir.toString())) {
        // File sits directly in the import root: fall back to the default context.
        log.trace("using default context for file {}", file);
        return contextService.getDefaultContext();
    } else if (StringUtils.startsWith(subDir.toString(), "http%3A%2F%2F")) {
        // Sub-directory name is a url-encoded http URI: decode it and use it as the context.
        log.debug("using url-encoded context {} for import of {}", subDir, file);
        try {
            return contextService.createContext(URLDecoder.decode(subDir.toString(), "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            // UTF-8 is always supported by the JVM, so this branch is effectively unreachable.
            log.error("Error url-decoding context name '{}', so using the default one: {}", subDir,
                    e.getMessage());
            return contextService.getDefaultContext();
        }
    } else {
        // Build the context from the base context plus the relative sub-directory path.
        final String _c = String.format("%s/%s", configurationService.getBaseContext().replaceFirst("/$", ""),
                subDir);
        final URI context = contextService.createContext(_c);
        log.debug("using context {} based on relative subdir {} for file {}", context, subDir, file);
        return context;
    }
}

From source file:io.github.robwin.swagger2markup.builder.document.PathsDocument.java

/**
 * Reads an example/*from w w  w. j av a  2 s  .  co  m*/
 *
 * @param exampleFolder the name of the folder where the example file resides
 * @param exampleFileName the name of the example file
 * @return the content of the file
 * @throws IOException
 */
private String example(String exampleFolder, String exampleFileName) throws IOException {
    for (String fileNameExtension : markupLanguage.getFileNameExtensions()) {
        java.nio.file.Path path = Paths.get(examplesFolderPath, exampleFolder,
                exampleFileName + fileNameExtension);
        if (Files.isReadable(path)) {
            if (logger.isInfoEnabled()) {
                logger.info("Example file processed: {}", path);
            }
            return FileUtils.readFileToString(path.toFile(), StandardCharsets.UTF_8).trim();
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug("Example file is not readable: {}", path);
            }
        }
    }
    if (logger.isWarnEnabled()) {
        logger.info("No example file found with correct file name extension in folder: {}",
                Paths.get(examplesFolderPath, exampleFolder));
    }
    return null;
}

From source file:io.anserini.rerank.lib.AxiomReranker.java

/**
 * Select {@code R*N} docs from the ranking results and the index as the reranking pool.
 * The process is:
 * 1. Keep the top R documents in the original ranking list
 * 2. Randomly pick {@code (N-1)*R} documents from the rest of the index so in total we have R*M documents
 *
 * @param docs The initial ranking results
 * @param context An instance of RerankerContext
 * @return a Set of {@code R*N} document Ids
 */
private Set<Integer> selectDocs(ScoredDocuments docs, RerankerContext<T> context) throws IOException {
    // Seed the pool with the internal ids of the top-R documents from the initial ranking.
    Set<Integer> docidSet = new HashSet<>(Arrays
            .asList(ArrayUtils.toObject(Arrays.copyOfRange(docs.ids, 0, Math.min(this.R, docs.ids.length)))));
    long targetSize = this.R * this.N;

    if (docidSet.size() < targetSize) {
        IndexReader reader;
        IndexSearcher searcher;
        if (this.externalIndexPath != null) {
            Path indexPath = Paths.get(this.externalIndexPath);
            if (!Files.exists(indexPath) || !Files.isDirectory(indexPath) || !Files.isReadable(indexPath)) {
                // NOTE(review): the message omits the readability condition that is also checked here.
                throw new IllegalArgumentException(
                        this.externalIndexPath + " does not exist or is not a directory.");
            }
            // NOTE(review): this reader is opened here but never closed in this method — potential
            // resource leak when an external index is configured; confirm ownership with callers.
            reader = DirectoryReader.open(FSDirectory.open(indexPath));
            searcher = new IndexSearcher(reader);
        } else {
            searcher = context.getIndexSearcher();
            reader = searcher.getIndexReader();
        }
        int availableDocsCnt = reader.getDocCount(this.field);
        if (this.deterministic) { // internal docid cannot be relied due to multi-threads indexing,
                                  // we have to rely on external docid here
            // Fixed seed so the sampled pool is reproducible across runs.
            Random random = new Random(this.seed);
            // NOTE(review): if targetSize exceeds the number of distinct documents available,
            // this loop (and the one in the else branch) cannot terminate — confirm callers
            // guarantee R*N is at most the index size.
            while (docidSet.size() < targetSize) {
                if (this.externalDocidsCache != null) {
                    // Map a randomly chosen external docid back to its internal Lucene docid.
                    String docid = this.externalDocidsCache
                            .get(random.nextInt(this.externalDocidsCache.size()));
                    Query q = new TermQuery(new Term(LuceneDocumentGenerator.FIELD_ID, docid));
                    TopDocs rs = searcher.search(q, 1);
                    docidSet.add(rs.scoreDocs[0].doc);
                } else {
                    docidSet.add(this.internalDocidsCache[random.nextInt(this.internalDocidsCache.length)].doc);
                }
            }
        } else {
            // Non-deterministic variant: sample internal docids directly.
            Random random = new Random();
            while (docidSet.size() < targetSize) {
                docidSet.add(random.nextInt(availableDocsCnt));
            }
        }
    }

    return docidSet;
}

From source file:org.niord.core.batch.BatchService.java

/**
 * Returns the contents of the log file with the given file name.
 * If fromLineNo is specified, only the subsequent lines are returned.
 *
 * @param instanceId the batch instance id
 * @param logFileName the log file name
 * @param fromLineNo if specified, only the subsequent lines are returned
 * @return the contents of the log file
 */
public String getBatchJobLogFile(Long instanceId, String logFileName, Integer fromLineNo) throws IOException {

    final BatchData batchJob = findByInstanceId(instanceId);
    if (batchJob == null) {
        throw new IllegalArgumentException("Invalid batch instance ID " + instanceId);
    }

    final Path logFile = computeBatchJobPath(batchJob.computeBatchJobFolderPath().resolve(logFileName));
    if (!Files.isRegularFile(logFile) || !Files.isReadable(logFile)) {
        throw new IllegalArgumentException("Invalid batch log file " + logFileName);
    }

    // Number of leading lines to drop (zero when no starting line was requested).
    int linesToSkip = 0;
    if (fromLineNo != null) {
        linesToSkip = fromLineNo;
    }

    // Files.lines holds the file open, so it must be closed via try-with-resources.
    try (Stream<String> lines = Files.lines(logFile)) {
        return lines.skip(linesToSkip).collect(Collectors.joining("\n"));
    }
}

From source file:de.codecentric.elasticsearch.plugin.kerberosrealm.AbstractUnitTest.java

/**
 * Resolves a classpath resource to an absolute filesystem {@link Path}.
 *
 * @param fileNameFromClasspath the resource name to look up on the classpath
 * @return the resolved path, or null if the resource is missing, unreadable,
 *         or its URL cannot be converted to a URI
 */
private Path getAbsoluteFilePathFromClassPath(final String fileNameFromClasspath) {
    final URL fileUrl = PropertyUtil.class.getClassLoader().getResource(fileNameFromClasspath);
    if (fileUrl == null) {
        log.error("Failed to load " + fileNameFromClasspath);
        return null;
    }
    try {
        final Path path = Paths.get(fileUrl.toURI());
        // Accept the path only if it is readable or a directory.
        if (!Files.isReadable(path) && !Files.isDirectory(path)) {
            log.error("Cannot read from {}, file does not exist or is not readable", path.toString());
            return null;
        }
        if (!path.isAbsolute()) {
            log.warn("{} is not absolute", path.toString());
        }
        return path;
    } catch (final URISyntaxException e) {
        // Previously swallowed silently ("//ignore"); log it so resolution failures are diagnosable.
        log.error("Invalid URI for classpath resource {}", fileNameFromClasspath, e);
        return null;
    }
}

From source file:io.anserini.rerank.lib.AxiomReranker.java

/**
 * Extract ALL the terms from the documents pool.
 *
 * @param docIds The reranking pool, see {@link #selectDocs} for explanations
 * @param context An instance of RerankerContext
 * @param filterPattern A Regex pattern; terms are collected only if they match the pattern, could be null
 * @return A Map of term to the Set of docIds in which it occurs (a small inverted list)
 */
private Map<String, Set<Integer>> extractTerms(Set<Integer> docIds, RerankerContext<T> context,
        Pattern filterPattern) throws Exception, IOException {
    IndexReader reader;
    if (this.externalIndexPath != null) {
        Path indexPath = Paths.get(this.externalIndexPath);
        if (!Files.exists(indexPath) || !Files.isDirectory(indexPath) || !Files.isReadable(indexPath)) {
            // Message now reflects all three checked conditions (existence, directory, readability).
            throw new IllegalArgumentException(
                    this.externalIndexPath + " does not exist, is not a directory, or is not readable.");
        }
        // NOTE(review): this reader is never closed when opened here; consider closing it
        // once the terms have been extracted.
        reader = DirectoryReader.open(FSDirectory.open(indexPath));
    } else {
        // Only the reader is needed in this method; the searcher local was unused.
        reader = context.getIndexSearcher().getIndexReader();
    }
    Map<String, Set<Integer>> termDocidSets = new HashMap<>();
    for (int docid : docIds) {
        Terms terms = reader.getTermVector(docid, LuceneDocumentGenerator.FIELD_BODY);
        if (terms == null) {
            LOG.warn("Document vector not stored for docid: " + docid);
            continue;
        }
        TermsEnum te = terms.iterator();
        if (te == null) {
            LOG.warn("Document vector not stored for docid: " + docid);
            continue;
        }
        while ((te.next()) != null) {
            String term = te.term().utf8ToString();
            // We do some noisy filtering here ... pure empirical heuristic
            if (term.length() < 2)
                continue;
            if (!term.matches("[a-z]+"))
                continue;
            if (filterPattern == null || filterPattern.matcher(term).matches()) {
                // computeIfAbsent replaces the containsKey/put pair
                termDocidSets.computeIfAbsent(term, t -> new HashSet<>()).add(docid);
            }
        }
    }
    return termDocidSets;
}

From source file:io.anserini.rerank.lib.AxiomReranker.java

/**
 * Calculate the scores (weights) of each term that occurred in the reranking pool.
 * The process:
 * 1. For each query term, calculate its score for each term in the reranking pool. The score
 * is calculated as
 * <pre>
 * P(both occurs)*log{P(both occurs)/P(t1 occurs)/P(t2 occurs)}
 * + P(both not occurs)*log{P(both not occurs)/P(t1 not occurs)/P(t2 not occurs)}
 * + P(t1 occurs t2 not occurs)*log{P(t1 occurs t2 not occurs)/P(t1 occurs)/P(t2 not occurs)}
 * + P(t1 not occurs t2 occurs)*log{P(t1 not occurs t2 occurs)/P(t1 not occurs)/P(t2 occurs)}
 * </pre>
 * 2. For each query term the scores of every other term in the reranking pool are stored in a
 * PriorityQueue, only the top {@code K} are kept.
 * 3. Add the scores of the same term together and pick the top {@code M} ones.
 *
 * @param termInvertedList A Map of term to the Set of docIds in which the term occurs
 * @param context An instance of RerankerContext
 * @return Top terms and their weight scores in a HashMap
 */
private Map<String, Double> computeTermScore(Map<String, Set<Integer>> termInvertedList,
        RerankerContext<T> context) throws IOException {
    // Orders pairs by descending score, breaking ties alphabetically (case-insensitive)
    // so the selection is stable across runs.
    class ScoreComparator implements Comparator<Pair<String, Double>> {
        public int compare(Pair<String, Double> a, Pair<String, Double> b) {
            int cmp = Double.compare(b.getRight(), a.getRight());
            if (cmp == 0) {
                return a.getLeft().compareToIgnoreCase(b.getLeft());
            } else {
                return cmp;
            }
        }
    }

    // get collection statistics so that we can get idf later on.
    IndexReader reader;
    if (this.externalIndexPath != null) {
        Path indexPath = Paths.get(this.externalIndexPath);
        if (!Files.exists(indexPath) || !Files.isDirectory(indexPath) || !Files.isReadable(indexPath)) {
            // Message now reflects all three checked conditions (existence, directory, readability).
            throw new IllegalArgumentException(
                    this.externalIndexPath + " does not exist, is not a directory, or is not readable.");
        }
        reader = DirectoryReader.open(FSDirectory.open(indexPath));
    } else {
        IndexSearcher searcher = context.getIndexSearcher();
        reader = searcher.getIndexReader();
    }
    final long docCount = reader.numDocs() == -1 ? reader.maxDoc() : reader.numDocs();

    //calculate the Mutual Information between term with each query term
    List<String> queryTerms = context.getQueryTokens();
    Map<String, Integer> queryTermsCounts = new HashMap<>();
    for (String qt : queryTerms) {
        queryTermsCounts.put(qt, queryTermsCounts.getOrDefault(qt, 0) + 1);
    }

    Set<Integer> allDocIds = new HashSet<>();
    for (Set<Integer> s : termInvertedList.values()) {
        allDocIds.addAll(s);
    }
    int docIdsCount = allDocIds.size();

    // Each priority queue corresponds to a query term: The p-queue itself stores all terms
    // in the reranking pool and their reranking scores to the query term.
    List<PriorityQueue<Pair<String, Double>>> allTermScoresPQ = new ArrayList<>();
    for (Map.Entry<String, Integer> q : queryTermsCounts.entrySet()) {
        String queryTerm = q.getKey();
        long df = reader.docFreq(new Term(LuceneDocumentGenerator.FIELD_BODY, queryTerm));
        if (df == 0L) {
            continue;
        }
        // BUGFIX: force floating-point division; (1 + docCount) / df was long integer
        // division, which truncated the ratio before taking the log.
        float idf = (float) Math.log((1.0 + docCount) / df);
        int qtf = q.getValue();
        if (termInvertedList.containsKey(queryTerm)) {
            PriorityQueue<Pair<String, Double>> termScorePQ = new PriorityQueue<>(new ScoreComparator());
            double selfMI = computeMutualInformation(termInvertedList.get(queryTerm),
                    termInvertedList.get(queryTerm), docIdsCount);
            for (Map.Entry<String, Set<Integer>> termEntry : termInvertedList.entrySet()) {
                double score;
                if (termEntry.getKey().equals(queryTerm)) { // The mutual information to itself will always be 1
                    score = idf * qtf;
                } else {
                    double crossMI = computeMutualInformation(termInvertedList.get(queryTerm),
                            termEntry.getValue(), docIdsCount);
                    score = idf * beta * qtf * crossMI / selfMI;
                }
                termScorePQ.add(Pair.of(termEntry.getKey(), score));
            }
            allTermScoresPQ.add(termScorePQ);
        }
    }

    Map<String, Double> aggTermScores = new HashMap<>();
    for (PriorityQueue<Pair<String, Double>> termScores : allTermScoresPQ) {
        // BUGFIX: hoist the bound — the queue shrinks as we poll, so re-evaluating
        // Math.min(termScores.size(), this.K) each iteration stopped after roughly
        // half of the intended top-K terms.
        final int topK = Math.min(termScores.size(), this.K);
        for (int i = 0; i < topK; i++) {
            Pair<String, Double> termScore = termScores.poll();
            String term = termScore.getLeft();
            Double score = termScore.getRight();
            if (score - 0.0 > 1e-8) {
                aggTermScores.put(term, aggTermScores.getOrDefault(term, 0.0) + score);
            }
        }
    }
    PriorityQueue<Pair<String, Double>> termScoresPQ = new PriorityQueue<>(new ScoreComparator());
    for (Map.Entry<String, Double> termScore : aggTermScores.entrySet()) {
        termScoresPQ.add(Pair.of(termScore.getKey(), termScore.getValue() / queryTerms.size()));
    }
    Map<String, Double> resultTermScores = new HashMap<>();
    // Same shrinking-queue fix as above, for the final top-M selection.
    final int topM = Math.min(termScoresPQ.size(), this.M);
    for (int i = 0; i < topM; i++) {
        Pair<String, Double> termScore = termScoresPQ.poll();
        String term = termScore.getLeft();
        double score = termScore.getRight();
        resultTermScores.put(term, score);
    }

    return resultTermScores;
}