List of usage examples for java.nio.file.Path.getFileName()
Path getFileName();
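Before the source-file examples, a minimal self-contained sketch of the method's behavior on a Unix-style default file system (class and path names here are illustrative): getFileName() returns the farthest element from the root as a new one-element Path, or null when the path has no name elements, such as a root path.

import java.nio.file.Path;
import java.nio.file.Paths;

public class GetFileNameDemo {
  public static void main(String[] args) {
    Path path = Paths.get("/var/log/app/server.log");

    // getFileName() returns the last name element as a new Path, not a String.
    Path fileName = path.getFileName();
    System.out.println(fileName);                              // server.log
    System.out.println(fileName.toString().endsWith(".log"));  // true

    // A path with no name elements (for example a root path) returns null.
    System.out.println(Paths.get("/").getFileName());          // null
  }
}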
From source file:com.rptools.io.TableFileParser.java
/**
 * Parse file found at path {@param file} into an RPTable object. See {@link RPTable}
 * for the expected table file format.
 *
 * Json files are read directly as a proto3 RPTable message. Text files must be parsed,
 * have their json equivalent written, then be deleted. This is to ease adding future
 * tables by pasting their text content instead of trying to convert them to json by hand.
 *
 * @param file Input table text/json file.
 * @return {@link RPTable.Builder} created from contents of input file.
 */
public RPTable.Builder parseFile(Path file) {
  roll = 1;
  try {
    List<String> lines = Files.readAllLines(file);
    RPTable.Builder builder = RPTable.newBuilder();
    // We want to keep json files around, and convert text files to json, then delete them.
    if (file.getFileName().toString().endsWith(EXT_JSON)) {
      JsonFormat.parser().merge(JOINER.join(lines), builder);
      return builder;
    }
    setTableName(file, builder);
    String headerRow = lines.remove(0);
    List<String> headers = SPLITTER.splitToList(headerRow);
    builder.addAllColumns(headers);
    lines.forEach(line -> parseLine(builder, headers, line));
    // Only return a table for .txt files if the json file did not also
    // already exist to be read separately.
    if (updateResourceFiles(file, builder)) {
      return builder;
    }
    log.debug("Not renewing json/text file");
    return null;
  } catch (IOException e) {
    log.error(String.format(PARSE_ERROR, file.toString(), e.toString()), e);
    return null;
  }
}
From source file:fr.ortolang.diffusion.runtime.engine.task.ImportReferentialEntityTask.java
@Override
public void executeTask(DelegateExecution execution) throws RuntimeEngineTaskException {
  checkParameters(execution);
  String referentialPathParam = execution.getVariable(REFERENTIAL_PATH_PARAM_NAME, String.class);
  report = new StringBuilder();
  File referentialPathFile = new File(referentialPathParam);
  if (referentialPathFile.exists()) {
    final PathMatcher matcher = FileSystems.getDefault().getPathMatcher("glob:*.{json}");
    final Path referentialPath = Paths.get(referentialPathParam);
    try {
      Files.walkFileTree(referentialPath, new FileVisitor<Path>() {

        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
          return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFile(Path filepath, BasicFileAttributes attrs) throws IOException {
          Path filename = filepath.getFileName();
          if (filename != null && matcher.matches(filename)) {
            File jsonFile = filepath.toFile();
            String content = StreamUtils.getContent(jsonFile);
            if (content == null) {
              // LOGGER.log(Level.SEVERE, "Referential entity content is empty for file " + jsonFile);
              report.append(" - referential entity content is empty for file ").append(jsonFile).append("\r\n");
              partial = true;
              return FileVisitResult.CONTINUE;
            }
            String type = extractField(content, "type");
            if (type == null) {
              // LOGGER.log(Level.SEVERE, "Referential entity type unknown for file " + jsonFile);
              report.append(" - referential entity type unknown for file ").append(jsonFile).append("\r\n");
              partial = true;
              return FileVisitResult.CONTINUE;
            }
            String name = jsonFile.getName().substring(0, jsonFile.getName().length() - 5);
            try {
              boolean exist = exists(name);
              if (!exist) {
                createReferentialEntity(name, type, content);
                report.append(" + referential entity created : ").append(name).append("\r\n");
              } else {
                updateReferentialEntity(name, type, content);
                report.append(" + referential entity updated : ").append(name).append("\r\n");
              }
            } catch (RuntimeEngineTaskException e) {
              // LOGGER.log(Level.SEVERE, "unable to import referential entity (" + type + ") named " + name, e);
              report.append(" - unable to import referential entity '").append(name)
                  .append("' : ").append(e.getMessage()).append("\r\n");
              partial = true;
              return FileVisitResult.CONTINUE;
            }
          }
          return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
          return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
          return FileVisitResult.CONTINUE;
        }
      });
    } catch (Exception e) {
      // LOGGER.log(Level.SEVERE, "unable to import referential : " + referentialPathFile, e);
      report.append("Unable to import referential ").append(referentialPathFile).append(" caused by : ")
          .append(e.getMessage()).append("\r\n");
      partial = true;
    }
  } else {
    // LOGGER.log(Level.SEVERE, "Referential folder doesn't exist : " + referentialPathFile);
    report.append("Referential folder doesn't exist at ").append(referentialPathFile).append("\r\n");
    partial = true;
  }
  if (partial) {
    throwRuntimeEngineEvent(RuntimeEngineEvent.createProcessLogEvent(execution.getProcessBusinessKey(),
        "Some entities have not been imported (see trace for detail)"));
  } else {
    throwRuntimeEngineEvent(RuntimeEngineEvent.createProcessLogEvent(execution.getProcessBusinessKey(),
        "All entities imported successfully"));
  }
  throwRuntimeEngineEvent(RuntimeEngineEvent.createProcessTraceEvent(execution.getProcessBusinessKey(),
      "Report: \r\n" + report.toString(), null));
}
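Note how the visitor above matches getFileName(), not the full path, against the glob:*.{json} matcher: a PathMatcher glob must match the entire path it is given, and * does not cross directory separators, so testing only the final name element makes the pattern work for files at any depth. A minimal sketch of that idiom, separate from the workflow-engine code (path names are illustrative):

import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.nio.file.Paths;

public class GlobOnFileName {
  public static void main(String[] args) {
    PathMatcher matcher = FileSystems.getDefault().getPathMatcher("glob:*.json");

    Path nested = Paths.get("referential/entities/organization.json");
    // Matching the whole relative path fails: '*' does not span directories.
    System.out.println(matcher.matches(nested));                // false
    // Matching only the final name element succeeds, regardless of depth.
    System.out.println(matcher.matches(nested.getFileName()));  // true
  }
}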
From source file:com.qwazr.library.archiver.ArchiverTool.java
public void decompress_dir(final Path sourceDir, String sourceExtension, final Path destDir,
    final String destExtension) throws IOException, CompressorException {
  if (!Files.exists(sourceDir))
    throw new FileNotFoundException("The source directory does not exist: " + sourceDir.toAbsolutePath());
  if (!Files.exists(destDir))
    throw new FileNotFoundException("The destination directory does not exist: " + destDir.toAbsolutePath());
  final Path[] sourceFiles;
  try (final Stream<Path> stream = Files.list(sourceDir)) {
    sourceFiles = stream.filter(p -> Files.isRegularFile(p)).toArray(Path[]::new);
  }
  if (sourceFiles == null)
    return;
  for (Path sourceFile : sourceFiles) {
    final String fileName = sourceFile.getFileName().toString();
    final String ext = FilenameUtils.getExtension(fileName);
    if (!sourceExtension.equals(ext))
      continue;
    String newName = FilenameUtils.getBaseName(fileName);
    if (destExtension != null)
      newName += '.' + destExtension;
    final Path destFile = destDir.resolve(newName);
    if (Files.exists(destFile))
      continue;
    decompress(sourceFile, destFile);
  }
}
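The decompress_dir example above hands getFileName() to commons-io's FilenameUtils to split off the extension and base name. Where that dependency is unavailable, the same split can be done with plain String operations on the file name; a rough JDK-only sketch (class and file names are illustrative):

import java.nio.file.Path;
import java.nio.file.Paths;

public class NameAndExtension {
  public static void main(String[] args) {
    Path file = Paths.get("archive/data-2024.csv.gz");
    String fileName = file.getFileName().toString();

    // Split on the last dot; a name without a dot has an empty extension.
    int dot = fileName.lastIndexOf('.');
    String baseName = (dot == -1) ? fileName : fileName.substring(0, dot);
    String extension = (dot == -1) ? "" : fileName.substring(dot + 1);

    System.out.println(baseName);   // data-2024.csv
    System.out.println(extension);  // gz
  }
}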
From source file:org.schedulesdirect.grabber.Auditor.java
private void auditScheds() throws IOException, JSONException, ParseException {
  final Map<String, JSONObject> stations = getStationMap();
  final SimpleDateFormat FMT = Config.get().getDateTimeFormat();
  final Path scheds = vfs.getPath("schedules");
  if (Files.isDirectory(scheds)) {
    Files.walkFileTree(scheds, new FileVisitor<Path>() {

      @Override
      public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
        return dir.equals(scheds) ? FileVisitResult.CONTINUE : FileVisitResult.SKIP_SUBTREE;
      }

      @Override
      public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
        boolean failed = false;
        String id = getStationIdFromFileName(file.getFileName().toString());
        JSONObject station = stations.get(id);
        StringBuilder msg = new StringBuilder(String.format("Inspecting %s (%s)... ",
            station != null ? station.getString("callsign") : String.format("[UNKNOWN: %s]", id), id));
        String input;
        try (InputStream ins = Files.newInputStream(file)) {
          input = IOUtils.toString(ins, ZipEpgClient.ZIP_CHARSET.toString());
        }
        ObjectMapper mapper = Config.get().getObjectMapper();
        JSONArray jarr = mapper.readValue(
            mapper.readValue(input, JSONObject.class).getJSONArray("programs").toString(), JSONArray.class);
        for (int i = 1; i < jarr.length(); ++i) {
          long start, prevStart;
          JSONObject prev;
          try {
            start = FMT.parse(jarr.getJSONObject(i).getString("airDateTime")).getTime();
            prev = jarr.getJSONObject(i - 1);
            prevStart = FMT.parse(prev.getString("airDateTime")).getTime() + 1000L * prev.getLong("duration");
          } catch (ParseException e) {
            throw new RuntimeException(e);
          }
          if (prevStart != start) {
            msg.append(String.format("FAILED! [%s]", prev.getString("airDateTime")));
            LOG.error(msg);
            failed = true;
            Auditor.this.failed = true;
            break;
          }
        }
        if (!failed) {
          msg.append("PASSED!");
          LOG.info(msg);
        }
        return FileVisitResult.CONTINUE;
      }

      @Override
      public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
        LOG.error(String.format("Unable to process schedule file '%s'", file), exc);
        Auditor.this.failed = true;
        return FileVisitResult.CONTINUE;
      }

      @Override
      public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
        return FileVisitResult.CONTINUE;
      }
    });
  }
}
From source file:com.romeikat.datamessie.core.base.util.FileUtil.java
private Path createZipFile(final String dir, String filename, final List<Path> files) throws IOException {
  filename = normalizeFilename(filename);
  // Create ZIP file
  Path zipFile = Paths.get(dir, filename + ".zip");
  zipFile = getNonExisting(zipFile);
  Files.createFile(zipFile);
  final URI zipUri = URI.create("jar:file:" + zipFile.toUri().getPath());
  Files.delete(zipFile);
  final Map<String, String> zipProperties = new HashMap<String, String>();
  zipProperties.put("create", "true");
  zipProperties.put("encoding", "UTF-8");
  final FileSystem zipFS = FileSystems.newFileSystem(zipUri, zipProperties);
  // Copy TXT files to ZIP file
  for (final Path file : files) {
    final Path fileToZip = file;
    final Path pathInZipfile = zipFS.getPath(file.getFileName().toString());
    Files.copy(fileToZip, pathInZipfile);
  }
  // Done
  zipFS.close();
  return zipFile;
}
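One detail worth noting in the method above: the zip FileSystem is closed manually, so an exception from Files.copy would leave it open. A hedged sketch of the same copy loop using try-with-resources (the class and method names here are made up for illustration; zipUri and files are assumed to be prepared exactly as in the original):

import java.io.IOException;
import java.net.URI;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;

public class ZipCopySketch {
  static void copyIntoZip(URI zipUri, List<Path> files) throws IOException {
    Map<String, String> zipProperties = Map.of("create", "true", "encoding", "UTF-8");
    try (FileSystem zipFS = FileSystems.newFileSystem(zipUri, zipProperties)) {
      for (Path file : files) {
        // Name each entry after the source file's last path element.
        Path pathInZipfile = zipFS.getPath(file.getFileName().toString());
        Files.copy(file, pathInZipfile);
      }
    } // the zip file system is closed here even if Files.copy throws
  }
}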
From source file:controller.GaleriaController.java
@br.com.caelum.vraptor.Path("galeria/zipGaleria/{galeriaId}")
public Download zipGaleria(long galeriaId) {
  validator.ensure(sessao.getIdsPermitidosDeGalerias().contains(galeriaId),
      new SimpleMessage("galeria", "Acesso negado"));
  Galeria galeria = new Galeria();
  galeria.setId(galeriaId);
  List<Imagem> imagens = imagemDao.listByGaleria(galeria);
  validator.addIf(imagens == null || imagens.isEmpty(), new SimpleMessage("galeria", "Galeria vazia"));
  validator.onErrorRedirectTo(UsuarioController.class).viewGaleria(galeriaId);
  List<Path> paths = new ArrayList<>();
  for (Imagem imagem : imagens) {
    String realPath = servletContext.getRealPath("/");
    java.nio.file.Path imagemPath = new File(realPath + "/" + UPLOAD_DIR + "/" + imagem.getFileName()).toPath();
    paths.add(imagemPath);
  }
  byte buffer[] = new byte[2048];
  try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
      ZipOutputStream zos = new ZipOutputStream(baos)) {
    zos.setMethod(ZipOutputStream.DEFLATED);
    zos.setLevel(5);
    for (Path path : paths) {
      try (FileInputStream fis = new FileInputStream(path.toFile());
          BufferedInputStream bis = new BufferedInputStream(fis)) {
        String pathFileName = path.getFileName().toString();
        zos.putNextEntry(new ZipEntry(pathFileName));
        int bytesRead;
        while ((bytesRead = bis.read(buffer)) != -1) {
          zos.write(buffer, 0, bytesRead);
        }
        zos.closeEntry();
        zos.flush();
      } catch (IOException e) {
        result.include("mensagem", "Erro no download do zip");
        result.forwardTo(UsuarioController.class).viewGaleria(galeriaId);
        return null;
      }
    }
    zos.finish();
    byte[] zip = baos.toByteArray();
    Download download = new ByteArrayDownload(zip, "application/zip", sessao.getUsuario().getNome() + ".zip");
    return download;
    //zipDownload = new ZipDownload(sessao.getUsuario().getNome() + ".zip", paths);
    //return zipDownloadBuilder.build();
  } catch (IOException e) {
    result.include("mensagem", "Erro no download do zip");
    result.forwardTo(UsuarioController.class).viewGaleria(galeriaId);
    return null;
  }
}
From source file:com.gitpitch.services.DiskService.java
public void copyDirectory(Path source, Path dest) {
  log.debug("copyDirectory: source={}, dest={}", source, dest);
  try {
    Files.walkFileTree(source, new SimpleFileVisitor<Path>() {

      public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
        Path relative = source.relativize(dir);
        Path visitPath = Paths.get(dest.toString(), relative.toString());
        ensure(visitPath);
        return FileVisitResult.CONTINUE;
      }

      @Override
      public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
        Path copyTarget = Paths.get(dest.toString(), source.relativize(file).toString());
        if (!file.getFileName().toString().matches("\\..*")
            && !copyTarget.getFileName().toString().matches("\\..*")) {
          Files.copy(file, copyTarget);
        }
        return FileVisitResult.CONTINUE;
      }
    });
  } catch (Exception cex) {
    log.warn("copyDirectory: source={}, dest={}, ex={}", source, dest, cex);
  }
}
From source file:cn.edu.zjnu.acm.judge.core.Judger.java
private boolean runProcess(RunRecord runRecord) throws IOException {
  Path dataPath = runRecord.getDataPath();
  Objects.requireNonNull(dataPath, "dataPath");
  Path specialFile = dataPath.resolve(JudgeConfiguration.VALIDATE_FILE_NAME);
  boolean isspecial = Files.exists(specialFile);
  if (!Files.isDirectory(dataPath)) {
    log.error("{} not exists", runRecord.getDataPath());
    return false;
  }
  List<Path[]> files = new ArrayList<>(20);
  try (DirectoryStream<Path> listFiles = Files.newDirectoryStream(dataPath)) {
    log.debug("dataPath = {}", dataPath);
    for (Path inFile : listFiles) {
      String inFileName = inFile.getFileName().toString();
      if (!inFileName.toLowerCase().endsWith(".in")) {
        continue;
      }
      Path outFile = dataPath.resolve(inFileName.substring(0, inFileName.length() - 3) + ".out");
      if (!Files.exists(outFile)) {
        continue;
      }
      files.add(new Path[] { inFile, outFile });
    }
  }
  int casenum = files.size();
  log.debug("casenum = {}", casenum);
  if (casenum == 0) {
    return false;
  }
  int accept = 0;
  ArrayList<String> details = new ArrayList<>(casenum << 2);
  long time = 0;
  long memory = 0;
  String command = runRecord.getLanguage().getExecuteCommand();
  Path work = judgeConfiguration.getWorkDirectory(runRecord.getSubmissionId());
  command = !StringUtils.isEmptyOrWhitespace(command) ? command
      : work.resolve("Main." + runRecord.getLanguage().getExecutableExtension()).toString();
  long extTime = runRecord.getLanguage().getExtTime();
  long castTimeLimit = runRecord.getTimeLimit() * runRecord.getLanguage().getTimeFactor() + extTime;
  long extraMemory = runRecord.getLanguage().getExtMemory();
  long caseMemoryLimit = (runRecord.getMemoryLimit() + extraMemory) * 1024;
  Options[] optionses = new Options[casenum];
  for (int cas = 0; cas < casenum; cas++) {
    Path[] entry = files.get(cas);
    Path in = entry[0];
    Path standard = entry[1];
    Path progOutput = work.resolve(standard.getFileName());
    optionses[cas] = Options.builder()
        .timeLimit(castTimeLimit) // time limit
        .memoryLimit(caseMemoryLimit) // memory in bytes
        .outputLimit(16 * 1024 * 1024) // 16M
        .command(command)
        .workDirectory(work)
        .inputFile(in)
        .outputFile(progOutput)
        .standardOutput(standard)
        .errFile(getNull(work))
        .build();
  }
  String detailMessageStr = null;
  String scorePerCase = new DecimalFormat("0.#").format(100.0 / casenum);
  final Validator validator = isspecial
      ? new SpecialValidator(specialFile.toString(), work)
      : new SimpleValidator();
  try {
    ExecuteResult[] ers = JudgeBridge.INSTANCE.judge(optionses, false, validator);
    for (ExecuteResult er : ers) {
      long tim1 = er.getTime() - extTime;
      tim1 = Math.max(0, tim1);
      long mem1 = er.getMemory() / 1024 - extraMemory;
      mem1 = Math.max(0, mem1);
      String message = er.getMessage();
      int caseResult = getResultFromExecuteResult(er);
      time = Math.max(time, tim1);
      memory = Math.max(memory, mem1);
      log.debug("message = {}, time = {}, memory = {}", message, time, memory);
      details.add(String.valueOf(caseResult));
      if (caseResult == 0) {
        details.add(scorePerCase);
      } else {
        details.add("0");
      }
      details.add(String.valueOf(tim1));
      details.add(String.valueOf(mem1));
      if (caseResult == 0) {
        ++accept;
      }
    }
  } catch (JudgeException | RuntimeException | Error ex) {
    log.error("", ex);
    accept = ResultType.SYSTEM_ERROR;
    detailMessageStr = ex.getMessage();
  }
  log.debug("{}", details);
  int score = accept >= 0 ? (int) Math.round(accept * 100.0 / casenum) : accept;
  if (score == 0 && accept != 0) {
    ++score;
  } else if (score == 100 && accept != casenum) {
    --score;
  }
  submissionMapper.updateResult(runRecord.getSubmissionId(), score, time, memory);
  submissionMapper.saveDetail(runRecord.getSubmissionId(), detailMessageStr != null ? detailMessageStr
      : details.stream().map(String::valueOf).collect(Collectors.joining(",")));
  updateSubmissionStatus(runRecord);
  return score == 100;
}
From source file:lucene.IndexFiles.java
/** Indexes a single document */
static void indexDoc(IndexWriter writer, Path file, long lastModified) throws IOException {
  try (InputStream stream = Files.newInputStream(file)) {
    // make a new, empty document
    System.out.println("Test 3.1");
    BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8));
    String line = null;
    StringBuilder stringBuilder = new StringBuilder();
    String ls = System.getProperty("line.separator");
    try {
      while ((line = reader.readLine()) != null) {
        stringBuilder.append(line);
        stringBuilder.append(ls);
      }
    } finally {
      reader.close();
    }

    // index file name
    Field fileNameField = new StringField("name", file.getFileName().toString(), Field.Store.YES);

    // Add the path of the file as a field named "path". Use a
    // field that is indexed (i.e. searchable), but don't tokenize
    // the field into separate words and don't index term frequency
    // or positional information:
    Field pathField = new StringField("path", file.toString(), Field.Store.YES);

    // Add the last modified date of the file as a field named "modified".
    // Use a LongPoint that is indexed (i.e. efficiently filterable with
    // PointRangeQuery). This indexes to milli-second resolution, which
    // is often too fine. You could instead create a number based on
    // year/month/day/hour/minutes/seconds, down to the resolution you require.
    // For example the long value 2011021714 would mean
    // February 17, 2011, 2-3 PM.

    // Add the contents of the file to a field named "contents". Specify a Reader,
    // so that the text of the file is tokenized and indexed, but not stored.
    // Note that FileReader expects the file to be in UTF-8 encoding.
    // If that's not the case searching for special characters will fail.
    String file_content = stringBuilder.toString();
    //System.out.println(file_content);
    //String[] passages = file_content.split("<P|<p");
    //String[] passages = file_content.split("<P");
    //String[] passages = file_content.split("<P>|<H1>|<H2>|<H3>|<H4>|<H5>|<H6>|<BR>|<HR>|<TABLE>|<TD>|<TH>|<TR>|<OL>|<UL>|<p>|<br>|<hr>");
    String[] passages = file_content.split(
        "(?i)<P|(?i)<H1|(?i)<H2|(?i)<H3|(?i)<H4|(?i)<H5|(?i)<H6|(?i)<BR|(?i)<HR|(?i)<TABLE|(?i)<TD|(?i)<TH|(?i)<TR|(?i)<OL|(?i)<UL");
    //String[] passages = StringUtils.substringsBetween(file_content, "<P", "<P");
    //String[] title = StringUtils.substringsBetween(file_content, "<body>", "</");
    //System.out.println("path");
    //String title = passages[0];
    String title;
    Document dochtml; // = Jsoup.parse(title);
    String ptitle = ""; // = dochtml.body().text();
    //System.out.println("Title is" + ptitle);
    //Field titleField = new StringField("title", ptitle, Field.Store.YES);

    ///////------FORMATTING TEXT---------
    StandardTokenizer stdToken = new StandardTokenizer();
    //Tokenizer stdToken = new WhitespaceTokenizer();
    EnglishMinimalStemmer stemmer = new EnglishMinimalStemmer();
    //stdToken.setReader(new StringReader("Some stuff that is in need of analysis. stuff patients PATIENT d > 0.5 Dnn>Bnn D.N.A diseases heart attacks at cl-fo"));
    // Your code starts here
    final List<String> stopWords = new ArrayList<>();
    String f = "E:/stopwords_en.txt";
    try (BufferedReader br = new BufferedReader(new FileReader(f))) {
      String topic;
      //int qid = 200; //cntr=0;
      while ((topic = br.readLine()) != null) {
        stopWords.add(topic.trim());
      }
    }
    final CharArraySet stopSet = new CharArraySet(stopWords, false);
    //////------FORMATTING TEXT---------

    if (passages != null) {
      int j = 0;
      if (passages.length > 1) {
        title = passages[1].split("</P|</H1|</H2|</H3|</H4|</H5|</H6|</p")[0];
        dochtml = Jsoup.parse(title);
        ptitle = dochtml.body().text().toLowerCase();
        System.out.println("Title is" + ptitle);
      }
      for (int i = 0; i < passages.length; i++) {
        //System.out.println(i);
        //cnames = cname.split(":");
        //cname = cnames[0];
        String[] passage_contents = passages[i].split("</P|</p");
        //String[] passage_contents = passages[i].split("</P");
        String passage_content = passage_contents[0];
        //if (passage_content.trim().isEmpty()) {
        //  System.out.println("abc");
        //  continue;
        //}
        dochtml = Jsoup.parse(passage_content);
        String plainStr = dochtml.body().text();
        String[] validpas = plainStr.split(" ");
        if (validpas.length > 9) {
          j++;
          Field passageId = new StringField("id", file.getFileName().toString() + "." + i, Field.Store.YES);
          org.apache.lucene.document.Document doc = new org.apache.lucene.document.Document();
          doc.add(fileNameField);
          doc.add(pathField);
          doc.add(passageId);
          //doc.add(titleField);
          doc.add(new StringField("offset", file_content.indexOf(passage_content) + "", Field.Store.YES));
          doc.add(new StringField("length", passage_content.length() + "", Field.Store.YES));
          doc.add(new LongPoint("modified", lastModified));
          ((org.apache.lucene.document.Document) doc).add(new TextField("title", ptitle, Store.YES));
          //System.out.println(passage_content);
          //InputStream is = new ByteArrayInputStream(passage_content.getBytes());
          //String strippedText = passage_content.replaceAll("(?s)<[^>]*>(\\s*<[^>]*>)*", " ");

          //--------TEXT PROCESSING------------
          TokenStream tokenStream;
          //String nplainstr = plainStr.replaceAll("-", ".zz");
          //stdToken.setReader(new StringReader(nplainstr));
          stdToken.setReader(new StringReader(plainStr));
          tokenStream = new StopFilter(
              new ASCIIFoldingFilter(new ClassicFilter(new LowerCaseFilter(stdToken))), stopSet);
          //tokenStream = new PorterStemFilter(tokenStream);
          tokenStream.reset();
          //int l = 0;
          String term = "";
          StringBuilder sb = new StringBuilder();
          //OffsetAttribute offsetAttribute = tokenStream.addAttribute(OffsetAttribute.class);
          CharTermAttribute charTermAttr = tokenStream.getAttribute(CharTermAttribute.class);
          try {
            //int l;
            while (tokenStream.incrementToken()) {
              if (sb.length() > 0) {
                sb.append(" ");
              }
              term = charTermAttr.toString();
              /*if (term.contains(".zz")) {
                term = term.replaceAll(".zz", "-");
                String[] terms = term.split("-");
                String at = "";
                for (String t : terms) {
                  //l = stemmer.stem(t.toCharArray(), t.length());
                  //t = t.substring(0, l);
                  //sb.append(t.toString(), 0, l);
                  sb.append(t + " ");
                  at = at + t;
                }
                sb.append(at + " ");
              }*/
              if (term.contains(".") && !term.matches(".*\\d+.*")) { //&& StringUtils.isAlpha(term)
                term = term.replaceAll("\\.", "");
                //sb.append(term);
              }
              //int l = stemmer.stem(charTermAttr.toString().toCharArray(), charTermAttr.toString().length());
              int l;
              l = stemmer.stem(term.toCharArray(), term.length());
              //sb.append(charTermAttr.toString(), 0, l);
              sb.append(term, 0, l);
              //sb.append(term);
              /*if (term.contains("-")) {
                String[] terms = term.split("-");
                String at = "";
                for (String t : terms) {
                  sb.append(" " + t);
                  at = at + t;
                }
                sb.append(" " + at);
              }*/
              /*sb.append(charTermAttr.toString());
              String[] hl = charTermAttr.toString().split("-");
              if (hl.length > 1) {
                for (int j = 0; j < hl.length; j++) {
                  sb.append(" " + hl[j]);
                }
                //sb.append(" " + charTermAttr.toString().split("-")[1]);
                //sb.append(charTermAttr.toString());
              }*/
            }
          } catch (IOException e) {
            System.out.println(e.getMessage());
          }
          //System.out.println(sb.toString());
          tokenStream.close();
          ///----------END OF TEXT PROCESSING----------

          ((org.apache.lucene.document.Document) doc)
              .add(new TextField("contents", sb.toString(), Store.YES));
          //new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))
          //doc.add(new StringField("contents", passage_content, Field.Store.YES));
          //System.out.println(plainStr);
          //writer.addDocument(doc);
          if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            n++;
            // New index, so we just add the document (no old document can be there):
            System.out.println(".......adding " + file.getFileName().toString() + " passage " + j + "--" + n);
            writer.addDocument(doc);
          } else {
            // Existing index (an old copy of this document may have been indexed) so
            // we use updateDocument instead to replace the old one matching the exact
            // path, if present:
            System.out.println("updating " + file);
            writer.updateDocument(new Term("path", file.toString()), doc);
          }
        }
      }
    }
  }
}
From source file:com.qwazr.library.archiver.ArchiverTool.java
public void extract_dir(final Path sourceDir, final String sourceExtension, final Path destDir,
    final Boolean logErrorAndContinue) throws IOException, ArchiveException {
  if (!Files.exists(sourceDir))
    throw new FileNotFoundException("The source directory does not exist: " + sourceDir.toAbsolutePath());
  if (!Files.exists(destDir))
    throw new FileNotFoundException("The destination directory does not exist: " + destDir.toAbsolutePath());
  final Path[] sourceFiles;
  try (final Stream<Path> stream = Files.list(sourceDir)) {
    sourceFiles = stream.filter(p -> Files.isRegularFile(p)).toArray(Path[]::new);
  }
  if (sourceFiles == null)
    return;
  for (final Path sourceFile : sourceFiles) {
    final String ext = FilenameUtils.getExtension(sourceFile.getFileName().toString());
    if (!sourceExtension.equals(ext))
      continue;
    try {
      extract(sourceFile, destDir);
    } catch (IOException | ArchiveException e) {
      if (logErrorAndContinue != null && logErrorAndContinue)
        LOGGER.log(Level.SEVERE, e, e::getMessage);
      else
        throw e;
    }
  }
}