List of usage examples for java.nio.file.Files.deleteIfExists
public static boolean deleteIfExists(Path path) throws IOException
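Before the project examples below, a minimal self-contained sketch of the method's semantics: it returns true if this call deleted the file and false if the file did not exist, and, unlike Files.delete, it does not throw NoSuchFileException for a missing file (it can still throw, e.g., DirectoryNotEmptyException for a non-empty directory). The scratch file name here is illustrative only.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class DeleteIfExistsDemo {
    public static void main(String[] args) throws IOException {
        // Hypothetical scratch file used only for this demo
        Path scratch = Paths.get("scratch.tmp");
        Files.createFile(scratch);

        System.out.println(Files.deleteIfExists(scratch)); // true: file existed and was deleted
        System.out.println(Files.deleteIfExists(scratch)); // false: already gone, no exception

        // Files.delete(scratch) here would instead throw NoSuchFileException
    }
}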
From source file: de.dal33t.powerfolder.Controller.java
/**
 * Saves the current config to disk.
 */
public synchronized void saveConfig() {
    if (!started) {
        return;
    }
    logFine("Saving config (" + getConfigName() + ".config)");
    Path file;
    Path tempFile;
    Path folderFile;
    Path tempFolderFile;
    Path backupFile;
    if (getConfigLocationBase() == null) {
        file = Paths.get(getConfigName() + ".config").toAbsolutePath();
        tempFile = Paths.get(getConfigName() + ".writing.config").toAbsolutePath();
        folderFile = Paths.get(getConfigName() + "-Folder.config").toAbsolutePath();
        tempFolderFile = Paths.get(getConfigName() + "-Folder.writing.config").toAbsolutePath();
        backupFile = Paths.get(getConfigName() + ".config.backup").toAbsolutePath();
    } else {
        file = getConfigLocationBase().resolve(getConfigName() + ".config");
        tempFile = getConfigLocationBase().resolve(getConfigName() + ".writing.config").toAbsolutePath();
        backupFile = getConfigLocationBase().resolve(getConfigName() + ".config.backup");
        folderFile = getConfigLocationBase().resolve(getConfigName() + "-Folder.config");
        tempFolderFile = getConfigLocationBase().resolve(getConfigName() + "-Folder.writing.config")
                .toAbsolutePath();
    }
    try {
        // Backup is done in #backupConfigAssets
        Files.deleteIfExists(backupFile);

        String distName = "PowerFolder";
        if (distribution != null && StringUtils.isNotBlank(distribution.getName())) {
            distName = distribution.getName();
        }

        Properties prev = new Properties();
        if (Files.exists(file)) {
            try (BufferedInputStream in = new BufferedInputStream(Files.newInputStream(file))) {
                prev.load(in);
            }
        }

        if (!prev.equals(config.getRegular())) {
            // Store config in misc base
            PropertiesUtil.saveConfig(tempFile, config.getRegular(),
                    distName + " config file (v" + PROGRAM_VERSION + ')');
            Files.deleteIfExists(file);
            try {
                Files.move(tempFile, file);
            } catch (IOException e) {
                Files.copy(tempFile, file);
                Files.delete(tempFile);
            }
        } else {
            if (isFine()) {
                logFine("Not storing config to " + file + ". Base config remains unchanged");
            }
        }

        if (!config.getFolders().isEmpty()) {
            Properties prevFolders = new Properties();
            if (Files.exists(folderFile)) {
                try (BufferedInputStream in = new BufferedInputStream(Files.newInputStream(folderFile))) {
                    prevFolders.load(in);
                }
            }
            if (!prevFolders.equals(config.getFolders())) {
                PropertiesUtil.saveConfig(tempFolderFile, config.getFolders(),
                        distName + " folders config file (v" + PROGRAM_VERSION + ')');
                Files.deleteIfExists(folderFile);
                try {
                    Files.move(tempFolderFile, folderFile);
                } catch (IOException e) {
                    Files.copy(tempFolderFile, folderFile);
                    Files.delete(tempFolderFile);
                }
            }
        }
    } catch (IOException e) {
        // FATAL
        logSevere("Unable to save config. " + e, e);
        exit(1);
    } catch (Exception e) {
        // Major problem: the settings code is wrong
        e.printStackTrace();
        logSevere("major problem , setting code is wrong", e);
    }
}
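The example above illustrates a common pattern around deleteIfExists: write the new content to a temporary file, delete the old target, then move the temporary file into place, falling back to copy-plus-delete when the move fails (for instance, across file systems). A minimal sketch of that pattern; the method and parameter names are illustrative, not part of the PowerFolder API:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Sketch of the replace-via-temp-file pattern used in saveConfig() above.
static void replaceWithTemp(Path temp, Path target) throws IOException {
    Files.deleteIfExists(target); // remove the old file so the move cannot collide
    try {
        Files.move(temp, target);
    } catch (IOException e) {
        // Fallback when a direct move is not possible (e.g., across file systems)
        Files.copy(temp, target);
        Files.delete(temp);
    }
}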
From source file: org.kitodo.production.services.data.ProcessService.java
/**
 * Download docket for given process.
 *
 * @param process
 *            the process object
 * @throws IOException
 *             when the XSLT file could not be loaded, or writing to the output failed
 */
public void downloadDocket(Process process) throws IOException {
    logger.debug("generate docket for process with id {}", process.getId());
    URI rootPath = Paths.get(ConfigCore.getParameter(ParameterCore.DIR_XSLT)).toUri();
    URI xsltFile;
    if (Objects.nonNull(process.getDocket())) {
        xsltFile = ServiceManager.getFileService().createResource(rootPath, process.getDocket().getFile());
        if (!fileService.fileExist(xsltFile)) {
            Helper.setErrorMessage("docketMissing");
        }
    } else {
        xsltFile = ServiceManager.getFileService().createResource(rootPath, "docket.xsl");
    }
    FacesContext facesContext = FacesContext.getCurrentInstance();
    if (!facesContext.getResponseComplete()) {
        // Write run note to servlet output stream
        DocketInterface module = initialiseDocketModule();
        File file = module.generateDocket(getDocketData(process), xsltFile);
        writeToOutputStream(facesContext, file, Helper.getNormalizedTitle(process.getTitle()) + ".pdf");
        Files.deleteIfExists(file.toPath());
    }
}
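Note that this example deletes the generated PDF only after a successful write; if writeToOutputStream throws, the temporary file is left behind. A common hardening, sketched here with a hypothetical generateDocketFile stand-in for the generation step, is to put the deleteIfExists call in a finally block so cleanup runs on every path:

// Sketch only: generateDocketFile is a hypothetical stand-in for
// module.generateDocket(getDocketData(process), xsltFile) above.
public void downloadDocketSafely(Process process) throws IOException {
    File file = generateDocketFile(process);
    try {
        writeToOutputStream(FacesContext.getCurrentInstance(), file,
                Helper.getNormalizedTitle(process.getTitle()) + ".pdf");
    } finally {
        Files.deleteIfExists(file.toPath()); // runs on success and on failure
    }
}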
From source file: org.apache.nifi.controller.repository.FileSystemRepository.java
private long destroyExpiredArchives(final String containerName, final Path container) throws IOException {
    archiveExpirationLog.debug("Destroying Expired Archives for Container {}", containerName);
    final List<ArchiveInfo> notYetExceedingThreshold = new ArrayList<>();
    long removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;
    long oldestArchiveDateFound = System.currentTimeMillis();

    // Determine how much space we must have in order to stop deleting old data
    final Long minRequiredSpace = minUsableContainerBytesForArchive.get(containerName);
    if (minRequiredSpace == null) {
        archiveExpirationLog.debug("Could not determine minimum required space so will not destroy any archived data");
        return -1L;
    }

    final long usableSpace = getContainerUsableSpace(containerName);
    final ContainerState containerState = containerStateMap.get(containerName);

    // First, delete files from our queue
    final long startNanos = System.nanoTime();
    final long toFree = minRequiredSpace - usableSpace;
    final BlockingQueue<ArchiveInfo> fileQueue = archivedFiles.get(containerName);
    if (archiveExpirationLog.isDebugEnabled()) {
        if (toFree < 0) {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so no need to free space until an additional {} bytes are used",
                    usableSpace, containerName, minRequiredSpace, Math.abs(toFree));
        } else {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so need to free {} bytes",
                    usableSpace, containerName, minRequiredSpace, toFree);
        }
    }

    ArchiveInfo toDelete;
    int deleteCount = 0;
    long freed = 0L;
    while ((toDelete = fileQueue.peek()) != null) {
        try {
            final long fileSize = toDelete.getSize();
            removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;

            // We use fileQueue.peek() above instead of fileQueue.poll() because we don't always want to
            // remove the head of the queue. Instead, we want to remove it only if we plan to delete it.
            // In order to accomplish this, we just peek at the head and check if it should be deleted.
            // If so, then we call poll() to remove it.
            if (freed < toFree || getLastModTime(toDelete.toPath()) < removalTimeThreshold) {
                toDelete = fileQueue.poll(); // remove the head of the queue, which is already stored in 'toDelete'
                Files.deleteIfExists(toDelete.toPath());
                containerState.decrementArchiveCount();
                LOG.debug(
                        "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                        toDelete.getName(), containerName);
                freed += fileSize;
                deleteCount++;
            }

            // If we'd freed up enough space, we're done... unless the next file needs to be destroyed based on time.
            if (freed >= toFree) {
                // If the last mod time indicates that it should be removed, just continue the loop.
                if (deleteBasedOnTimestamp(fileQueue, removalTimeThreshold)) {
                    archiveExpirationLog.debug(
                            "Freed enough space ({} bytes freed, needed to free {} bytes) but will continue to expire data based on timestamp",
                            freed, toFree);
                    continue;
                }

                archiveExpirationLog.debug(
                        "Freed enough space ({} bytes freed, needed to free {} bytes). Finished expiring data",
                        freed, toFree);

                final ArchiveInfo archiveInfo = fileQueue.peek();
                final long oldestArchiveDate = archiveInfo == null ? System.currentTimeMillis()
                        : getLastModTime(archiveInfo.toPath());

                // Otherwise, we're done. Return the last mod time of the oldest file in the container's archive.
                final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                if (deleteCount > 0) {
                    LOG.info(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                } else {
                    LOG.debug(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                }

                return oldestArchiveDate;
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", toDelete, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }

    // Go through each container and grab the archived data into a List
    archiveExpirationLog.debug("Searching for more archived data to expire");
    final StopWatch stopWatch = new StopWatch(true);
    for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
        final Path sectionContainer = container.resolve(String.valueOf(i));
        final Path archive = sectionContainer.resolve("archive");
        if (!Files.exists(archive)) {
            continue;
        }

        try {
            final long timestampThreshold = removalTimeThreshold;
            Files.walkFileTree(archive, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
                    if (attrs.isDirectory()) {
                        return FileVisitResult.CONTINUE;
                    }

                    final long lastModTime = getLastModTime(file);
                    if (lastModTime < timestampThreshold) {
                        try {
                            Files.deleteIfExists(file);
                            containerState.decrementArchiveCount();
                            LOG.debug(
                                    "Deleted archived ContentClaim with ID {} from Container {} because it was older than the configured max archival duration",
                                    file.toFile().getName(), containerName);
                        } catch (final IOException ioe) {
                            LOG.warn(
                                    "Failed to remove archived ContentClaim with ID {} from Container {} due to {}",
                                    file.toFile().getName(), containerName, ioe.toString());
                            if (LOG.isDebugEnabled()) {
                                LOG.warn("", ioe);
                            }
                        }
                    } else if (usableSpace < minRequiredSpace) {
                        notYetExceedingThreshold.add(new ArchiveInfo(container, file, attrs.size(), lastModTime));
                    }

                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (final IOException ioe) {
            LOG.warn("Failed to cleanup archived files in {} due to {}", archive, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }
    final long deleteExpiredMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);

    // Sort the list according to last modified time
    Collections.sort(notYetExceedingThreshold, new Comparator<ArchiveInfo>() {
        @Override
        public int compare(final ArchiveInfo o1, final ArchiveInfo o2) {
            return Long.compare(o1.getLastModTime(), o2.getLastModTime());
        }
    });
    final long sortRemainingMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteExpiredMillis;

    // Delete the oldest data
    archiveExpirationLog.debug("Deleting data based on timestamp");
    final Iterator<ArchiveInfo> itr = notYetExceedingThreshold.iterator();
    int counter = 0;
    while (itr.hasNext()) {
        final ArchiveInfo archiveInfo = itr.next();

        try {
            final Path path = archiveInfo.toPath();
            Files.deleteIfExists(path);
            containerState.decrementArchiveCount();
            LOG.debug(
                    "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                    archiveInfo.getName(), containerName);

            // Check if we've freed enough space every 25 files that we destroy
            if (++counter % 25 == 0) {
                if (getContainerUsableSpace(containerName) > minRequiredSpace) { // check if we can stop now
                    LOG.debug("Finished cleaning up archive for Container {}", containerName);
                    break;
                }
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", archiveInfo, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }

        itr.remove();
    }
    final long deleteOldestMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - sortRemainingMillis - deleteExpiredMillis;

    long oldestContainerArchive;
    if (notYetExceedingThreshold.isEmpty()) {
        oldestContainerArchive = System.currentTimeMillis();
    } else {
        oldestContainerArchive = notYetExceedingThreshold.get(0).getLastModTime();
    }

    if (oldestContainerArchive < oldestArchiveDateFound) {
        oldestArchiveDateFound = oldestContainerArchive;
    }

    // Queue up the files in the order that they should be destroyed so that we don't have to scan the directories for a while.
    for (final ArchiveInfo toEnqueue : notYetExceedingThreshold.subList(0,
            Math.min(100000, notYetExceedingThreshold.size()))) {
        fileQueue.offer(toEnqueue);
    }

    final long cleanupMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteOldestMillis - sortRemainingMillis - deleteExpiredMillis;
    LOG.debug(
            "Oldest Archive Date for Container {} is {}; delete expired = {} ms, sort remaining = {} ms, delete oldest = {} ms, cleanup = {} ms",
            containerName, new Date(oldestContainerArchive), deleteExpiredMillis, sortRemainingMillis,
            deleteOldestMillis, cleanupMillis);
    return oldestContainerArchive;
}
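The NiFi method above mixes queue-based and tree-walk deletion with extensive bookkeeping. Stripped to its core, the deleteIfExists usage is: walk the archive tree and remove every regular file whose last-modified time is older than a cutoff. A minimal sketch of that pattern, with the cutoff as a parameter (the method name is illustrative):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

// Sketch of the age-based expiry pattern from destroyExpiredArchives() above.
static void deleteOlderThan(Path root, long cutoffMillis) throws IOException {
    Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            if (attrs.isRegularFile() && attrs.lastModifiedTime().toMillis() < cutoffMillis) {
                // deleteIfExists tolerates a concurrent delete by another thread or process
                Files.deleteIfExists(file);
            }
            return FileVisitResult.CONTINUE;
        }
    });
}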
From source file: org.kitodo.production.services.data.ProcessService.java
/**
 * Writes a multi page docket for a list of processes to an output stream.
 *
 * @param processes
 *            the list of processes
 * @throws IOException
 *             when the XSLT file could not be loaded, or writing to the output failed
 */
public void downloadDocket(List<Process> processes) throws IOException {
    logger.debug("generate docket for processes {}", processes);
    URI rootPath = Paths.get(ConfigCore.getParameter(ParameterCore.DIR_XSLT)).toUri();
    URI xsltFile = ServiceManager.getFileService().createResource(rootPath, "docket_multipage.xsl");
    FacesContext facesContext = FacesContext.getCurrentInstance();
    if (!facesContext.getResponseComplete()) {
        DocketInterface module = initialiseDocketModule();
        File file = module.generateMultipleDockets(ServiceManager.getProcessService().getDocketData(processes),
                xsltFile);
        writeToOutputStream(facesContext, file, "batch_docket.pdf");
        Files.deleteIfExists(file.toPath());
    }
}
From source file: eu.itesla_project.online.db.OnlineDbMVStore.java
@Override
public void storeState(String workflowId, Integer stateId, Network network) {
    String stateIdStr = String.valueOf(stateId);
    LOGGER.info("Storing state {} of workflow {}", stateIdStr, workflowId);
    if (network.getStateManager().getStateIds().contains(stateIdStr)) {
        network.getStateManager().setWorkingState(stateIdStr);
        Path workflowStatesFolder = getWorkflowStatesFolder(workflowId);
        Path stateFolder = Paths.get(workflowStatesFolder.toString(), STORED_STATE_PREFIX + stateId);
        if (Files.exists(stateFolder)) {
            // Remove current state file, if it already exists
            for (int i = 0; i < XIIDMEXTENSIONS.length; i++) {
                Path stateFile = Paths.get(stateFolder.toString(), network.getId() + XIIDMEXTENSIONS[i]);
                try {
                    Files.deleteIfExists(stateFile);
                } catch (IOException e) {
                    String errorMessage = "online db: folder " + workflowStatesFolder + " for workflow "
                            + workflowId + " , state " + stateIdStr + " ; cannot remove existing state file: "
                            + e.getMessage();
                    LOGGER.error(errorMessage);
                    throw new RuntimeException(errorMessage);
                }
            }
        } else {
            try {
                Files.createDirectories(stateFolder);
            } catch (IOException e) {
                String errorMessage = "online db: folder " + workflowStatesFolder + " for workflow " + workflowId
                        + " and state " + stateIdStr + " cannot be created: " + e.getMessage();
                LOGGER.error(errorMessage);
                throw new RuntimeException(errorMessage);
            }
        }
        DataSource dataSource = new FileDataSource(stateFolder, network.getId());
        Properties parameters = new Properties();
        parameters.setProperty("iidm.export.xml.indent", "true");
        parameters.setProperty("iidm.export.xml.with-branch-state-variables", "true");
        parameters.setProperty("iidm.export.xml.with-breakers", "true");
        parameters.setProperty("iidm.export.xml.with-properties", "true");
        Exporters.export("XIIDM", network, parameters, dataSource);
        // Store network state values, for later serialization
        Map<HistoDbAttributeId, Object> networkValues = IIDM2DB
                .extractCimValues(network, new IIDM2DB.Config(network.getId(), true, true)).getSingleValueMap();
        ConcurrentHashMap<Integer, Map<HistoDbAttributeId, Object>> workflowStates = new ConcurrentHashMap<>();
        if (workflowsStates.containsKey(workflowId)) {
            workflowStates = workflowsStates.get(workflowId);
        }
        workflowStates.put(stateId, networkValues);
        workflowsStates.put(workflowId, workflowStates);
    } else {
        String errorMessage = "online db: no state " + stateIdStr + " in network of workflow " + workflowId;
        LOGGER.error(errorMessage);
        throw new RuntimeException(errorMessage);
    }
}
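A small aside on the loop above: when a state file may have been written with any of several known extensions, each candidate is deleted in turn, and deleteIfExists makes the missing ones a no-op. The same loop can be written more idiomatically with Path.resolve; this sketch reuses the stateFolder, network, and XIIDMEXTENSIONS names from the example above:

// Sketch: remove any previously exported state file, whatever extension it was
// written with; files that were never written are silently skipped.
for (String ext : XIIDMEXTENSIONS) {
    Files.deleteIfExists(stateFolder.resolve(network.getId() + ext));
}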
From source file: com.dell.asm.asmcore.asmmanager.app.rest.ServiceTemplateService.java
/**
 * Get default template customized with uploaded config file.
 *
 * @param configPath path to the uploaded configuration file
 * @return the customized default template
 * @throws WebApplicationException on an internal processing error
 */
@Override
public ServiceTemplate getUploadedConfigTemplate(String configPath) throws IOException {
    try {
        String configXml = "";
        List<String> lines = Files.readAllLines(Paths.get(configPath), Charset.defaultCharset());
        for (String line : lines) {
            configXml += line;
        }
        ServiceTemplate svcTmpl = getDefaultTemplate();
        processSystemConfiguration(svcTmpl, configXml, null, null);
        return svcTmpl;
    } catch (IOException e) {
        LOGGER.error("Could not read imported configuration profile file at " + configPath, e);
        throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                AsmManagerMessages.internalError());
    } catch (WSManException e) {
        LOGGER.error("Could not process imported configuration profile", e);
        throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                AsmManagerMessages.internalError());
    } finally {
        // Delete the uploaded temp file whether or not processing succeeded
        Files.deleteIfExists(Paths.get(configPath));
    }
}
From source file: org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageReconstructor.java
/**
 * Run the OfflineImageReconstructor.
 *
 * @param inputPath The input path to use.
 * @param outputPath The output path to use.
 *
 * @throws Exception On error.
 */
public static void run(String inputPath, String outputPath) throws Exception {
    MessageDigest digester = MD5Hash.getDigester();
    FileOutputStream fout = null;
    File foutHash = new File(outputPath + ".md5");
    Files.deleteIfExists(foutHash.toPath()); // delete any .md5 file that exists
    CountingOutputStream out = null;
    FileInputStream fis = null;
    InputStreamReader reader = null;
    try {
        Files.deleteIfExists(Paths.get(outputPath));
        fout = new FileOutputStream(outputPath);
        fis = new FileInputStream(inputPath);
        reader = new InputStreamReader(fis, Charset.forName("UTF-8"));
        out = new CountingOutputStream(new DigestOutputStream(new BufferedOutputStream(fout), digester));
        OfflineImageReconstructor oir = new OfflineImageReconstructor(out, reader);
        oir.processXml();
    } finally {
        IOUtils.cleanup(LOG, reader, fis, out, fout);
    }
    // Write the md5 file
    MD5FileUtils.saveMD5File(new File(outputPath), new MD5Hash(digester.digest()));
}
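Here deleteIfExists serves a different purpose: clearing stale output artifacts (the image and its .md5 sidecar) before regeneration, so a leftover checksum from an earlier run can never be paired with a newer image. A condensed sketch of that precaution; the method name is illustrative:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

// Sketch: remove stale outputs before regenerating them.
static void clearStaleOutputs(String outputPath) throws IOException {
    Files.deleteIfExists(Paths.get(outputPath + ".md5")); // stale checksum sidecar
    Files.deleteIfExists(Paths.get(outputPath));          // stale output itself
}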
From source file: eu.itesla_project.online.db.OnlineDbMVStore.java
@Override
public boolean deleteWorkflow(String workflowId) {
    LOGGER.info("Deleting workflow {}", workflowId);
    boolean workflowDeleted = false;
    boolean workflowStatesDeleted = true;
    // If stored states for this workflow exist, delete them
    if (workflowStatesFolderExists(workflowId)) {
        workflowStatesDeleted = deleteStates(workflowId);
    }
    // If the stored states have been deleted, delete the stored workflow results
    if (workflowStatesDeleted) {
        Path workflowFile = Paths.get(config.getOnlineDbDir().toFile().toString(),
                STORED_WORKFLOW_PREFIX + workflowId);
        if (workflowFile.toFile().exists() && workflowFile.toFile().isFile()) {
            try {
                workflowDeleted = Files.deleteIfExists(workflowFile);
            } catch (IOException e) {
                LOGGER.error("Cannot delete workflow {} from online DB: {}", workflowId, e.getMessage());
            }
        } else {
            LOGGER.warn("No workflow {} stored in the online DB", workflowId);
        }
    }
    return workflowDeleted;
}
From source file: eus.ixa.ixa.pipe.convert.AbsaSemEval.java
public static String absa2015Toabsa2015AnotatedWithMultipleDocClasModelsX(String fileName, String modelsList) {
    // Reading the ABSA xml file
    SAXBuilder sax = new SAXBuilder();
    XPathFactory xFactory = XPathFactory.instance();
    Document doc = null;
    try {
        doc = sax.build(fileName);
        XPathExpression<Element> expr = xFactory.compile("//sentence", Filters.element());
        List<Element> sentences = expr.evaluate(doc);
        for (Element sent : sentences) {
            Element opinionsElement = sent.getChild("Opinions");
            if (opinionsElement != null) {
                // Iterating over every opinion in the opinions element
                List<Element> opinionList = opinionsElement.getChildren();
                for (int i = opinionList.size() - 1; i >= 0; i--) {
                    Element opinion = opinionList.get(i);
                    opinionsElement.removeContent(opinion);
                }
            }
            // Replace any previous sentence text file before writing the current one
            Path pathx = FileSystems.getDefault().getPath("./", "TEXT.txt");
            Files.deleteIfExists(pathx);
            File f = new File("TEXT.txt");
            FileUtils.writeStringToFile(f, sent.getChildText("text"), "UTF-8");

            // Build the tagging pipeline command from the list of models
            // (commented-out experimentation with a separate tokenization step
            // and a client/port variant has been elided from the original source)
            File file = new File(modelsList);
            FileReader fileReader = new FileReader(file);
            BufferedReader bufferedReader = new BufferedReader(fileReader);
            String line;
            String nextCommand = "";
            while ((line = bufferedReader.readLine()) != null) {
                System.err.println(" Model: " + line);
                nextCommand += " | java -jar /home/vector/Documents/Ixa-git/ixa-pipe-doc/target/ixa-pipe-doc-0.0.2-exec.jar tag -m "
                        + line;
            }
            fileReader.close();

            String[] cmd = { "/bin/sh", "-c",
                    "cat TEXT.txt | java -jar /home/vector/Documents/Ixa-git/ixa-pipe-tok/target/ixa-pipe-tok-1.8.5-exec.jar tok -l en"
                            + nextCommand + " > NAF.txt" };
            Process proc = Runtime.getRuntime().exec(cmd);
            try {
                if (!proc.waitFor(30, TimeUnit.MINUTES)) {
                    // Timeout: kill the process (consider using destroyForcibly instead)
                    proc.destroy();
                    throw new Exception("TimeOut Expired in IXA");
                }
            } catch (Exception e) {
                System.out.println(" ERROR: ");
            }

            File fileDir = new File("NAF.txt");
            System.err.println("Terminado: " + sent.getChildText("text")); // "Terminado" is Spanish for "Finished"
            BufferedReader breader1 = new BufferedReader(
                    new InputStreamReader(new FileInputStream(fileDir), "UTF-8"));
            KAFDocument kaf = null;
            try {
                kaf = KAFDocument.createFromStream(breader1);
            } catch (Exception e) {
                System.err.println("ENTRA A ERROR"); // Spanish debug output: "entering error branch"
                e.printStackTrace();
                continue;
            }
            List<Topic> topicList = kaf.getTopics();
            for (Topic topic : topicList) {
                if (!topic.getTopicValue().equals("NO")) {
                    // NOTE: opinionsElement may still be null here if the sentence
                    // had no <Opinions> element; the original code does not guard this
                    Element opinionElem = new Element("Opinion");
                    opinionElem.setAttribute("target", "na");
                    opinionElem.setAttribute("category", topic.getTopicValue());
                    // TODO we still do not have polarity here
                    opinionElem.setAttribute("polarity", "na");
                    opinionElem.setAttribute("from", "0");
                    opinionElem.setAttribute("to", "0");
                    opinionsElement.addContent(opinionElem);
                }
            }
        } // end of sentence loop
    } catch (JDOMException | IOException e) {
        e.printStackTrace();
    }
    XMLOutputter xmlOutput = new XMLOutputter();
    Format format = Format.getPrettyFormat();
    xmlOutput.setFormat(format);
    return xmlOutput.outputString(doc);
}
From source file: edu.ucla.cs.scai.canali.core.index.utils.DBpediaOntologyUtils.java
public void createTripleLink() throws Exception {
    // It creates only a symbolic link
    Path newLink = new File(destinationPath + "triples").toPath();
    Path target = new File(downloadedFilesPath + "mappingbased_properties_cleaned_en.nt").toPath();
    Files.deleteIfExists(newLink);
    Files.createSymbolicLink(newLink, target);
}
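deleteIfExists is needed here because Files.createSymbolicLink throws FileAlreadyExistsException if anything already exists at the link path; deleting first makes the operation re-runnable. A condensed sketch of the idiom (the method name is illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Sketch of the re-runnable "replace symlink" idiom from createTripleLink() above.
static void relink(Path link, Path target) throws IOException {
    Files.deleteIfExists(link);              // remove an existing link (or file) at this path
    Files.createSymbolicLink(link, target);  // would throw FileAlreadyExistsException otherwise
}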