List of usage examples for java.nio.file.Path.toFile()
default File toFile()
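toFile() returns a java.io.File representing this path, so it is mostly used to hand a Path to older APIs that still expect File. It only works for paths on the default file system and throws UnsupportedOperationException otherwise. A minimal round-trip sketch before the examples (the file name below is made up for illustration):

import java.io.File;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ToFileRoundTrip {
    public static void main(String[] args) {
        Path path = Paths.get("data", "example.txt"); // hypothetical path
        File file = path.toFile();                    // bridge to java.io based APIs
        System.out.println(file.getAbsolutePath());
        Path back = file.toPath();                    // and convert back again
        System.out.println(back);
    }
}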
From source file:de.dal33t.powerfolder.util.AddLicenseHeader.java
public static void addLicInfo(Path f) {
    try {
        if (f.toAbsolutePath().toString().contains("\\jwf\\jwf")) {
            System.out.println("Skip: " + f.toRealPath());
            return;
        }
        if (f.toAbsolutePath().toString().contains("org\\jdesktop\\swinghelper")) {
            System.out.println("Skip: " + f.toRealPath());
            return;
        }
        String content = FileUtils.readFileToString(f.toFile(), "UTF-8");
        int i = content.indexOf("package");
        // if (i != 693) {
        //     System.out.println("Skip: " + f.getCanonicalPath() + ": " + i);
        //     return;
        // }
        boolean dennis = content.contains("@author Dennis");
        if (dennis) {
            System.err.println("Dennis: " + f.toRealPath() + ": " + i);
            content = LIC_INFO_DENNIS + content.substring(i, content.length());
        } else {
            System.out.println("Onlyme: " + f.toRealPath() + ": " + i);
            content = LIC_INFO + content.substring(i, content.length());
        }
        // System.out.println(content);
        FileUtils.writeStringToFile(f.toFile(), content, "UTF-8");
        // throw new RuntimeException();
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
From source file:au.org.ands.vocabs.toolkit.utils.ToolkitFileUtils.java
/**
 * Compress the files in the backup folder for a project.
 * @param projectId The project ID
 * @throws IOException Any exception when reading/writing data.
 */
public static void compressBackupFolder(final String projectId) throws IOException {
    String backupPath = getBackupPath(projectId);
    if (!Files.isDirectory(Paths.get(backupPath))) {
        // No such directory, so nothing to do.
        return;
    }
    String projectSlug = makeSlug(projectId);
    // The name of the ZIP file that does/will contain all
    // backups for this project.
    Path zipFilePath = Paths.get(backupPath).resolve(projectSlug + ".zip");
    // A temporary ZIP file. Any existing content in the zipFilePath
    // will be copied into this, followed by any other files in
    // the directory that have not yet been added.
    Path tempZipFilePath = Paths.get(backupPath).resolve("temp" + ".zip");
    File tempZipFile = tempZipFilePath.toFile();
    if (!tempZipFile.exists()) {
        tempZipFile.createNewFile();
    }
    ZipOutputStream tempZipOut = new ZipOutputStream(new FileOutputStream(tempZipFile));
    File existingZipFile = zipFilePath.toFile();
    if (existingZipFile.exists()) {
        ZipFile zipIn = new ZipFile(existingZipFile);
        Enumeration<? extends ZipEntry> entries = zipIn.entries();
        while (entries.hasMoreElements()) {
            ZipEntry e = entries.nextElement();
            logger.debug("compressBackupFolder copying: " + e.getName());
            tempZipOut.putNextEntry(e);
            if (!e.isDirectory()) {
                copy(zipIn.getInputStream(e), tempZipOut);
            }
            tempZipOut.closeEntry();
        }
        zipIn.close();
    }
    File dir = new File(backupPath);
    File[] files = dir.listFiles();
    for (File source : files) {
        if (!source.getName().toLowerCase().endsWith(".zip")) {
            logger.debug("compressBackupFolder compressing and "
                    + "deleting file: " + source.toString());
            if (zipFile(tempZipOut, source)) {
                source.delete();
            }
        }
    }
    tempZipOut.flush();
    tempZipOut.close();
    tempZipFile.renameTo(existingZipFile);
}
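The example above keeps the archive consistent by writing everything into a temporary ZIP and then renaming it over the old one. A stripped-down sketch of that pattern follows; the paths and entry name are hypothetical, not taken from the project above. Since File.renameTo reports failure only through an easily ignored boolean, the sketch finishes with Files.move, which throws a descriptive IOException instead.

import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

public class TempZipThenRename {
    public static void main(String[] args) throws Exception {
        Path zipPath = Paths.get("backup", "project.zip"); // final archive (hypothetical)
        Path tempPath = Paths.get("backup", "temp.zip");   // staging archive
        Files.createDirectories(zipPath.getParent());
        // ZipOutputStream predates NIO, so toFile() bridges the Path to FileOutputStream.
        try (ZipOutputStream out = new ZipOutputStream(new FileOutputStream(tempPath.toFile()))) {
            out.putNextEntry(new ZipEntry("notes.txt"));
            out.write("example".getBytes(StandardCharsets.UTF_8));
            out.closeEntry();
        }
        // Replace the old archive; unlike File.renameTo, Files.move fails loudly.
        Files.move(tempPath, zipPath, StandardCopyOption.REPLACE_EXISTING);
    }
}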
From source file:de.teamgrit.grit.preprocess.fetch.SvnFetcher.java
/**
 * Initializes the workspace by performing a checkout in a specified
 * directory.
 *
 * @param connectionData
 *            holds login information and the remote location of the SVN
 *            repository
 * @param targetDirectory
 *            the local directory in which the checkout will be placed
 * @return true if the checkout was successful, false otherwise
 * @throws SubmissionFetchingException
 *             if the fetching fails
 */
private static boolean initializeDataSource(Connection connectionData, Path targetDirectory)
        throws SubmissionFetchingException {
    // Nuke previous contents, so we can be sure that we have a clean state.
    try {
        FileUtils.deleteDirectory(targetDirectory.toFile());
        Files.createDirectories(targetDirectory);
    } catch (IOException e) {
        LOGGER.severe("Could not clean data source: " + targetDirectory.toAbsolutePath().toString()
                + " -> " + e.getMessage());
        return false;
    }

    if (!checkConnectionToRemoteSVN(connectionData.getLocation())) {
        return false;
    }

    // Now tell svn to check out.
    try {
        List<String> svnCommand = new LinkedList<>();
        svnCommand.add("svn");
        svnCommand.add("checkout");
        svnCommand.add(connectionData.getLocation());
        SVNResultData svnResult = runSVNCommand(connectionData, svnCommand, targetDirectory);
        if (svnResult != null) {
            LOGGER.info("Successful SVN pull from " + connectionData.getLocation());
        }
        // Any SVN return value != 0 implies an error and the fetch wasn't
        // clean. Hence we bundle the output into the exception and throw.
        if (svnResult.getReturnValue() != 0) {
            String svnOutForException = "";
            for (String message : svnResult.getSvnOutputLines()) {
                svnOutForException = svnOutForException.concat(message + "\n");
            }
            throw new SubmissionFetchingException(svnOutForException);
        }
    } catch (IOException e) {
        LOGGER.warning("unable to check out repository: " + connectionData.getLocation());
        return false;
    }
    LOGGER.config("Checked out, moving internal repository path to " + targetDirectory.toString());
    return true;
}
From source file:fr.pilato.elasticsearch.crawler.fs.framework.FsCrawlerUtil.java
/**
 * Copy a single resource file from the classpath or from a JAR.
 * @param source The classpath resource to copy
 * @param target The target
 * @throws IOException If copying does not work
 */
public static void copyResourceFile(String source, Path target) throws IOException {
    InputStream resource = FsCrawlerUtil.class.getResourceAsStream(source);
    FileUtils.copyInputStreamToFile(resource, target.toFile());
}
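The toFile() call above exists only because FileUtils.copyInputStreamToFile takes a java.io.File. A sketch of the same copy with plain NIO (a hypothetical companion method, not part of FsCrawlerUtil) works on the Path directly; one behavioural difference is that Files.copy does not create missing parent directories the way the commons-io helper does.

// requires java.io.InputStream, java.io.IOException, java.nio.file.Files,
// java.nio.file.Path, java.nio.file.StandardCopyOption
public static void copyResourceFileNio(String source, Path target) throws IOException {
    try (InputStream resource = FsCrawlerUtil.class.getResourceAsStream(source)) {
        // REPLACE_EXISTING mirrors the overwrite behaviour of copyInputStreamToFile
        Files.copy(resource, target, StandardCopyOption.REPLACE_EXISTING);
    }
}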
From source file:info.pancancer.arch3.containerProvisioner.ContainerProvisionerThreads.java
/**
 * Run the reaper.
 *
 * @param settings
 * @param ipAddress
 *            specify an IP address (otherwise clean up only failed deployments)
 * @throws JsonIOException
 * @throws IOException
 */
private static void runReaper(HierarchicalINIConfiguration settings, String ipAddress, String vmName)
        throws IOException {
    String param = settings.getString(Constants.PROVISION_YOUXIA_REAPER);
    CommandLine parse = CommandLine.parse("dummy " + (param == null ? "" : param));
    List<String> arguments = new ArrayList<>();
    arguments.addAll(Arrays.asList(parse.getArguments()));
    arguments.add("--kill-list");
    // create a json file with the one targeted ip address
    Gson gson = new Gson();
    // we can't use the full set of database records because unlike Amazon,
    // OpenStack reuses private ip addresses (very quickly too)
    // String[] successfulVMAddresses = db.getSuccessfulVMAddresses();
    String[] successfulVMAddresses = new String[] {};
    if (ipAddress != null) {
        successfulVMAddresses = new String[] { ipAddress, vmName };
    }
    LOG.info("Kill list contains: " + Arrays.asList(successfulVMAddresses));
    Path createTempFile = Files.createTempFile("target", "json");
    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(new FileOutputStream(createTempFile.toFile()), StandardCharsets.UTF_8))) {
        gson.toJson(successfulVMAddresses, bw);
    }
    arguments.add(createTempFile.toAbsolutePath().toString());
    String[] toArray = arguments.toArray(new String[arguments.size()]);
    LOG.info("Running youxia reaper with following parameters:" + Arrays.toString(toArray));
    // need to make sure reaper and deployer do not overlap
    try {
        Reaper.main(toArray);
    } catch (Exception e) {
        LOG.error("Youxia reaper threw the following exception", e);
    }
}
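As an aside, the writer chain above (FileOutputStream on createTempFile.toFile(), wrapped twice) can be collapsed with NIO. A sketch of a drop-in replacement for that try-with-resources block, with the Gson call unchanged:

try (BufferedWriter bw = Files.newBufferedWriter(createTempFile, StandardCharsets.UTF_8)) {
    gson.toJson(successfulVMAddresses, bw); // Gson accepts any Appendable, including BufferedWriter
}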
From source file:controllers.ImageBrowser.java
@ModelAccess(AccessType.FILE_UPLOAD)
public static void createDirectory(Path path) {
    path.toFile().mkdir();
    ok();
}
From source file:controllers.ImageBrowser.java
@ModelAccess(AccessType.FILE_DELETE)
public static void deleteFile(Path path) {
    path.toFile().delete();
    ok();
}
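In both controller actions above, File.mkdir() and File.delete() signal failure only through a boolean that is ignored. A sketch of a checked variant for the delete case (deleteFileChecked is a hypothetical name, and whether the surrounding framework tolerates the thrown exception is an assumption); the analogous replacement for mkdir() would be Files.createDirectories(path).

@ModelAccess(AccessType.FILE_DELETE)
public static void deleteFileChecked(Path path) {
    try {
        Files.delete(path); // unlike File.delete(), failure comes with a reason
    } catch (IOException e) {
        throw new RuntimeException("Could not delete " + path, e); // surface the failure
    }
    ok();
}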
From source file:io.Tools.java
/**
 * Tested method to get a PDB file from a path.
 * The chemcomp files are downloaded automatically.
 *
 * @param url
 * @throws ParsingConfigFileException
 * @throws IOException
 */
private static Structure getStructure(URL url)
        throws ParsingConfigFileException, IOException, ExceptionInIOPackage {
    AlgoParameters algoParameters = Tools.generateModifiedAlgoParametersForTestWithTestFolders();
    Path path = null;
    try {
        path = Paths.get(url.toURI());
    } catch (URISyntaxException e1) {
        assertTrue(false);
    }
    Structure structure = null;
    BiojavaReaderIfc reader = new BiojavaReader(algoParameters);
    structure = reader.read(path.toFile().getAbsolutePath(), algoParameters.getPATH_TO_CHEMCOMP_FOLDER())
            .getValue();
    return structure;
}
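A small note on the read call above: for a path on the default file system, path.toFile().getAbsolutePath() and path.toAbsolutePath().toString() typically yield the same string, so the File detour here is a stylistic choice. A one-line sketch with a hypothetical file name:

Path path = Paths.get("structure.pdb");           // hypothetical file name
String viaFile = path.toFile().getAbsolutePath(); // the form used above
String viaPath = path.toAbsolutePath().toString(); // pure-NIO equivalent on the default file system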
From source file:com.me.jvmi.Main.java
public static Collection<InputRecord> parseInput(Path csv) throws IOException {
    Map<String, InputRecord> records = new HashMap<>();
    CSVParser parser = CSVParser.parse(csv.toFile(), Charset.forName("UTF-8"), CSVFormat.DEFAULT.withHeader());
    for (CSVRecord record : parser) {
        InputRecord input = new InputRecord(record);
        records.put(input.getId(), input);
    }
    for (InputRecord record : records.values()) {
        if (record.isPackage()) {
            for (String id : record.packageIds) {
                if (!records.containsKey(id)) {
                    throw new IllegalStateException("Could not find product for package id: " + id);
                }
                record.addItem(records.get(id));
            }
        }
    }
    return records.values();
}
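Commons CSV can also parse from a Reader, so the File-and-Charset overload above is not the only option. A sketch of the same parsing loop reading the Path directly with NIO (java.io.Reader, Files, and StandardCharsets imports assumed; the InputRecord handling is unchanged, and the parser is now closed by try-with-resources):

try (Reader reader = Files.newBufferedReader(csv, StandardCharsets.UTF_8);
        CSVParser parser = CSVParser.parse(reader, CSVFormat.DEFAULT.withHeader())) {
    for (CSVRecord record : parser) {
        InputRecord input = new InputRecord(record);
        records.put(input.getId(), input);
    }
}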
From source file:com.puppycrawl.tools.checkstyle.AllChecksTest.java
/**
 * Gets names of checkstyle's modules which are documented in xdocs.
 * @param xdocsDirectoryPath xdocs directory path.
 * @return a set of checkstyle's modules which have xdoc documentation.
 * @throws ParserConfigurationException if a DocumentBuilder cannot be created which satisfies
 *     the configuration requested.
 * @throws IOException if any IO errors occur.
 * @throws SAXException if any parse errors occur.
 */
private static Set<String> getModulesNamesWhichHaveXdoc(String xdocsDirectoryPath)
        throws ParserConfigurationException, IOException, SAXException {
    final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();

    // Validations of XML file make parsing too slow, that is why we disable all validations.
    factory.setNamespaceAware(false);
    factory.setValidating(false);
    factory.setFeature("http://xml.org/sax/features/namespaces", false);
    factory.setFeature("http://xml.org/sax/features/validation", false);
    factory.setFeature("http://apache.org/xml/features/nonvalidating/load-dtd-grammar", false);
    factory.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);

    final Set<Path> xdocsFilePaths = getXdocsFilePaths(xdocsDirectoryPath);
    final Set<String> modulesNamesWhichHaveXdoc = new HashSet<>();

    for (Path path : xdocsFilePaths) {
        final DocumentBuilder builder = factory.newDocumentBuilder();
        final Document document = builder.parse(path.toFile());

        // optional, but recommended
        // FYI: http://stackoverflow.com/questions/13786607/normalization-in-dom-parsing-with-java-how-does-it-work
        document.getDocumentElement().normalize();

        final NodeList nodeList = document.getElementsByTagName("section");
        for (int i = 0; i < nodeList.getLength(); i++) {
            final Node currentNode = nodeList.item(i);
            if (currentNode.getNodeType() == Node.ELEMENT_NODE) {
                final Element module = (Element) currentNode;
                final String moduleName = module.getAttribute("name");
                if (!"Content".equals(moduleName) && !"Overview".equals(moduleName)) {
                    modulesNamesWhichHaveXdoc.add(moduleName);
                }
            }
        }
    }
    return modulesNamesWhichHaveXdoc;
}