Example usage for java.nio.file Files.move

List of usage examples for java.nio.file Files.move

Introduction

On this page you can find example usage of java.nio.file Files.move.

Prototype

public static Path move(Path source, Path target, CopyOption... options) throws IOException 

Source Link

Document

Move or rename a file to a target file.

Usage

From source file:Main.java

public static void main(String[] args) throws Exception {
    // Relocate users.txt from the docs folder to the music folder.
    // ATOMIC_MOVE makes the rename all-or-nothing where the underlying
    // filesystem supports it (and throws if it does not).
    Path from = Paths.get("C:/home/docs/users.txt");
    Path to = Paths.get("C:/home/music/users.txt");
    Files.move(from, to, StandardCopyOption.ATOMIC_MOVE);

}

From source file:Main.java

/**
 * Demonstrates Files.move with ATOMIC_MOVE, handling each specific
 * exception the move can raise before falling back to the generic
 * IOException case. (Catch order matters: the specific subclasses of
 * IOException must come first.)
 */
public static void main(String[] args) throws Exception {
    Path source = Paths.get("C:\\Java_Dev\\test1.txt");
    Path target = Paths.get("C:\\Java_Dev\\dir2\\test1.txt");

    try {
        // Files.move returns the path to the target file on success.
        Path p = Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        System.out.println(source + "  has  been  moved to " + p);
    } catch (NoSuchFileException e) {
        System.out.println("Source/target does  not  exist.");
    } catch (FileAlreadyExistsException e) {
        System.out.println(target + "  already exists.  Move failed.");
    } catch (DirectoryNotEmptyException e) {
        System.out.println(target + "  is not  empty.  Move failed.");
    } catch (AtomicMoveNotSupportedException e) {
        // Fixed message typo: "MOve" -> "Move".
        System.out.println("Atomic move is not  supported. Move  failed.");
    } catch (IOException e) {
        e.printStackTrace();
    }

}

From source file:uk.co.modularaudio.componentdesigner.generators.ComponentDesignerSupportFileGenerator.java

/**
 * Generates the component-designer support files into the output
 * directory given as the single command-line argument, then moves the
 * generated (and by then closed) database files into that directory.
 *
 * @param args exactly one element: the output directory path
 * @throws Exception if the directory cannot be created or generation fails
 */
public static void main(final String[] args) throws Exception {
    if (args.length != 1) {
        throw new IOException("Expecting only output directory: outputDir");
    }
    if (log.isInfoEnabled()) {
        log.info("Creating output in '" + args[0] + "'");
    }
    final File outputDir = new File(args[0]);
    if (!outputDir.exists()) {
        if (!outputDir.mkdirs()) {
            throw new IOException("Unable to create output directory");
        }
    }

    JTransformsConfigurator.setThreadsToOne();

    // Force the root log4j2 logger to INFO for the generation run.
    final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    final Configuration config = ctx.getConfiguration();
    final LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME);
    loggerConfig.setLevel(Level.INFO);
    ctx.updateLoggers();

    final ComponentDesignerSupportFileGenerator sfg = new ComponentDesignerSupportFileGenerator();

    sfg.init();

    sfg.generateFiles();

    sfg.initialiseThingsNeedingComponentGraph();

    // Capture the database file list before destroy() closes everything.
    final String[] dbFilesToMove = sfg.getDatabaseFiles();
    sfg.destroy();

    // Finally move the (now closed) database files into the output directory
    for (final String dbFileToMove : dbFilesToMove) {
        final File source = new File(dbFileToMove);
        // Use File(parent, child) instead of manual separator
        // concatenation — same result, no separator bookkeeping.
        final File target = new File(outputDir, source.getName());
        // NOTE(review): ATOMIC_MOVE throws AtomicMoveNotSupportedException
        // if source and target are on different filesystems — confirm the
        // generated files live on the same volume as the output dir.
        Files.move(source.toPath(), target.toPath(), StandardCopyOption.ATOMIC_MOVE);
    }

}

From source file:hdfs.MiniHDFS.java

/**
 * Boots a single-node MiniDFSCluster for testing, optionally secured
 * with Kerberos, seeds it with an elasticsearch user directory (and a
 * read-only repository archive if present on the classpath), then
 * publishes its PID and name-node port as files under the base dir.
 *
 * @param args {@code <baseDirectory>} or
 *             {@code <baseDirectory> <kerberosPrincipal> <kerberosKeytab>}
 */
public static void main(String[] args) throws Exception {
    if (args.length != 1 && args.length != 3) {
        throw new IllegalArgumentException(
                "Expected: MiniHDFS <baseDirectory> [<kerberosPrincipal> <kerberosKeytab>], " + "got: "
                        + Arrays.toString(args));
    }
    // Three args means the Kerberos principal/keytab were supplied.
    boolean secure = args.length == 3;

    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");

    // configure cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");

    // optionally configure security
    if (secure) {
        String kerberosPrincipal = args[1];
        String keytabFile = args[2];

        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
        cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true");
        cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
    }

    UserGroupInformation.setConfiguration(cfg);

    // TODO: remove hardcoded port!
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg);
    if (secure) {
        builder.nameNodePort(9998);
    } else {
        builder.nameNodePort(9999);
    }
    MiniDFSCluster dfs = builder.build();

    // Configure contents of the filesystem
    org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
    try (FileSystem fs = dfs.getFileSystem()) {

        // Set the elasticsearch user directory up
        fs.mkdirs(esUserPath);
        if (UserGroupInformation.isSecurityEnabled()) {
            List<AclEntry> acls = new ArrayList<>();
            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch")
                    .setPermission(FsAction.ALL).build());
            fs.modifyAclEntries(esUserPath, acls);
        }

        // Install a pre-existing repository into HDFS
        String directoryName = "readonly-repository";
        String archiveName = directoryName + ".tar.gz";
        URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
        if (readOnlyRepositoryArchiveURL != null) {
            // Untar the archive into a temp dir, copy it into HDFS under
            // /user/elasticsearch/existing/, then clean the temp dir up.
            Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
            File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
            FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
            FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());

            fs.copyFromLocalFile(true, true,
                    new org.apache.hadoop.fs.Path(
                            tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
                    esUserPath.suffix("/existing/" + directoryName));

            FileUtils.deleteDirectory(tempDirectory.toFile());
        }
    }

    // write our PID file; write-to-temp + ATOMIC_MOVE so readers never
    // observe a partially written file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);

    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}

From source file:ms.dew.devops.kernel.plugin.appkind.frontend_node.FrontendNodeBuildFlow.java

protected void preDockerBuild(FinalProjectConfig config, String flowBasePath) throws IOException {
    // Stage the docker build context: swap in the freshly prepared dist
    // directory, write the server config (empty if none), and drop in
    // the node frontend Dockerfile.
    // NOTE(review): getTargetDirectory() + "dew_prepare" has no explicit
    // separator — presumably getTargetDirectory() ends with one; confirm.
    final String stagedDist = config.getTargetDirectory() + "dew_prepare" + File.separator + "dist";
    FileUtils.deleteDirectory(new File(flowBasePath + "dist"));
    Files.move(Paths.get(stagedDist), Paths.get(flowBasePath + "dist"),
            StandardCopyOption.REPLACE_EXISTING);
    final String serverConfig = config.getApp().getServerConfig();
    final byte[] confContent = (serverConfig != null && !serverConfig.isEmpty())
            ? serverConfig.getBytes()
            : "".getBytes();
    Files.write(Paths.get(flowBasePath + "custom.conf"), confContent);
    $.file.copyStreamToPath(DevOps.class.getResourceAsStream("/dockerfile/frontend_node/Dockerfile"),
            flowBasePath + "Dockerfile");
}

From source file:com.netflix.spinnaker.halyard.config.config.v1.AtomicFileWriter.java

public void commit() throws IOException {
    // Close first so all buffered content reaches the temp file before
    // the move; null the field so a later close()/abort cannot reuse it.
    writer.close();
    writer = null;
    // Replace the real file with the fully written temp file.
    Files.move(tmpPath, path, REPLACE_EXISTING);
}

From source file:com.stimulus.archiva.domain.Settings.java

/**
 * Saves the given settings to the file {@code name} by writing to a
 * temp file first and then moving it over the destination, so a full
 * disk cannot destroy the existing config during the write itself.
 *
 * @param name    destination file path
 * @param intro   introductory comment written at the top of the file
 * @param prop    the settings to persist
 * @param charset character set used for the output
 * @throws ConfigurationException if writing the temp file fails
 */
public static void saveProperties(final String name, String intro, Settings prop, String charset)
        throws ConfigurationException {
    File f = null;
    try {
        // if the disk is full we dont want to end up in a situation where we delete
        // server.conf file
        f = File.createTempFile("server_conf", ".tmp");
        // NOTE(review): the FileOutputStream is never closed here —
        // presumably prop.store() closes it; confirm, otherwise this
        // leaks a file handle.
        prop.store(intro, new FileOutputStream(f), charset);

    } catch (Exception e) {
        if (f != null)
            f.delete();
        throw new ConfigurationException("failed to save properties. cause:" + e.toString(), e, logger);
    }
    // Remove the old file, then move the temp file into place.
    File newFile = new File(name);
    newFile.delete();
    //Mod start Seolhwa.kim 2017-04-13
    //f.renameTo(newFile);

    try {
        logger.debug("####################################### Call Files.move");
        Files.move(Paths.get(f.getAbsolutePath()), Paths.get(newFile.getAbsolutePath()),
                StandardCopyOption.REPLACE_EXISTING);
    } catch (IOException e) {
        // NOTE(review): a failed move is only logged, not rethrown — the
        // caller sees success even though the old file was already
        // deleted above. Consider propagating as ConfigurationException.
        logger.debug("####################################### Call Files.move fails");
        e.printStackTrace();
    }

    //Mod end Seolhwa.kim 2017-04-13
}

From source file:pm.filemanager.operations.FileOperations.java

/**
 * Moves the folder at {@code source} into {@code destination},
 * replacing any existing target. The target path is built as
 * {@code Paths.get(destination, source)}, i.e. the source path is
 * re-rooted beneath the destination directory.
 *
 * @param source      path of the folder to move
 * @param destination directory to move the folder into
 * @throws IOException declared for API compatibility; move failures are
 *                     currently reported on stdout rather than thrown
 */
public static void moveFolder(String source, String destination) throws IOException {

    Path sourcePath = Paths.get(source);
    Path targetPath = Paths.get(destination, source);
    try {
        Files.move(sourcePath, targetPath, StandardCopyOption.REPLACE_EXISTING);
    } catch (IOException e) {
        // Keep the user-friendly message, but don't swallow the actual
        // cause — without it, failures are undiagnosable.
        System.out.println("You must select a valid directory to move the folder into!");
        System.out.println("Move failed: " + e);
    }
}

From source file:ms.dew.devops.kernel.plugin.appkind.frontend_node.FrontendNodePrepareFlow.java

protected void postPrepareBuild(FinalProjectConfig config, String flowBasePath) throws IOException {
    // Replace the flow's dist directory with the freshly built one.
    // NOTE(review): config.getDirectory() + "dist" has no explicit
    // separator — presumably getDirectory() returns a path ending in
    // File.separator; confirm against other callers.
    FileUtils.deleteDirectory(new File(flowBasePath + "dist"));
    Files.move(Paths.get(config.getDirectory() + "dist"), Paths.get(flowBasePath + "dist"),
            StandardCopyOption.REPLACE_EXISTING);
}

From source file:org.kie.workbench.common.migration.cli.RealSystemAccess.java

/**
 * Moves or renames {@code source} to {@code target} by delegating
 * directly to {@link Files#move(Path, Path, CopyOption...)}.
 *
 * @param source  path of the file to move
 * @param target  destination path
 * @param options copy options (e.g. REPLACE_EXISTING, ATOMIC_MOVE)
 * @return the path to the target file
 * @throws IOException if the move fails
 */
@Override
public Path move(Path source, Path target, CopyOption... options) throws IOException {
    return Files.move(source, target, options);
}