Example usage for org.apache.hadoop.fs FileSystem createNewFile

List of usage examples for org.apache.hadoop.fs FileSystem createNewFile

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem createNewFile.

Prototype

public boolean createNewFile(Path f) throws IOException 

Document

Creates the given Path as a brand-new zero-length file.
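
A minimal sketch of calling createNewFile directly (the path and configuration below are illustrative, not taken from the examples that follow). As with java.io.File#createNewFile, the call returns false rather than throwing if the file already exists.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNewFileExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Illustrative path; any scheme the FileSystem supports (hdfs://, file://, ...) works.
        Path marker = new Path("/tmp/example/_SUCCESS");

        // Resolve the FileSystem that owns this path, as the usage examples below do.
        FileSystem fs = marker.getFileSystem(conf);

        // createNewFile creates a zero-length file and reports whether this call
        // actually created it (false if the path already existed).
        if (fs.createNewFile(marker)) {
            System.out.println("Created " + marker);
        } else {
            System.out.println(marker + " already exists");
        }
    }
}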

Usage

From source file:org.apache.carbondata.core.datastorage.store.impl.FileFactory.java

License:Apache License

public static boolean createNewFile(String filePath, FileType fileType) throws IOException {
    filePath = filePath.replace("\\", "/");
    switch (fileType) {
    case HDFS:
    case VIEWFS:
        Path path = new Path(filePath);
        FileSystem fs = path.getFileSystem(configuration);
        return fs.createNewFile(path);

    case LOCAL:
    default:
        File file = new File(filePath);
        return file.createNewFile();
    }
}

From source file:org.apache.carbondata.core.datastorage.store.impl.FileFactory.java

License:Apache License

/**
 * Creates a new lock file. On HDFS/ViewFS the newly created file is registered
 * with deleteOnExit so that it is cleaned up after an abrupt shutdown.
 *
 * @param filePath path of the lock file to create
 * @param fileType type of the underlying file system
 * @return true if the lock file was created, false otherwise
 * @throws IOException if the file system cannot be accessed
 */
public static boolean createNewLockFile(String filePath, FileType fileType) throws IOException {
    filePath = filePath.replace("\\", "/");
    switch (fileType) {
    case HDFS:
    case VIEWFS:
        Path path = new Path(filePath);
        FileSystem fs = path.getFileSystem(configuration);
        if (fs.createNewFile(path)) {
            fs.deleteOnExit(path);
            return true;
        }
        return false;
    case LOCAL:
    default:
        File file = new File(filePath);
        return file.createNewFile();
    }
}

From source file:org.apache.carbondata.core.datastore.impl.FileFactory.java

License:Apache License

public static boolean createNewFile(String filePath, FileType fileType) throws IOException {
    filePath = filePath.replace("\\", "/");
    switch (fileType) {
    case HDFS:
    case ALLUXIO:
    case VIEWFS:
        Path path = new Path(filePath);
        FileSystem fs = path.getFileSystem(configuration);
        return fs.createNewFile(path);

    case LOCAL:
    default:
        filePath = getUpdatedFilePath(filePath, fileType);
        File file = new File(filePath);
        return file.createNewFile();
    }
}

From source file:org.apache.carbondata.core.datastore.impl.FileFactory.java

License:Apache License

/**
 * Creates a new lock file. On HDFS, Alluxio, or ViewFS the newly created file is
 * registered with deleteOnExit so that it is cleaned up after an abrupt shutdown.
 *
 * @param filePath path of the lock file to create
 * @param fileType type of the underlying file system
 * @return true if the lock file was created, false otherwise
 * @throws IOException if the file system cannot be accessed
 */
public static boolean createNewLockFile(String filePath, FileType fileType) throws IOException {
    filePath = filePath.replace("\\", "/");
    switch (fileType) {
    case HDFS:
    case ALLUXIO:
    case VIEWFS:
        Path path = new Path(filePath);
        FileSystem fs = path.getFileSystem(configuration);
        if (fs.createNewFile(path)) {
            fs.deleteOnExit(path);
            return true;
        }
        return false;
    case LOCAL:
    default:
        filePath = getUpdatedFilePath(filePath, fileType);
        File file = new File(filePath);
        return file.createNewFile();
    }
}

From source file:org.apache.drill.exec.store.StorageStrategy.java

License:Apache License

/**
 * Creates the passed file on the appropriate file system.
 * Before creation, checks which parent directories do not exist.
 * Applies storage strategy rules to all newly created directories and to the file.
 * Returns the first created parent path, or the file itself if no new parent paths were created.
 *
 * Case 1: /a/b -> already exists, attempt to create /a/b/c/some_file.txt.
 * Will create the file and return /a/b/c.
 * Case 2: /a/b/c -> already exists, attempt to create /a/b/c/some_file.txt.
 * Will create the file and return /a/b/c/some_file.txt.
 * Case 3: /a/b/c/some_file.txt -> already exists, will fail.
 *
 * @param fs file system where the file should be located
 * @param file file path
 * @return first created parent path, or the file itself
 * @throws IOException if a problem occurs while creating the path, setting permissions,
 *         or adding the path to the delete-on-exit list
 */
public Path createFileAndApply(FileSystem fs, Path file) throws IOException {
    List<Path> locations = getNonExistentLocations(fs, file.getParent());
    if (!fs.createNewFile(file)) {
        throw new IOException(String.format("File [%s] already exists on file system [%s].",
                file.toUri().getPath(), fs.getUri()));
    }
    applyToFile(fs, file);

    if (locations.isEmpty()) {
        return file;
    }

    for (Path location : locations) {
        applyStrategy(fs, location, folderPermission, deleteOnExit);
    }
    return locations.get(locations.size() - 1);
}

From source file:org.apache.drill.exec.util.FileSystemUtilTestBase.java

License:Apache License

private static void createDefaultStructure(FileSystem fs, Path base, String name, int nesting)
        throws IOException {
    Path newBase = base;
    for (int i = 1; i <= nesting; i++) {
        Path path = new Path(newBase, Strings.repeat(name, i));
        fs.mkdirs(path);
        for (String fileName : Arrays.asList("f.txt", ".f.txt", "_f.txt")) {
            fs.createNewFile(new Path(path, fileName));
        }
        newBase = path;
    }
}

From source file:org.apache.druid.storage.hdfs.HdfsDataSegmentKillerTest.java

License:Apache License

@Test
public void testKillForSegmentPathWithoutPartitionNumber() throws Exception {
    Configuration config = new Configuration();
    HdfsDataSegmentKiller killer = new HdfsDataSegmentKiller(config, new HdfsDataSegmentPusherConfig() {
        @Override
        public String getStorageDirectory() {
            return "/tmp";
        }
    });

    FileSystem fs = FileSystem.get(config);
    Path dataSourceDir = new Path("/tmp/dataSourceNew");

    Path interval1Dir = new Path(dataSourceDir, "intervalNew");
    Path version11Dir = new Path(interval1Dir, "v1");

    Assert.assertTrue(fs.mkdirs(version11Dir));
    fs.createNewFile(new Path(version11Dir, StringUtils.format("%s_index.zip", 3)));

    killer.kill(getSegmentWithPath(new Path(version11Dir, "3_index.zip").toString()));

    Assert.assertFalse(fs.exists(version11Dir));
    Assert.assertFalse(fs.exists(interval1Dir));
    Assert.assertTrue(fs.exists(dataSourceDir));
    Assert.assertTrue(fs.exists(new Path("/tmp")));
    Assert.assertTrue(fs.exists(dataSourceDir));
    Assert.assertTrue(fs.delete(dataSourceDir, false));
}

From source file:org.apache.druid.storage.hdfs.HdfsDataSegmentKillerTest.java

License:Apache License

@Test
public void testKillForSegmentWithUniquePath() throws Exception {
    Configuration config = new Configuration();
    HdfsDataSegmentKiller killer = new HdfsDataSegmentKiller(config, new HdfsDataSegmentPusherConfig() {
        @Override
        public String getStorageDirectory() {
            return "/tmp";
        }
    });

    FileSystem fs = FileSystem.get(config);
    Path dataSourceDir = new Path("/tmp/dataSourceNew");

    Path interval1Dir = new Path(dataSourceDir, "intervalNew");
    Path version11Dir = new Path(interval1Dir, "v1");
    String uuid = UUID.randomUUID().toString().substring(0, 5);

    Assert.assertTrue(fs.mkdirs(version11Dir));
    fs.createNewFile(new Path(version11Dir, StringUtils.format("%s_%s_index.zip", 3, uuid)));

    killer.kill(getSegmentWithPath(
            new Path(version11Dir, StringUtils.format("%s_%s_index.zip", 3, uuid)).toString()));

    Assert.assertFalse(fs.exists(version11Dir));
    Assert.assertFalse(fs.exists(interval1Dir));
    Assert.assertTrue(fs.exists(dataSourceDir));
    Assert.assertTrue(fs.exists(new Path("/tmp")));
    Assert.assertTrue(fs.exists(dataSourceDir));
    Assert.assertTrue(fs.delete(dataSourceDir, false));
}

From source file:org.apache.druid.storage.hdfs.HdfsDataSegmentKillerTest.java

License:Apache License

private void makePartitionDirWithIndex(FileSystem fs, Path path) throws IOException {
    Assert.assertTrue(fs.mkdirs(path));
    fs.createNewFile(new Path(path, "index.zip"));
}

From source file:org.apache.giraph.zk.ZooKeeperManager.java

License:Apache License

/**
 * Create a new file with retries if it fails.
 *
 * @param fs File system where the new file is created
 * @param path Path of the new file
 * @param maxAttempts Maximum number of attempts
 * @param retryWaitMsecs Milliseconds to wait before retrying
 */
private static void createNewFileWithRetries(FileSystem fs, Path path, int maxAttempts, int retryWaitMsecs) {
    int attempt = 0;
    while (attempt < maxAttempts) {
        try {
            fs.createNewFile(path);
            return;
        } catch (IOException e) {
            LOG.warn("createNewFileWithRetries: Failed to create file at path " + path + " on attempt "
                    + attempt + " of " + maxAttempts + ".", e);
        }
        ++attempt;
        Uninterruptibles.sleepUninterruptibly(retryWaitMsecs, TimeUnit.MILLISECONDS);
    }
    throw new IllegalStateException("createNewFileWithRetries: Failed to create file at path " + path
            + " after " + attempt + " attempts");
}