Example usage for org.apache.hadoop.fs FileSystem createNewFile

List of usage examples for org.apache.hadoop.fs FileSystem createNewFile

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#createNewFile.

Prototype

public boolean createNewFile(Path f) throws IOException 

Document

Creates the given Path as a brand-new zero-length file, provided one does not already exist. Returns true if the file was created, and false if something already exists at that path.
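
A minimal, self-contained sketch of the call is shown below; the default Configuration and the path /tmp/example-marker are illustrative assumptions, not taken from the examples that follow.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNewFileExample {
    public static void main(String[] args) throws IOException {
        // Obtain the FileSystem for the default URI in the Configuration
        // (the local file system unless fs.defaultFS points at a cluster).
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path marker = new Path("/tmp/example-marker"); // illustrative path
        if (fs.createNewFile(marker)) {
            // true: the zero-length file did not exist and was created
            System.out.println("Created " + marker);
        } else {
            // false: a file or directory already exists at that path
            System.out.println(marker + " already exists");
        }
    }
}

Several of the examples below use this boolean result as a cheap writability probe: if createNewFile succeeds, the marker file is deleted again; otherwise the target directory is reported as not writable.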

Usage

From source file:gobblin.filesystem.MetricsFileSystemInstrumentationTest.java

License:Apache License

/**
 * This test is disabled because it requires a local HDFS cluster at localhost:8020, which must be installed and set up manually.
 * Changes to {@link MetricsFileSystemInstrumentation} should be followed by a manual run of this test.
 *
 * TODO: figure out how to fully automate this test.
 * @throws Exception
 */
@Test(enabled = false)
public void test() throws Exception {

    String uri = "instrumented-hdfs://localhost:9000";

    FileSystem fs = FileSystem.get(new URI(uri), new Configuration());

    String name = UUID.randomUUID().toString();
    fs.mkdirs(new Path("/tmp"));

    // Test absolute paths
    Path absolutePath = new Path("/tmp", name);
    Assert.assertFalse(fs.exists(absolutePath));
    fs.createNewFile(absolutePath);
    Assert.assertTrue(fs.exists(absolutePath));
    Assert.assertEquals(fs.getFileStatus(absolutePath).getLen(), 0);
    fs.delete(absolutePath, false);
    Assert.assertFalse(fs.exists(absolutePath));

    // Test fully qualified paths
    Path fqPath = new Path(uri + "/tmp", name);
    Assert.assertFalse(fs.exists(fqPath));
    fs.createNewFile(fqPath);
    Assert.assertTrue(fs.exists(fqPath));
    Assert.assertEquals(fs.getFileStatus(fqPath).getLen(), 0);
    fs.delete(fqPath, false);
    Assert.assertFalse(fs.exists(fqPath));
}

From source file:hdfs.hdfsadapter.XMLJob.java

public int run(String[] args) throws Exception {

    long startTime = System.currentTimeMillis();

    // Paths of input and output directory
    Path input = new Path(args[0]); //input path
    Path output = new Path(args[1]); //output path
    Path temp = new Path("buffer.txt");

    // Create configuration
    Configuration conf = super.getConf();
    //conf.set("mapred.map.tasks", );
    conf.set("fs.default.name", "hdfs://localhost:9000");
    String tag = args[2];
    conf.set("start_tag", "<" + tag + ">");
    conf.set("end_tag", "</" + tag + ">");
    // Create connector with the hdfs system
    FileSystem hdfs = FileSystem.get(conf);

    // Delete output if it exists to avoid error
    if (hdfs.exists(output)) {
        hdfs.delete(output, true);
    }
    if (hdfs.exists(temp)) {
        hdfs.delete(temp, true);
    }
    hdfs.createNewFile(temp);
    DistributedCache.addCacheFile(new URI("buffer.txt"), conf);

    Job read = new Job(super.getConf(), "Read from HDFS");
    read.setNumReduceTasks(0);
    // Assign Map and Reduce class
    read.setJarByClass(XmlReadMapper.class);
    read.setMapperClass(XmlReadMapper.class);
    // Define the data type of key and value
    read.setMapOutputKeyClass(Text.class); //key from map
    read.setMapOutputValueClass(Text.class);//value from map
    // Set input path 
    FileInputFormat.addInputPath(read, input);

    //How to read each block
    //1.Whole Block
    //read.setInputFormatClass(XmlInputFormatBlockSolution.class);

    //2.One Buffer 
    //read.setInputFormatClass(XmlInputFormatOneBufferSolution.class);

    //3.Two buffers
    read.setInputFormatClass(XmlInputFormatTwoBufferSolution.class);

    // Set output path
    FileOutputFormat.setOutputPath(read, output);
    read.setOutputFormatClass(TextOutputFormat.class);

    //Execute job 
    int code = read.waitForCompletion(true) ? 0 : 1;

    URI[] filenames = DistributedCache.getCacheFiles(conf);

    long endTime = System.currentTimeMillis();
    long totalTime = endTime - startTime;
    System.out.println(totalTime);

    return code;
}

From source file:io.prestosql.plugin.hive.AbstractTestHiveClient.java

License:Apache License

private void assertEmptyFile(HiveStorageFormat format) throws Exception {
    SchemaTableName tableName = temporaryTable("empty_file");
    try {
        List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
        createEmptyTable(tableName, format, columns, ImmutableList.of());

        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();

            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(
                    metadata.getColumnHandles(session, tableHandle).values());

            Table table = transaction.getMetastore(tableName.getSchemaName())
                    .getTable(tableName.getSchemaName(), tableName.getTableName())
                    .orElseThrow(AssertionError::new);

            // verify directory is empty
            HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName());
            Path location = new Path(table.getStorage().getLocation());
            assertTrue(listDirectory(context, location).isEmpty());

            // read table with empty directory
            readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(0),
                    Optional.of(ORC));

            // create empty file
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
            assertTrue(fileSystem.createNewFile(new Path(location, "empty-file")));
            assertEquals(listDirectory(context, location), ImmutableList.of("empty-file"));

            // read table with empty file
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session,
                    TupleDomain.all(), OptionalInt.of(1), Optional.empty());
            assertEquals(result.getRowCount(), 0);
        }
    } finally {
        dropTable(tableName);
    }
}

From source file:org.apache.accumulo.core.client.impl.BulkImport.java

License:Apache License

/**
 * Check path of bulk directory and permissions
 */
private Path checkPath(FileSystem fs, String dir) throws IOException, AccumuloException {
    Path ret;

    if (dir.contains(":")) {
        ret = new Path(dir);
    } else {
        ret = fs.makeQualified(new Path(dir));
    }

    try {
        if (!fs.getFileStatus(ret).isDirectory()) {
            throw new AccumuloException("Bulk import directory " + dir + " is not a directory!");
        }
        Path tmpFile = new Path(ret, "isWritable");
        if (fs.createNewFile(tmpFile))
            fs.delete(tmpFile, true);
        else
            throw new AccumuloException("Bulk import directory " + dir + " is not writable.");
    } catch (FileNotFoundException fnf) {
        throw new AccumuloException("Bulk import directory " + dir + " does not exist or has bad permissions",
                fnf);
    }
    return ret;
}

From source file:org.apache.accumulo.core.clientImpl.bulk.BulkImport.java

License:Apache License

/**
 * Check path of bulk directory and permissions
 */
private Path checkPath(FileSystem fs, String dir) throws IOException, AccumuloException {
    Path ret;

    if (dir.contains(":")) {
        ret = new Path(dir);
    } else {
        ret = fs.makeQualified(new Path(dir));
    }

    try {
        if (!fs.getFileStatus(ret).isDirectory()) {
            throw new AccumuloException("Bulk import directory " + dir + " is not a directory!");
        }
        Path tmpFile = new Path(ret, "isWritable");
        if (fs.createNewFile(tmpFile))
            fs.delete(tmpFile, true);
        else
            throw new AccumuloException("Bulk import directory " + dir + " is not writable.");
    } catch (FileNotFoundException fnf) {
        throw new AccumuloException("Bulk import directory " + dir + " does not exist or has bad permissions",
                fnf);
    }

    // TODO ensure dir does not contain bulk load mapping

    return ret;
}

From source file:org.apache.accumulo.server.test.scalability.Run.java

License:Apache License

public static void main(String[] args) throws Exception {

    final String sitePath = "/tmp/scale-site.conf";
    final String testPath = "/tmp/scale-test.conf";

    // parse command line
    if (args.length != 3) {
        throw new IllegalArgumentException("usage : Run <testId> <action> <numTabletServers>");
    }
    String testId = args[0];
    String action = args[1];
    int numTabletServers = Integer.parseInt(args[2]);

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs;
    fs = FileSystem.get(conf);

    fs.copyToLocalFile(new Path("/accumulo-scale/conf/site.conf"), new Path(sitePath));
    fs.copyToLocalFile(new Path(String.format("/accumulo-scale/conf/%s.conf", testId)), new Path(testPath));

    // load configuration file properties
    Properties scaleProps = new Properties();
    Properties testProps = new Properties();
    try {
        scaleProps.load(new FileInputStream(sitePath));
        testProps.load(new FileInputStream(testPath));
    } catch (Exception e) {
        System.out.println("Problem loading config file");
        e.printStackTrace();
    }

    ScaleTest test = (ScaleTest) Class.forName(String.format("accumulo.server.test.scalability.%s", testId))
            .newInstance();

    test.init(scaleProps, testProps, numTabletServers);

    if (action.equalsIgnoreCase("setup")) {
        test.setup();
    } else if (action.equalsIgnoreCase("client")) {
        InetAddress addr = InetAddress.getLocalHost();
        String host = addr.getHostName();
        fs.createNewFile(new Path("/accumulo-scale/clients/" + host));
        test.client();
        fs.copyFromLocalFile(new Path("/tmp/scale.out"), new Path("/accumulo-scale/results/" + host));
    } else if (action.equalsIgnoreCase("teardown")) {
        test.teardown();
    }
}

From source file:org.apache.accumulo.test.functional.WriteAheadLogEncryptedIT.java

License:Apache License

@Override
public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
    String keyPath = System.getProperty("user.dir") + "/target/mini-tests/WriteAheadLogEncryptedIT-testkeyfile";
    cfg.setProperty(Property.INSTANCE_CRYPTO_SERVICE, "org.apache.accumulo.core.cryptoImpl.AESCryptoService");
    cfg.setProperty(INSTANCE_CRYPTO_PREFIX.getKey() + "key.uri", keyPath);

    WriteAheadLogIT.setupConfig(cfg, hadoopCoreSite);

    // setup key file
    try {
        Path keyFile = new Path(keyPath);
        FileSystem fs = FileSystem.getLocal(new Configuration());
        fs.delete(keyFile, true);
        if (fs.createNewFile(keyFile))
            log.info("Created keyfile at {}", keyPath);
        else
            log.error("Failed to create key file at {}", keyPath);

        try (FSDataOutputStream out = fs.create(keyFile)) {
            out.writeUTF("sixteenbytekey"); // 14 + 2 from writeUTF
        }
    } catch (Exception e) {
        log.error("Exception during configure", e);
    }
}

From source file:org.apache.accumulo.test.scalability.Run.java

License:Apache License

public static void main(String[] args) throws Exception {

    final String sitePath = "/tmp/scale-site.conf";
    final String testPath = "/tmp/scale-test.conf";
    Opts opts = new Opts();
    opts.parseArgs(Run.class.getName(), args);

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs;
    fs = FileSystem.get(conf);

    fs.copyToLocalFile(new Path("/accumulo-scale/conf/site.conf"), new Path(sitePath));
    fs.copyToLocalFile(new Path(String.format("/accumulo-scale/conf/%s.conf", opts.testId)),
            new Path(testPath));

    // load configuration file properties
    Properties scaleProps = new Properties();
    Properties testProps = new Properties();
    try {
        FileInputStream fis = new FileInputStream(sitePath);
        try {
            scaleProps.load(fis);
        } finally {
            fis.close();
        }
        fis = new FileInputStream(testPath);
        try {
            testProps.load(fis);
        } finally {
            fis.close();
        }
    } catch (Exception e) {
        log.error("Error loading config file.", e);
    }

    ScaleTest test = (ScaleTest) Class
            .forName(String.format("org.apache.accumulo.test.scalability.%s", opts.testId)).newInstance();

    test.init(scaleProps, testProps, opts.numTabletServers);

    if (opts.action.equalsIgnoreCase("setup")) {
        test.setup();
    } else if (opts.action.equalsIgnoreCase("client")) {
        InetAddress addr = InetAddress.getLocalHost();
        String host = addr.getHostName();
        fs.createNewFile(new Path("/accumulo-scale/clients/" + host));
        test.client();
        fs.copyFromLocalFile(new Path("/tmp/scale.out"), new Path("/accumulo-scale/results/" + host));
    } else if (opts.action.equalsIgnoreCase("teardown")) {
        test.teardown();
    }
}

From source file:org.apache.ambari.fast_hdfs_resource.Resource.java

License:Apache License

public static void createResource(Resource resource, FileSystem dfs, Path pathHadoop) throws IOException {

    boolean isCreate = (resource.getSource() == null) ? true : false;

    if (isCreate && resource.getType().equals("directory")) {
        dfs.mkdirs(pathHadoop); // empty dir(s)
    } else if (isCreate && resource.getType().equals("file")) {
        dfs.createNewFile(pathHadoop); // empty file
    } else {
        if (dfs.exists(pathHadoop) && dfs.getFileStatus(pathHadoop).isDir()) {
            System.out.println(
                    "Skipping copy from local, as target " + pathHadoop + " is an existing directory."); // Copy from local to existing directory is not supported by dfs.
        } else {
            dfs.copyFromLocalFile(new Path(resource.getSource()), pathHadoop);
        }
    }
}

From source file:org.apache.beam.sdk.io.hadoop.format.HDFSSynchronizationTest.java

License:Apache License

@Test
public void testCatchingRemoteException() throws IOException {
    FileSystem mockedFileSystem = Mockito.mock(FileSystem.class);
    RemoteException thrownException = new RemoteException(AlreadyBeingCreatedException.class.getName(),
            "Failed to CREATE_FILE");
    Mockito.when(mockedFileSystem.createNewFile(Mockito.any())).thenThrow(thrownException);

    HDFSSynchronization synchronization = new HDFSSynchronization("someDir", (conf) -> mockedFileSystem);

    assertFalse(synchronization.tryAcquireJobLock(configuration));
}