Example usage for org.apache.hadoop.fs FileSystem append

Introduction

This page lists usage examples for org.apache.hadoop.fs.FileSystem append.

Prototype

public FSDataOutputStream append(Path f) throws IOException 

Document

Append to an existing file (optional operation).
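
Before the full examples below, here is a minimal sketch of the call pattern, assuming a placeholder fs.defaultFS URI and file path. Append is an optional operation, so some FileSystem implementations may not support it and will throw an exception instead.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendExample {
    public static void main(String[] args) throws IOException {
        // Placeholder URI and path; adjust for your cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), new Configuration());
        Path file = new Path("/tmp/example.log");

        // append() requires an existing file, so create it on first use.
        if (!fs.exists(file)) {
            fs.create(file).close();
        }

        // The returned stream is positioned at the end of the existing data.
        try (FSDataOutputStream out = fs.append(file)) {
            out.writeBytes("appended line\n");
        }
    }
}

As in the examples below, callers typically check exists() first and fall back to create() when the file is not there yet.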

Usage

From source file:gobblin.metrics.JobMetrics.java

License:Open Source License

private void buildFileMetricReporter(Properties properties) {
    if (!Boolean.valueOf(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_FILE_ENABLED_KEY,
            ConfigurationKeys.DEFAULT_METRICS_REPORTING_FILE_ENABLED))) {
        LOGGER.info("Not reporting metrics to log files");
        return;
    }

    if (!properties.containsKey(ConfigurationKeys.METRICS_LOG_DIR_KEY)) {
        LOGGER.error("Not reporting metrics to log files because " + ConfigurationKeys.METRICS_LOG_DIR_KEY
                + " is undefined");
        return;
    }

    try {
        String fsUri = properties.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI);
        FileSystem fs = FileSystem.get(URI.create(fsUri), new Configuration());

        // Each job gets its own metric log subdirectory
        Path metricsLogDir = new Path(properties.getProperty(ConfigurationKeys.METRICS_LOG_DIR_KEY),
                this.jobName);
        if (!fs.exists(metricsLogDir) && !fs.mkdirs(metricsLogDir)) {
            LOGGER.error("Failed to create metric log directory for job " + this.jobName);
            return;
        }

        // Each job run gets its own metric log file
        Path metricLogFile = new Path(metricsLogDir, this.jobId + ".metrics.log");
        boolean append = false;
        // Append to the metric file if it already exists
        if (fs.exists(metricLogFile)) {
            LOGGER.info(String.format("Metric log file %s already exists, appending to it", metricLogFile));
            append = true;
        }

        PrintStream ps = append ? this.closer.register(new PrintStream(fs.append(metricLogFile)))
                : this.closer.register(new PrintStream(fs.create(metricLogFile)));
        this.fileReporter = Optional.of(ConsoleReporter.forRegistry(this.metricRegistry).outputTo(ps)
                .convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS).build());
    } catch (IOException ioe) {
        LOGGER.error("Failed to build file metric reporter for job " + this.jobId, ioe);
    }
}

From source file:io.hops.experiments.utils.DFSOperationsUtils.java

License:Apache License

public static void appendFile(FileSystem dfs, String pathStr, long size) throws IOException {
    if (SERVER_LESS_MODE) {
        serverLessModeRandomWait();
        return;
    }

    // try-with-resources closes the stream even if a write fails
    try (FSDataOutputStream out = dfs.append(new Path(pathStr))) {
        for (long bytesWritten = 0; bytesWritten < size; bytesWritten += 4) {
            out.writeInt(1);
        }
    }
}

From source file:io.transwarp.flume.sink.HDFSCompressedDataStream.java

License:Apache License

@Override
public void open(String filePath, CompressionCodec codec, CompressionType cType) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }
    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        fsOut = hdfs.append(dstPath);
        appending = true;
    } else {
        fsOut = hdfs.create(dstPath);
    }
    if (compressor == null) {
        compressor = CodecPool.getCompressor(codec, conf);
    }
    cmpOut = codec.createOutputStream(fsOut, compressor);
    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, cmpOut);
    if (appending && !serializer.supportsReopen()) {
        cmpOut.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    registerCurrentStream(fsOut, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
    isFinished = false;
}

From source file:org.apache.accumulo.server.master.recovery.HadoopLogCloser.java

License:Apache License

@Override
public long close(AccumuloConfiguration conf, VolumeManager fs, Path source) throws IOException {
    FileSystem ns = fs.getVolumeByPath(source).getFileSystem();

    // if path points to a viewfs path, then resolve to underlying filesystem
    if (ViewFSUtils.isViewFS(ns)) {
        Path newSource = ns.resolvePath(source);
        if (!newSource.equals(source) && newSource.toUri().getScheme() != null) {
            ns = newSource.getFileSystem(CachedConfiguration.getInstance());
            source = newSource;
        }
    }

    if (ns instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) ns;
        try {
            if (!dfs.recoverLease(source)) {
                log.info("Waiting for file to be closed " + source.toString());
                return conf.getTimeInMillis(Property.MASTER_LEASE_RECOVERY_WAITING_PERIOD);
            }
            log.info("Recovered lease on " + source.toString());
        } catch (FileNotFoundException ex) {
            throw ex;
        } catch (Exception ex) {
            log.warn("Error recovering lease on " + source.toString(), ex);
            ns.append(source).close();
            log.info("Recovered lease on " + source.toString() + " using append");
        }
    } else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
        // ignore
    } else {
        throw new IllegalStateException("Don't know how to recover a lease for " + ns.getClass().getName());
    }
    return 0;
}

From source file:org.apache.carbondata.core.datastorage.store.impl.FileFactory.java

License:Apache License

/**
 * Gets a DataOutputStream using the HDFS FileSystem append API.
 *
 * @param path     the path of the file to append to
 * @param fileType the type of the backing file system
 * @return a stream positioned at the end of the existing file
 * @throws IOException if the stream cannot be opened for append
 */
public static DataOutputStream getDataOutputStreamUsingAppend(String path, FileType fileType)
        throws IOException {
    path = path.replace("\\", "/");
    switch (fileType) {
    case LOCAL:
        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path, true)));
    case HDFS:
    case VIEWFS:
        Path pt = new Path(path);
        FileSystem fs = pt.getFileSystem(configuration);
        return fs.append(pt);
    default:
        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
    }
}

From source file:org.apache.carbondata.core.datastore.impl.FileFactory.java

License:Apache License

/**
 * Gets a DataOutputStream using the HDFS FileSystem append API.
 *
 * @param path     the path of the file to append to
 * @param fileType the type of the backing file system
 * @return a stream positioned at the end of the existing file
 * @throws IOException if the stream cannot be opened for append
 */
public static DataOutputStream getDataOutputStreamUsingAppend(String path, FileType fileType)
        throws IOException {
    path = path.replace("\\", "/");
    switch (fileType) {
    case LOCAL:
        path = getUpdatedFilePath(path, fileType);
        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path, true)));
    case HDFS:
    case ALLUXIO:
    case VIEWFS:
        Path pt = new Path(path);
        FileSystem fs = pt.getFileSystem(configuration);
        return fs.append(pt);
    default:
        return new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
    }
}

From source file:org.apache.coheigea.bigdata.hdfs.HDFSAccessControlEnforcerTest.java

License:Apache License

@org.junit.Test
public void customPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Now try to read the file as "eve" - this should not be allowed
    ugi = UserGroupInformation.createRemoteUser("eve");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });

    // Write to the file as the owner, this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();

    // Now try to write to the file as "bob" - this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}

From source file:org.apache.coheigea.bigdata.hdfs.HDFSTest.java

License:Apache License

@org.junit.Test
public void defaultPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Check status
    // FileStatus status = fileSystem.getFileStatus(file);
    // System.out.println("OWNER: " + status.getOwner());
    // System.out.println("GROUP: " + status.getGroup());
    // System.out.println("PERM: " + status.getPermission().toString());
    // fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
    // fileSystem.setOwner(file, "bob", null);

    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));

            fs.close();
            return null;
        }
    });

    // Write to the file as the owner, this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();

    // Now try to write to the file as "bob" - this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }

            fs.close();
            return null;
        }
    });
}

From source file:org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java

License:Apache License

@org.junit.Test
public void writeTest() throws Exception {

    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir2/data-file3");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to write to the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as known user "dave" - this should not be allowed, as he doesn't have the correct permissions
    ugi = UserGroupInformation.createUserForTesting("dave", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}

From source file:org.apache.flink.runtime.fs.hdfs.HadoopRecoverableFsDataOutputStream.java

License:Apache License

HadoopRecoverableFsDataOutputStream(FileSystem fs, HadoopFsRecoverable recoverable) throws IOException {

    ensureTruncateInitialized();

    this.fs = checkNotNull(fs);
    this.targetFile = checkNotNull(recoverable.targetFile());
    this.tempFile = checkNotNull(recoverable.tempFile());

    // truncate back and append
    try {
        truncate(fs, tempFile, recoverable.offset());
    } catch (Exception e) {
        throw new IOException("Missing data in tmp file: " + tempFile, e);
    }

    waitUntilLeaseIsRevoked(tempFile);
    out = fs.append(tempFile);

    // sanity check
    long pos = out.getPos();
    if (pos != recoverable.offset()) {
        IOUtils.closeQuietly(out);
        throw new IOException(
                "Truncate failed: " + tempFile + " (requested=" + recoverable.offset() + " ,size=" + pos + ')');
    }
}