Example usage for org.apache.hadoop.fs FileSystem append

List of usage examples for org.apache.hadoop.fs FileSystem append

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem append.

Prototype

public FSDataOutputStream append(Path f) throws IOException 

Document

Append to an existing file (optional operation).
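
A minimal, self-contained sketch of the pattern most of the examples below share (append when the target already exists, otherwise create it). The path here is a hypothetical placeholder; since append is an optional operation, an implementation that does not support it will throw an exception at the append call.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendExample {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/append-example.log"); // hypothetical path

        // Append when the file exists; otherwise create it. append() is
        // optional, so an implementation may reject it with an exception.
        try (FSDataOutputStream out = fs.exists(path) ? fs.append(path) : fs.create(path)) {
            out.writeBytes("one more line\n");
        }
    }
}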

Usage

From source file:com.idvp.platform.hdfs.HDFSDataStream.java

License:Apache License

protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
        appending = true;
    } else {
        outStream = hdfs.create(dstPath);
    }

    serializer = new BodyTextEventSerializer.Builder().build(outStream);
    if (appending && !serializer.supportsReopen()) {
        outStream.close();
        serializer = null;
        throw new IOException("serializer (TEXT) does not support append");
    }

    // must call superclass to check for replication issues
    registerCurrentStream(outStream, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
}

From source file:com.linkedin.pinot.core.indexsegment.utils.InputOutputStreamUtils.java

License:Apache License

public static DataOutputStream getAppendOutputStream(String filePath, FileSystemMode mode, FileSystem fs) {
    try {
        DataOutputStream os = null;
        switch (mode) {
        case DISK:
            File appendFile = new File(filePath);
            if (!appendFile.exists()) {
                if (!appendFile.getParentFile().exists()) {
                    appendFile.getParentFile().mkdirs();
                }
                appendFile.createNewFile();
            }
            os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(appendFile, true)));
            break;
        case HDFS:
            os = fs.append(new Path(filePath));
            break;
        default:
            throw new UnsupportedOperationException();
        }
        return os;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}

From source file:com.mellanox.r4h.TestReadWhileWriting.java

License:Apache License

/** Try opening a file for append. */
private static FSDataOutputStream append(FileSystem fs, Path p) throws Exception {
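    // Retry while HDFS lease recovery is still in progress (up to ~10 seconds).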
    for (int i = 0; i < 10; i++) {
        try {
            return fs.append(p);
        } catch (RemoteException re) {
            if (re.getClassName().equals(RecoveryInProgressException.class.getName())) {
                MiniDFSClusterBridge.getAppendTestUtilLOG().info("Will sleep and retry, i=" + i + ", p=" + p,
                        re);
                Thread.sleep(1000);
            } else
                throw re;
        }
    }
    throw new IOException("Cannot append to " + p);
}

From source file:com.netflix.aegisthus.tools.StorageHelper.java

License:Apache License

public void logCommit(String file) throws IOException {
    Path log = commitPath(getTaskId());
    if (debug) {
        LOG.info(String.format("logging (%s) to commit log (%s)", file, log.toUri().toString()));
    }
    FileSystem fs = log.getFileSystem(config);
    DataOutputStream os = null;
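    // Append to the commit log if it already exists; otherwise create it.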
    if (fs.exists(log)) {
        os = fs.append(log);
    } else {
        os = fs.create(log);
    }
    os.writeBytes(file);
    os.write('\n');
    os.close();
}

From source file:com.splicemachine.derby.impl.io.HdfsDirFile.java

License:Apache License

@Override
public OutputStream getOutputStream(boolean append) throws FileNotFoundException {
    if (append) {
        try {
            FileSystem fs = getFileSystem();
            return fs.append(new Path(path));
        } catch (FileNotFoundException fnfe) {
            throw fnfe;
        } catch (IOException e) {
            LOG.error(String.format("An exception occurred while creating the file '%s'.", path), e);
            return null;
        }
    } else {
        this.delete();
        return getOutputStream();
    }
}

From source file:com.toddbodnar.simpleHive.IO.hdfsFile.java

public void append(String line) {
    try {
        if (!writing) {
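            // Switch from reading to appending: close the reader and reopen the file for append.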
            in.close();
            FileSystem fs = FileSystem.get(GetConfiguration.get());
            out = new BufferedWriter(new OutputStreamWriter(fs.append(location)));
            writing = true;
        }
        out.write(line + "\n");
    } catch (IOException ex) {
        Logger.getLogger(hdfsFile.class.getName()).log(Level.SEVERE, null, ex);
    }

}

From source file:com.vf.flume.sink.hdfs.HDFSDataStream.java

License:Apache License

protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }

    boolean appending = false;
    // The original check, commented out in this source, also gated appends on
    // conf.getBoolean("hdfs.append.support", false); as written, the method
    // appends whenever the destination path is already a file.
    if (hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
        appending = true;
    } else {
        outStream = hdfs.create(dstPath);
    }

    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, outStream);
    if (appending && !serializer.supportsReopen()) {
        outStream.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    // must call superclass to check for replication issues
    registerCurrentStream(outStream, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
}

From source file:cz.muni.fi.xfabian7.bp.mgrid.HdfsStorageBucket.java

/**
 * Return an OutputStream over the path using FileSystem
 *
 * @param fs FileSystem
 * @return OutputStream
 * @throws IOException
 */
public OutputStream openOutputStream(FileSystem fs, Path path) throws IOException {
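    // append() requires an existing file, so create one first if needed.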
    if (!fs.isFile(path)) {
        createFile(path);
    }
    return fs.append(path);
}

From source file:fuse4j.hadoopfs.HdfsClientImpl.java

License:Apache License

@Override
public Object open(int uid, String path, int flags) {
    FileSystem dfs = null;
    try {
        dfs = getDfs(uid);
        //based on fuse_impls_open in C fuse_dfs
        // 0x8000 is always passed in and hadoop doesn't like it, so killing it here
        // bugbug figure out what this flag is and report problem to Hadoop JIRA
        int hdfs_flags = (flags & 0x7FFF);
        System.out.println("HDFS CLIENT OPEN FILE:" + path + " mode:" + Integer.toOctalString(hdfs_flags));

        //TODO: connect to DFS as calling user to enforce perms
        //see doConnectAsUser(dfs->nn_hostname, dfs->nn_port);

        if ((hdfs_flags & NativeIO.O_RDWR) == NativeIO.O_RDWR) {
            hdfs_flags ^= NativeIO.O_RDWR;
            try {
                FileStatus fileStatus = dfs.getFileStatus(new Path(path));
                if (this.newFiles.containsKey(path)) {
                    // just previously created by "mknod" so open it in write-mode
                    hdfs_flags |= NativeIO.O_WRONLY;
                } else {
                    // File exists; open this as read only.
                    hdfs_flags |= NativeIO.O_RDONLY;
                }
            } catch (IOException e) {
                // File does not exist (maybe?); interpret it as O_WRONLY
                // If the actual error was something else, we'll get it again when
                // we try to open the file.
                hdfs_flags |= NativeIO.O_WRONLY;
            }
        }

        ///
        Path hPath = new Path(path);
        if ((hdfs_flags & NativeIO.O_WRONLY) == 0) {
            //READ
            System.out.println("HDFS <open> file:" + path);
            return new HdfsFileIoContext(path, dfs.open(hPath));
        } else if ((hdfs_flags & NativeIO.O_APPEND) != 0) {
            //WRITE/APPEND
            System.out.println("HDFS <append> file:" + path);
            return new HdfsFileIoContext(path, dfs.append(hPath));
        } else {
            //WRITE/CREATE
            System.out.println("HDFS <create> file:" + path);
            HdfsFileIoContext fh = this.newFiles.remove(path);
            if (fh == null) {
                fh = new HdfsFileIoContext(path, dfs.create(new Path(path), true));
                System.out.println("File " + path + "created");
            } else {
                System.out.println("File " + path + "already created by a previous <mknod> call");
            }
            System.out.println("files queued:" + this.newFiles.size());
            return fh;
        }
    } catch (Exception e) {
        // fall through to failure
    }
    return null;
}

From source file:gobblin.metrics.GobblinMetrics.java

License:Apache License

private void buildFileMetricReporter(Properties properties) {
    if (!Boolean.valueOf(properties.getProperty(ConfigurationKeys.METRICS_REPORTING_FILE_ENABLED_KEY,
            ConfigurationKeys.DEFAULT_METRICS_REPORTING_FILE_ENABLED))) {
        return;
    }
    LOGGER.info("Reporting metrics to log files");

    if (!properties.containsKey(ConfigurationKeys.METRICS_LOG_DIR_KEY)) {
        LOGGER.error("Not reporting metrics to log files because " + ConfigurationKeys.METRICS_LOG_DIR_KEY
                + " is undefined");
        return;
    }

    try {
        String fsUri = properties.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI);
        FileSystem fs = FileSystem.get(URI.create(fsUri), new Configuration());

        // Each job gets its own metric log subdirectory
        Path metricsLogDir = new Path(properties.getProperty(ConfigurationKeys.METRICS_LOG_DIR_KEY),
                this.getName());
        if (!fs.exists(metricsLogDir) && !fs.mkdirs(metricsLogDir)) {
            LOGGER.error("Failed to create metric log directory for metrics " + this.getName());
            return;
        }

        // Add a suffix to file name if specified in properties.
        String metricsFileSuffix = properties.getProperty(ConfigurationKeys.METRICS_FILE_SUFFIX,
                ConfigurationKeys.DEFAULT_METRICS_FILE_SUFFIX);
        if (!Strings.isNullOrEmpty(metricsFileSuffix) && !metricsFileSuffix.startsWith(".")) {
            metricsFileSuffix = "." + metricsFileSuffix;
        }

        // Each job run gets its own metric log file
        Path metricLogFile = new Path(metricsLogDir, this.id + metricsFileSuffix + ".metrics.log");
        boolean append = false;
        // Append to the metric file if it already exists
        if (fs.exists(metricLogFile)) {
            LOGGER.info(String.format("Metric log file %s already exists, appending to it", metricLogFile));
            append = true;
        }

        OutputStream output = append ? fs.append(metricLogFile) : fs.create(metricLogFile, true);
        OutputStreamReporter.Factory.newBuilder().outputTo(output).build(properties);
        this.codahaleScheduledReporters.add(this.codahaleReportersCloser.register(
                OutputStreamEventReporter.forContext(RootMetricContext.get()).outputTo(output).build()));

        LOGGER.info("Will start reporting metrics to directory " + metricsLogDir);
    } catch (IOException ioe) {
        LOGGER.error("Failed to build file metric reporter for job " + this.id, ioe);
    }
}