Example usage for org.apache.hadoop.fs FileSystem append

List of usage examples for org.apache.hadoop.fs FileSystem append

Introduction

On this page you can find example usages of the org.apache.hadoop.fs FileSystem append method.

Prototype

public FSDataOutputStream append(Path f) throws IOException 

Source Link

Document

Append to an existing file (optional operation).
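
Before the examples, here is a minimal sketch of the call pattern. The namenode URI and file path are hypothetical, and the usual Hadoop imports (org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.*) plus java.net.URI and java.nio.charset.StandardCharsets are assumed. Note that append() fails if the file does not exist, so callers typically create the file first or fall back to create(), as several of the examples below do.

public void appendLine(String line) throws IOException {
    Configuration conf = new Configuration();
    // hypothetical namenode address
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
        Path file = new Path("/tmp/example.log"); // hypothetical path
        if (!fs.exists(file)) {
            fs.create(file).close(); // append() requires an existing file
        }
        try (FSDataOutputStream out = fs.append(file)) {
            out.write(line.getBytes(StandardCharsets.UTF_8));
            out.hflush(); // make the appended bytes visible to concurrent readers
        }
    }
}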

Usage

From source file:org.apache.kylin.monitor.QueryParser.java

License:Apache License

public void writeResultToHdfs(String dPath, String[] record) throws IOException {
    OutputStreamWriter writer = null;
    CSVWriter cwriter = null;
    FileSystem fs = null;
    try {
        fs = this.getHdfsFileSystem();
        org.apache.hadoop.fs.Path resultStorePath = new org.apache.hadoop.fs.Path(dPath);
        writer = new OutputStreamWriter(fs.append(resultStorePath));
        cwriter = new CSVWriter(writer, '|', CSVWriter.NO_QUOTE_CHARACTER);

        cwriter.writeNext(record);

    } catch (IOException e) {
        logger.info("Exception", e);
    } finally {
        if (cwriter != null) {
            cwriter.close(); // closing the CSVWriter also flushes and closes the underlying writer
        } else if (writer != null) {
            writer.close();
        }
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:org.apache.metamodel.util.HdfsResource.java

License:Apache License

@Override
public OutputStream append() throws ResourceException {
    final FileSystem fs = getHadoopFileSystem();
    try {
        final FSDataOutputStream out = fs.append(getHadoopPath());
        return new HdfsFileOutputStream(out, fs);
    } catch (IOException e) {
        // on failure, close 'fs' here; on success the returned HdfsFileOutputStream owns it
        FileHelper.safeClose(fs);
        throw wrapException(e);
    }
}

From source file:org.apache.pulsar.io.hdfs2.sink.HdfsAbstractSink.java

License:Apache License

protected FSDataOutputStream getHdfsStream() throws IllegalArgumentException, IOException {
    if (hdfsStream == null) {
        Path path = getPath();
        FileSystem fs = getFileSystemAsUser(getConfiguration(), getUserGroupInformation());
        hdfsStream = fs.exists(path) ? fs.append(path) : fs.create(path);
    }
    return hdfsStream;
}
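
The exists()/append() check above is not atomic: the file could appear or disappear between the two calls. A hedged alternative (a sketch, not part of the Pulsar source) is to try the append first and fall back to create() when the filesystem reports the file as missing; on HDFS a missing file typically surfaces as a java.io.FileNotFoundException:

protected FSDataOutputStream openOrCreate(FileSystem fs, Path path) throws IOException {
    try {
        return fs.append(path);
    } catch (FileNotFoundException e) {
        // the file does not exist yet, so create it instead
        return fs.create(path);
    }
}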

From source file:org.apache.ranger.audit.provider.hdfs.HdfsLogDestination.java

License:Apache License

private void openFile() {
    mLogger.debug("==> HdfsLogDestination.openFile()");

    closeFile();

    mNextRolloverTime = MiscUtil.getNextRolloverTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L));

    long startTime = MiscUtil.getRolloverStartTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L));

    mHdfsFilename = MiscUtil.replaceTokens(mDirectory + Path.SEPARATOR + mFile, startTime);

    FSDataOutputStream ostream = null;
    FileSystem fileSystem = null;
    Path pathLogfile = null;
    Configuration conf = null;
    boolean bOverwrite = false;

    try {
        mLogger.debug("HdfsLogDestination.openFile(): opening file " + mHdfsFilename);

        URI uri = URI.create(mHdfsFilename);

        // TODO: mechanism to XA-HDFS plugin to disable auditing of access checks to the current HDFS file

        conf = createConfiguration();
        pathLogfile = new Path(mHdfsFilename);
        fileSystem = FileSystem.get(uri, conf);

        try {
            if (fileSystem.exists(pathLogfile)) { // file already exists. either append to the file or write to a new file
                if (mIsAppend) {
                    mLogger.info("HdfsLogDestination.openFile(): opening file for append " + mHdfsFilename);

                    ostream = fileSystem.append(pathLogfile);
                } else {
                    mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem);
                    pathLogfile = new Path(mHdfsFilename);
                }
            }

            // if file does not exist or if mIsAppend==false, create the file
            if (ostream == null) {
                mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename);

                createParents(pathLogfile, fileSystem);
                ostream = fileSystem.create(pathLogfile, bOverwrite);
            }
        } catch (IOException excp) {
            // append may not be supported by the filesystem; or the file might already be open by another application. Try a different filename
            String failedFilename = mHdfsFilename;

            mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem);
            pathLogfile = new Path(mHdfsFilename);

            mLogger.info("HdfsLogDestination.openFile(): failed in opening file " + failedFilename
                    + ". Will try opening " + mHdfsFilename);
        }

        if (ostream == null) {
            mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename);

            createParents(pathLogfile, fileSystem);
            ostream = fileSystem.create(pathLogfile, bOverwrite);
        }
    } catch (Throwable ex) {
        mLogger.warn("HdfsLogDestination.openFile() failed", ex);
        //      } finally {
        // TODO: unset the property set above to exclude auditing of logfile opening
        //        System.setProperty(hdfsCurrentFilenameProperty, null);
    }

    mWriter = createWriter(ostream);

    if (mWriter != null) {
        mLogger.debug("HdfsLogDestination.openFile(): opened file " + mHdfsFilename);

        mFsDataOutStream = ostream;
        mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L);
        mLastOpenFailedTime = 0;
    } else {
        mLogger.warn("HdfsLogDestination.openFile(): failed to open file for write " + mHdfsFilename);

        mHdfsFilename = null;
        mLastOpenFailedTime = System.currentTimeMillis();
    }

    mLogger.debug("<== HdfsLogDestination.openFile(" + mHdfsFilename + ")");
}

From source file:org.apache.ranger.services.hdfs.HDFSRangerTest.java

License:Apache License

@org.junit.Test
public void writeTest() throws Exception {

    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir2/data-file3");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // Now try to write to the file as "bob" - this should be allowed (by the policy - user)
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("bob", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Open the file for append - this triggers the write permission check
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as "alice" - this should be allowed (by the policy - group)
    ugi = UserGroupInformation.createUserForTesting("alice", new String[] { "IT" });
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Open the file for append - this triggers the write permission check
            fs.append(file);

            fs.close();
            return null;
        }
    });

    // Now try to write to the file as unknown user "eve" - this should not be allowed
    ugi = UserGroupInformation.createUserForTesting("eve", new String[] {});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {

        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);

            FileSystem fs = FileSystem.get(conf);

            // Try to open the file for append - this should be denied
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (RemoteException ex) {
                // expected
                Assert.assertTrue(RangerAccessControlException.class.getName().equals(ex.getClassName()));
            }

            fs.close();
            return null;
        }
    });
}

From source file:org.apache.reef.runtime.yarn.driver.YarnContainerManager.java

License:Apache License

private void writeToEvaluatorLog(final String entry) throws IOException {
    final org.apache.hadoop.conf.Configuration config = new org.apache.hadoop.conf.Configuration();
    config.setBoolean("dfs.support.append", true);
    config.setBoolean("dfs.support.broken.append", true);
    final FileSystem fs = getFileSystemInstance();
    final Path path = new Path(getChangeLogLocation());
    final boolean appendToLog = fs.exists(path);

    try (final BufferedWriter bw = appendToLog ? new BufferedWriter(new OutputStreamWriter(fs.append(path)))
            : new BufferedWriter(new OutputStreamWriter(fs.create(path)));) {
        bw.write(entry);
    } catch (final IOException e) {
        if (appendToLog) {
            LOG.log(Level.FINE,
                    "Unable to add an entry to the Evaluator log. Attempting append by delete and recreate", e);
            appendByDeleteAndCreate(fs, path, entry);
        }
    }
}
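
The appendByDeleteAndCreate helper is not shown in this excerpt. A plausible sketch of such a fallback - read the old content, delete the file, and recreate it with the old content plus the new entry - is given below; the body is an assumption, not the actual REEF implementation (it assumes org.apache.hadoop.io.IOUtils, java.io.ByteArrayOutputStream, and java.nio.charset.StandardCharsets are imported):

private void appendByDeleteAndCreate(final FileSystem fs, final Path path, final String entry) throws IOException {
    final ByteArrayOutputStream oldContent = new ByteArrayOutputStream();
    try (final FSDataInputStream in = fs.open(path)) {
        IOUtils.copyBytes(in, oldContent, 4096, false); // read the existing log
    }
    fs.delete(path, false);
    try (final FSDataOutputStream out = fs.create(path, true)) {
        out.write(oldContent.toByteArray());
        out.write(entry.getBytes(StandardCharsets.UTF_8));
    }
}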

From source file:org.apache.solr.update.HdfsTransactionLog.java

License:Apache License

HdfsTransactionLog(FileSystem fs, Path tlogFile, Collection<String> globalStrings, boolean openExisting) {
    super();
    boolean success = false;
    this.fs = fs;

    try {
        if (debug) {
            //log.debug("New TransactionLog file=" + tlogFile + ", exists=" + tlogFile.exists() + ", size=" + tlogFile.length() + ", openExisting=" + openExisting);
        }
        this.tlogFile = tlogFile;

        // TODO: look into forcefully taking over any lease
        if (fs.exists(tlogFile) && openExisting) {
            tlogOutStream = fs.append(tlogFile);
        } else {
            fs.delete(tlogFile, false);

            tlogOutStream = fs.create(tlogFile, (short) 1);
            tlogOutStream.hsync();
        }

        fos = new FastOutputStream(tlogOutStream, new byte[65536], 0);
        long start = tlogOutStream.getPos();

        if (openExisting) {
            if (start > 0) {
                readHeader(null);

                // we should already be at the end 
                // raf.seek(start);

                //  assert channel.position() == start;
                fos.setWritten(start); // reflect that we aren't starting at the beginning
                //assert fos.size() == channel.size();
            } else {
                addGlobalStrings(globalStrings);
            }
        } else {
            if (start > 0) {
                log.error("New transaction log already exists:" + tlogFile + " size=" + tlogOutStream.size());
            }

            addGlobalStrings(globalStrings);
        }

        success = true;

    } catch (IOException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    } finally {
        if (!success && tlogOutStream != null) {
            try {
                tlogOutStream.close();
            } catch (Exception e) {
                log.error("Error closing tlog file (after error opening)", e);
            }
        }
    }
}
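
The "forcefully taking over any lease" TODO above refers to HDFS lease recovery: a previous writer's lease must be released before append() will succeed. A hedged sketch, assuming the FileSystem is actually an org.apache.hadoop.hdfs.DistributedFileSystem, could ask the NameNode to recover the lease before appending:

if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // returns true once the file is closed and the lease is free; may need to be retried
    boolean recovered = dfs.recoverLease(tlogFile);
}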

From source file:org.deeplearning4j.listeners.SparkScoreIterationListener.java

License:Apache License

@Override
public void iterationDone(Model model, int iteration, int epoch) {

    if (printIterations <= 0) {
        printIterations = 1;
    }
    String newScore = "";
    if (iteration % printIterations == 0) {
        double score = model.score();
        newScore += "Score at iteration {" + iteration + "} is {" + score + "}";
        log.info(newScore);
    }
    if (newScore.isEmpty()) {
        return; // avoid appending a blank line when this iteration is not logged
    }
    FileSystem nfs = null;
    try {
        nfs = CommonUtils.openHdfsConnect();
        Path path = new Path(pathStr);
        // using append here is not ideal, but for now it handles the score log whether or not it already exists
        FSDataOutputStream out = nfs.append(path);
        out.write(newScore.getBytes());
        out.write("\n".getBytes());
        out.hsync();
        out.close();
        CommonUtils.closeHdfsConnect(nfs);
    } catch (RemoteException e) {
        // drop this entry if the remote append fails (e.g. the file's lease is held by another writer)
        if (nfs != null) {
            CommonUtils.closeHdfsConnect(nfs);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

}

From source file:org.kitesdk.apps.spark.spi.streaming.SparkStreamingJobManager.java

License:Apache License

private static void writeDescription(FileSystem fs, Path appRoot, StreamDescription description) {

    Path streamingJobPath = jobDescriptionFile(appRoot, description.getJobName());

    try {
        fs.mkdirs(streamingJobPath.getParent());
    } catch (IOException e) {
        throw new AppException(e);
    }

    OutputStream output = null;

    try {
        output = fs.append(streamingJobPath); // note: append() requires the file to already exist
        OutputStreamWriter writer = new OutputStreamWriter(output);
        writer.write(description.toString());
        writer.flush(); // flush the writer's buffer before the raw stream is closed in the finally block

    } catch (IOException e) {
        throw new AppException(e);
    } finally {
        Closeables.closeQuietly(output);
    }
}

From source file:org.opencloudengine.garuda.backend.hdfs.HdfsServiceImpl.java

License:Open Source License

private void _appendFile(String path, InputStream is) throws Exception {
    FileSystem fs = fileSystemFactory.getFileSystem();
    Path fsPath = new Path(path);
    FSDataOutputStream out = fs.append(fsPath);
    try {
        byte[] b = new byte[1024];
        int numBytes = 0;
        while ((numBytes = is.read(b)) > 0) {
            out.write(b, 0, numBytes);
        }
    } finally {
        // close the streams even if the copy fails part-way
        is.close();
        out.close();
        fs.close();
    }
}