Example usage for org.apache.hadoop.fs FileSystem close

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileSystem#close().

Prototype

@Override
public void close() throws IOException 

Document

Close this FileSystem instance.
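
A minimal sketch of the pattern the examples below follow: obtain a FileSystem, use it, and close it. This block is not taken from any of the source files listed on this page; the hdfs:// URI is a placeholder, and FileSystem.newInstance is used so that closing the handle does not tear down the cached instance that FileSystem.get() would share with other callers. Because FileSystem implements java.io.Closeable, try-with-resources takes care of the close() call.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder URI; point this at a real cluster or a mini cluster.
        URI uri = URI.create("hdfs://localhost:8020/");

        // newInstance() returns an uncached FileSystem, so closing it here does not
        // invalidate instances that other code obtained via FileSystem.get().
        try (FileSystem fs = FileSystem.newInstance(uri, conf)) {
            System.out.println("Home directory: " + fs.getHomeDirectory());
            System.out.println("/tmp exists: " + fs.exists(new Path("/tmp")));
        } // fs.close() runs automatically here
    }
}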

Usage

From source file:com.github.sakserv.minicluster.impl.HdfsLocalClusterIntegrationTest.java

License:Apache License

@Test
public void testDfsClusterStart() throws Exception {

    // Write a file to HDFS containing the test string
    FileSystem hdfsFsHandle = dfsCluster.getHdfsFileSystemHandle();
    FSDataOutputStream writer = hdfsFsHandle
            .create(new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
    writer.writeUTF(propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
    writer.close();

    // Read the file and compare to test string
    FSDataInputStream reader = hdfsFsHandle
            .open(new Path(propertyParser.getProperty(ConfigVars.HDFS_TEST_FILE_KEY)));
    assertEquals(reader.readUTF(), propertyParser.getProperty(ConfigVars.HDFS_TEST_STRING_KEY));
    reader.close();
    hdfsFsHandle.close();

    URL url = new URL(String.format("http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
            propertyParser.getProperty(ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY)));
    URLConnection connection = url.openConnection();
    connection.setRequestProperty("Accept-Charset", "UTF-8");
    BufferedReader response = new BufferedReader(new InputStreamReader(connection.getInputStream()));
    String line = response.readLine();
    response.close();
    assertEquals("{\"Path\":\"/user/guest\"}", line);

}

From source file:com.github.sakserv.minicluster.impl.MRLocalClusterIntegrationTest.java

License:Apache License

private void writeFileToHdfs(String fileName, String contents) throws Exception {
    // Write a file to HDFS containing the test string
    FileSystem hdfsFsHandle = dfsCluster.getHdfsFileSystemHandle();
    FSDataOutputStream writer = hdfsFsHandle.create(new Path(fileName));
    writer.writeUTF(contents);
    writer.close();
    hdfsFsHandle.close();
}

From source file:com.github.sakserv.minicluster.impl.MRLocalClusterIntegrationTest.java

License:Apache License

private String readFileFromHdfs(String filename) throws Exception {
    FileSystem hdfsFsHandle = dfsCluster.getHdfsFileSystemHandle();
    FSDataInputStream reader = hdfsFsHandle.open(new Path(filename));
    String output = reader.readUTF();
    reader.close();
    hdfsFsHandle.close();
    return output;
}

From source file:com.github.sakserv.storm.KafkaHdfsTopologyTest.java

License:Apache License

/**
 * Validate that the files in HDFS contain the expected data from Kafka
 * @throws Exception
 */
private void validateHdfsResults() throws Exception {
    LOG.info("HDFS: VALIDATING");

    // Get the filesystem handle and a list of files written by the test
    FileSystem hdfsFsHandle = hdfsLocalCluster.getHdfsFileSystemHandle();
    RemoteIterator<LocatedFileStatus> listFiles = hdfsFsHandle.listFiles(
            new Path(propertyParser.getProperty(ConfigVars.STORM_HDFS_BOLT_OUTPUT_LOCATION_KEY)), true);

    // Loop through the files and count up the lines
    int count = 0;
    while (listFiles.hasNext()) {
        LocatedFileStatus file = listFiles.next();

        LOG.info("HDFS READ: Found File: " + file);

        BufferedReader br = new BufferedReader(new InputStreamReader(hdfsFsHandle.open(file.getPath())));
        String line = br.readLine();
        while (line != null) {
            LOG.info("HDFS READ: Found Line: " + line);
            line = br.readLine();
            count++;
        }
    }
    hdfsFsHandle.close();

    // Validate the number of lines matches the number of kafka messages
    assertEquals(Integer.parseInt(propertyParser.getProperty(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY)), count);
}

From source file:com.github.sakserv.storm.KafkaHiveHdfsTopologyTest.java

License:Apache License

public void validateHdfsResults() throws IOException {
    System.out.println("HDFS: VALIDATING");
    FileSystem hdfsFsHandle = hdfsCluster.getHdfsFileSystemHandle();
    RemoteIterator<LocatedFileStatus> listFiles = hdfsFsHandle.listFiles(new Path("/tmp/kafka_data"), true);
    while (listFiles.hasNext()) {
        LocatedFileStatus file = listFiles.next();

        System.out.println("HDFS READ: Found File: " + file);

        BufferedReader br = new BufferedReader(new InputStreamReader(hdfsFsHandle.open(file.getPath())));
        String line = br.readLine();
        while (line != null) {
            System.out.println("HDFS READ: Found Line: " + line);
            line = br.readLine();
        }
    }
    hdfsFsHandle.close();
}

From source file:com.github.ygf.pagerank.PageRank.java

License:Apache License

private void cleanPreviousIteration(int iter, Configuration conf, Path outputDir) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path prevIterDir = new Path(outputDir, "v" + (iter - 1));
    fs.delete(prevIterDir, true);
    fs.close();
}

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemIntegrationTest.java

License:Open Source License

@Test
public void testFileSystemIsRemovedFromCacheOnClose() throws IOException, URISyntaxException {
    Configuration conf = getConfigurationWtihImplementation();

    URI fsUri = new URI(String.format("gs://%s/", bucketName));

    FileSystem fs1 = FileSystem.get(fsUri, conf);
    FileSystem fs2 = FileSystem.get(fsUri, conf);

    Assert.assertSame(fs1, fs2);

    fs1.close();

    FileSystem fs3 = FileSystem.get(fsUri, conf);
    Assert.assertNotSame(fs1, fs3);

    fs3.close();
}

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemIntegrationTest.java

License:Open Source License

@Test
public void testIOExceptionIsThrowAfterClose() throws IOException, URISyntaxException {
    Configuration conf = getConfigurationWtihImplementation();

    URI fsUri = new URI(String.format("gs://%s/", bucketName));

    FileSystem fs1 = FileSystem.get(fsUri, conf);
    FileSystem fs2 = FileSystem.get(fsUri, conf);

    junit.framework.Assert.assertSame(fs1, fs2);

    fs1.close();

    expectedException.expect(IOException.class);

    fs2.exists(new Path("/SomePath/That/Doesnt/Matter"));
}
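
The two GoogleHadoopFileSystemIntegrationTest cases above revolve around FileSystem's internal cache: FileSystem.get() hands out one shared instance per scheme and authority, so fs1.close() also invalidates fs2 and the later exists() call fails. Below is a minimal sketch of two common ways to close a handle without breaking other callers; the method name, the hdfs:// address, and the fs.hdfs.impl.disable.cache setting are illustrative and not taken from the tests above.

private void closeWithoutBreakingSharedHandles(Configuration conf) throws IOException {
    // Placeholder URI; replace with the filesystem under test.
    URI uri = URI.create("hdfs://localhost:8020/");

    // Option 1: request an uncached instance; closing it does not affect FileSystem.get() callers.
    try (FileSystem privateFs = FileSystem.newInstance(uri, conf)) {
        privateFs.exists(new Path("/tmp"));
    }

    // Option 2: disable the cache for the scheme, so each get() builds a fresh instance to close.
    conf.setBoolean("fs.hdfs.impl.disable.cache", true);
    FileSystem freshFs = FileSystem.get(uri, conf);
    freshFs.close();
}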

From source file:com.hadoop.mapreduce.TestLzoTextInputFormat.java

License:Open Source License

/**
 * Generate random data, compress it, index and md5 hash the data.
 * Then read it all back and md5 that too, to verify that it all went ok.
 *
 * @param testWithIndex Should we index or not?
 * @param charsToOutput How many characters of random data should we output.
 * @throws IOException
 * @throws NoSuchAlgorithmException
 * @throws InterruptedException
 */
private void runTest(boolean testWithIndex, int charsToOutput)
        throws IOException, NoSuchAlgorithmException, InterruptedException {

    if (!GPLNativeCodeLoader.isNativeCodeLoaded()) {
        LOG.warn("Cannot run this test without the native lzo libraries");
        return;
    }

    Configuration conf = new Configuration();
    conf.setLong("fs.local.block.size", charsToOutput / 2);
    // reducing block size to force a split of the tiny file
    conf.set("io.compression.codecs", LzopCodec.class.getName());

    FileSystem localFs = FileSystem.getLocal(conf);
    localFs.delete(outputDir, true);
    localFs.mkdirs(outputDir);

    Job job = new Job(conf);
    TextOutputFormat.setCompressOutput(job, true);
    TextOutputFormat.setOutputCompressorClass(job, LzopCodec.class);
    TextOutputFormat.setOutputPath(job, outputDir);

    TaskAttemptContext attemptContext = new TaskAttemptContextImpl(job.getConfiguration(),
            new TaskAttemptID("123", 0, TaskType.REDUCE, 1, 2));

    // create some input data
    byte[] expectedMd5 = createTestInput(outputDir, localFs, attemptContext, charsToOutput);

    if (testWithIndex) {
        Path lzoFile = new Path(outputDir, lzoFileName);
        LzoTextInputFormat.createIndex(localFs, lzoFile);
    }

    LzoTextInputFormat inputFormat = new LzoTextInputFormat();
    TextInputFormat.setInputPaths(job, outputDir);

    List<InputSplit> is = inputFormat.getSplits(job);
    //verify we have the right number of lzo chunks
    if (testWithIndex && OUTPUT_BIG == charsToOutput) {
        assertEquals(3, is.size());
    } else {
        assertEquals(1, is.size());
    }

    // let's read it all and calculate the md5 hash
    for (InputSplit inputSplit : is) {
        RecordReader<LongWritable, Text> rr = inputFormat.createRecordReader(inputSplit, attemptContext);
        rr.initialize(inputSplit, attemptContext);

        while (rr.nextKeyValue()) {
            Text value = rr.getCurrentValue();

            md5.update(value.getBytes(), 0, value.getLength());
        }

        rr.close();
    }

    localFs.close();
    assertTrue(Arrays.equals(expectedMd5, md5.digest()));
}

From source file:com.ibm.bi.dml.runtime.transform.BinAgent.java

License:Open Source License

/**
 * Method to load transform metadata for all attributes
 *
 * @param job
 * @throws IOException
 */
@Override
public void loadTxMtd(JobConf job, FileSystem fs, Path txMtdDir, TfUtils agents) throws IOException {
    if (_binList == null)
        return;

    if (fs.isDirectory(txMtdDir)) {
        for (int i = 0; i < _binList.length; i++) {
            int colID = _binList[i];

            Path path = new Path(txMtdDir + "/Bin/" + agents.getName(colID) + BIN_FILE_SUFFIX);
            TfUtils.checkValidInputFile(fs, path, true);

            BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(path)));
            // format: colID,min,max,binwidth,nbins
            String[] fields = br.readLine().split(TXMTD_SEP);
            double min = UtilFunctions.parseToDouble(fields[1]);
            //double max = UtilFunctions.parseToDouble(fields[2]);
            double binwidth = UtilFunctions.parseToDouble(fields[3]);
            int nbins = UtilFunctions.parseToInt(fields[4]);

            _numBins[i] = nbins;
            _min[i] = min;
            _binWidths[i] = binwidth; // (max-min)/nbins;

            br.close();
        }
    } else {
        fs.close();
        throw new RuntimeException("Path to recode maps must be a directory: " + txMtdDir);
    }
}