List of usage examples for org.apache.hadoop.fs.FileSystem.close()
@Override public void close() throws IOException
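Before the examples, a note on the close() contract: FileSystem implements java.io.Closeable, so the call can be made exception-safe with try-with-resources. A minimal sketch, assuming a default Configuration and a placeholder path (neither taken from the examples below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/example"); // placeholder path, not from the examples below
        // FileSystem implements Closeable, so try-with-resources closes it
        // even when an exception is thrown. Beware: FileSystem.get() returns
        // a JVM-wide cached instance by default, so close() also closes it
        // for any other code that obtained the same reference.
        try (FileSystem fs = FileSystem.get(conf)) {
            if (fs.exists(path)) {
                fs.delete(path, true);
            }
        }
    }
}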
From source file:com.pivotal.hawq.mapreduce.MapReduceLocalDriver.java
License:Apache License
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2 && args.length != 3) {
        System.err.printf("Usage: %s [generic options] <metadata_file> <output> [<mapper_classname>]\n",
                getClass().getSimpleName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    String metadataFile = args[0];
    Path outputPath = new Path(args[1]);
    Class<? extends Mapper> mapperClass = (args.length == 2)
            ? HAWQTableMapper.class
            : (Class<? extends Mapper>) Class.forName(args[2]);

    // delete previous output
    FileSystem fs = FileSystem.getLocal(getConf());
    if (fs.exists(outputPath))
        fs.delete(outputPath, true);
    fs.close();

    Job job = new Job(getConf());
    job.setJarByClass(MapReduceLocalDriver.class);
    job.setInputFormatClass(HAWQInputFormat.class);

    HAWQInputFormat.setInput(job.getConfiguration(), metadataFile);
    FileOutputFormat.setOutputPath(job, outputPath);
    job.setMapperClass(mapperClass);
    job.setReducerClass(HAWQTableReducer.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    return job.waitForCompletion(true) ? 0 : 1;
}
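One caveat about the pattern above, not stated in the original source: FileSystem.get() and FileSystem.getLocal() return a cached, JVM-shared instance by default, so calling fs.close() can invalidate the handle for any other code in the same process that uses the same filesystem. A sketch of a safer variant of the delete step using FileSystem.newInstance(), which bypasses the cache:

// Sketch only, not from the original source: newInstance() bypasses the
// FileSystem cache, so closing this handle cannot affect other callers
// (such as the Job submitted afterwards).
FileSystem fs = FileSystem.newInstance(getConf());
try {
    if (fs.exists(outputPath)) {
        fs.delete(outputPath, true);
    }
} finally {
    fs.close();
}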
From source file:com.pivotal.hawq.mapreduce.pt.HAWQInputFormatPerformanceTest_TPCH.java
License:Apache License
private int runMapReduceJob() throws Exception {
    Path outputPath = new Path("/output");

    // delete previous output
    FileSystem fs = FileSystem.get(getConf());
    if (fs.exists(outputPath))
        fs.delete(outputPath, true);
    fs.close();

    Job job = new Job(getConf());
    job.setJarByClass(HAWQInputFormatPerformanceTest_TPCH.class);
    job.setInputFormatClass(HAWQInputFormat.class);

    long startTime = System.currentTimeMillis();
    HAWQInputFormat.setInput(job.getConfiguration(), MRFormatConfiguration.TEST_DB_URL, null, null, tableName);
    metadataExtractTime = System.currentTimeMillis() - startTime;

    FileOutputFormat.setOutputPath(job, outputPath);
    job.setMapperClass(TPCHTableMapper.class);
    job.setNumReduceTasks(0);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Void.class);

    return job.waitForCompletion(true) ? 0 : 1;
}
From source file:com.redsqirl.workflow.server.datatype.PigTestUtils.java
License:Open Source License
public static void createHDFSFile(Path p, String containt) throws IOException {
    FileSystem fileSystem = NameNodeVar.getFS();

    // Check if the file already exists
    if (fileSystem.exists(p)) {
        if (fileSystem.listStatus(p).length > 0) {
            logger.warn("File " + p.toString() + " already exists");
            return;
        }
    } else {
        fileSystem.mkdirs(p);
    }

    // Create a new file and write data to it.
    FSDataOutputStream out = fileSystem.create(new Path(p, "part-0000"));
    out.write(containt.getBytes());

    out.close();
    fileSystem.close();
}
From source file:com.redsqirl.workflow.server.datatype.PigTestUtils.java
License:Open Source License
public static void writeContent(Path p, String file, String content) throws IOException {
    FileSystem fs = NameNodeVar.getFS();
    if (fs.exists(p)) {
        FSDataOutputStream out = fs.create(new Path(p, file));
        out.write(content.getBytes());
        out.close();
        fs.close();
    }
}
From source file:com.redsqirl.workflow.server.datatype.PigTestUtils.java
License:Open Source License
public static void createHFDSdir(String path) throws IOException {
    FileSystem fs = NameNodeVar.getFS();
    Path p = new Path(path);
    if (fs.exists(p)) {
        logger.warn("Dir " + p.toString() + " already exists");
        return;
    }
    fs.mkdirs(p);
    fs.close();
}
From source file:com.redsqirl.workflow.test.TestUtils.java
License:Open Source License
public static void createHDFSFile(Path p, String containt) throws IOException {
    FileSystem fileSystem = NameNodeVar.getFS();

    // Check if the file already exists
    if (fileSystem.exists(p)) {
        logger.warn("File " + p.toString() + " already exists");
        return;
    }

    // Create a new file and write data to it.
    fileSystem.mkdirs(p);
    FSDataOutputStream out = fileSystem.create(new Path(p, "part-0000"));
    out.write(containt.getBytes());

    out.close();
    fileSystem.close();
}
From source file:com.rockstor.compact.RockIndexReader.java
License:Apache License
/**
 * @param args
 */
public static void main(String[] args) {
    RockAccessor.connectHDFS();
    FileSystem dfs = RockAccessor.getFileSystem();
    try {
        FileStatus[] fs = dfs.listStatus(new Path("/rockstor/tmp/gb_meta/"));
        RockIndexReader rir = null;
        for (FileStatus fx : fs) {
            try {
                rir = new RockIndexReader();
                rir.open(fx.getPath().toString());
                Chunk c = null;
                while (rir.hasNext()) {
                    c = rir.next();
                    LOG.info(c);
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                if (rir != null) {
                    rir.close();
                    rir = null;
                }
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        try {
            dfs.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
From source file:com.rockstor.compact.TaskMetaReader.java
License:Apache License
public static void main(String[] argv) {
    RockAccessor.connectHDFS();
    FileSystem dfs = RockAccessor.getFileSystem();
    try {
        FileStatus[] fs = dfs.listStatus(new Path("/rockstor/tmp/task"));
        TaskMetaReader rir = null;
        for (FileStatus fx : fs) {
            try {
                rir = new TaskMetaReader();
                rir.open(fx.getPath().toString() + "/meta");
                Map<String, byte[]> s = rir.getRocks();
                LOG.info(fx.getPath().toString() + "/meta");
                for (Map.Entry<String, byte[]> kv : s.entrySet()) {
                    LOG.info(kv.getKey());
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                if (rir != null) {
                    rir.close();
                    rir = null;
                }
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        try {
            dfs.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
From source file:com.shmsoft.dmass.main.MRFreeEedProcess.java
License:Apache License
private void copyToHdfs(String from, String to) throws IOException {
    Configuration configuration = getConf();
    FileSystem fileSystem = FileSystem.get(configuration);

    // Check if the file already exists
    Path path = new Path(to);
    if (fileSystem.exists(path)) {
        System.out.println("File " + to + " already exists");
        return;
    }

    // Create a new file and write data to it.
    FSDataOutputStream out = fileSystem.create(path);
    InputStream in = new BufferedInputStream(new FileInputStream(new File(from)));
    int numBytes = 0;
    // 'b' is a byte[] buffer declared as a field of the enclosing class
    while ((numBytes = in.read(b)) > 0) {
        out.write(b, 0, numBytes);
    }

    // Close all the file descriptors
    in.close();
    out.close();
    fileSystem.close();
}
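For comparison, the same copy can be written with Hadoop's org.apache.hadoop.io.IOUtils helper, which removes the dependency on the class-level buffer b. A sketch only, assuming the same from, path, and fileSystem variables as in the method above:

// Sketch using Hadoop's IOUtils instead of a hand-rolled copy loop; the
// final 'true' argument closes both streams when the copy finishes or fails.
InputStream in = new BufferedInputStream(new FileInputStream(new File(from)));
FSDataOutputStream out = fileSystem.create(path);
IOUtils.copyBytes(in, out, 4096, true);
fileSystem.close();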
From source file:com.streamsets.pipeline.stage.destination.hdfs.writer.TestRecordWriter.java
License:Apache License
@Test
public void testTextFile() throws Exception {
    FileSystem fs = getRawLocalFileSystem();
    try {
        Path file = new Path(getTestDir(), "file.txt");
        OutputStream os = fs.create(file, false);
        long timeToLive = 10000;
        long expires = System.currentTimeMillis() + timeToLive;
        RecordWriter writer = new RecordWriter(file, timeToLive, os, new DummyDataGeneratorFactory(null));
        Assert.assertTrue(writer.isTextFile());
        Assert.assertFalse(writer.isSeqFile());
        Assert.assertEquals(file, writer.getPath());
        Assert.assertTrue(expires <= writer.getExpiresOn());
        Assert.assertTrue(writer.toString().contains(file.toString()));

        Record record = RecordCreator.create();
        record.set(Field.create("a"));
        writer.write(record);
        record.set(Field.create("z"));
        writer.write(record);
        Assert.assertFalse(writer.isClosed());
        writer.flush();
        Assert.assertTrue(writer.getLength() > 2);
        Assert.assertEquals(2, writer.getRecords());
        writer.close();
        Assert.assertTrue(writer.isClosed());
        try {
            writer.write(record);
            Assert.fail();
        } catch (IOException ex) {
            // NOP
        }
        BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(file)));
        Assert.assertEquals("a", reader.readLine());
        Assert.assertEquals("z", reader.readLine());
        Assert.assertNull(reader.readLine());
        reader.close();
    } finally {
        fs.close();
    }
}