List of usage examples for org.apache.hadoop.fs FileSystem close
Prototype: @Override public void close() throws IOException
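Before the source-file examples below, a minimal sketch of the call in isolation. FileSystem implements java.io.Closeable, so try-with-resources is the idiomatic way to scope the close; note that FileSystem.get(conf) normally returns a process-wide cached instance, so closing it affects every caller sharing that handle. The path "/tmp" and the default Configuration are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // try-with-resources invokes fs.close() automatically on exit
        try (FileSystem fs = FileSystem.get(conf)) {
            System.out.println(fs.exists(new Path("/tmp")));
        }
    }
}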
From source file:org.sf.xrime.algorithms.layout.radialtree.RadialTreeVisitReducer.java
License:Apache License
/**
 * Record that an edge was emitted, by creating a directory in the file system as a flag.
 * RadialTreeStep will check for the directory; if it exists, the RadialTree algorithm
 * needs another step. If no edge was emitted, we have visited all reachable elements
 * in the graph and the algorithm ends. This is a communication mechanism in Map/Reduce
 * for signalling an event.
 * @throws IOException to indicate an error in creating the directory.
 */
private void recordContinue() throws IOException {
    if (changeFlag) { // we only need to create the directory once
        return;
    }
    changeFlag = true;
    String continueFile = context.getParameter(RadialTreeStep1.continueFileKey);
    if (continueFile != null) { // create the directory
        FileSystem client = FileSystem.get(job);
        client.mkdirs(new Path(continueFile));
        client.close();
    }
}
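The Javadoc above says RadialTreeStep checks for the flag directory, but that checking side is not part of this listing. A hedged sketch of what it might look like, assuming job and continueFile match the reducer's configuration:

// Hypothetical sketch of the checking side described in the Javadoc above;
// the real RadialTreeStep implementation is not shown in this listing.
FileSystem fs = FileSystem.get(job);
boolean needAnotherStep = fs.exists(new Path(continueFile));
fs.close();
if (needAnotherStep) {
    // schedule the next RadialTree step
}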
From source file:org.sf.xrime.algorithms.layout.radialtree.RadialTreeWeightReducer.java
License:Apache License
/**
 * Record that an edge was emitted, by creating a directory in the file system as a flag.
 * RadialTreeStep will check for the directory; if it exists, the RadialTree algorithm
 * needs another step. If no edge was emitted, we have visited all reachable elements
 * in the graph and the algorithm ends. This is a communication mechanism in Map/Reduce
 * for signalling an event.
 * @throws IOException to indicate an error in creating the directory.
 */
private void recordContinue() throws IOException {
    if (changeFlag) { // we only need to create the directory once
        return;
    }
    changeFlag = true;
    String continueFile = context.getParameter(RadialTreeStep2.continueFileKeyWeight);
    if (continueFile != null) { // create the directory
        FileSystem client = FileSystem.get(job);
        client.mkdirs(new Path(continueFile));
        client.close();
    }
}
From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java
License:Open Source License
/**
 * Close the DFS quietly.
 *
 * @param dfs the dfs reference; can be null
 */
public static void closeQuietly(FileSystem dfs) {
    if (dfs != null) {
        try {
            dfs.close();
        } catch (IOException e) {
            LogFactory.getLog(DfsUtils.class).info("Failed to close DFS", e);
        }
    }
}
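A quiet close like this is typically called from a finally block, so a cleanup failure never masks the primary exception. A hedged caller sketch, assuming conf is a valid Configuration:

FileSystem fs = null;
try {
    fs = FileSystem.get(conf);
    // ... work with fs ...
} finally {
    DfsUtils.closeQuietly(fs); // safe even if fs is still null
}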
From source file:org.smartfrog.services.hadoop.operations.utils.DfsUtils.java
License:Open Source License
/**
 * This is the non-quiet close operation.
 *
 * @param dfs the filesystem
 * @throws SmartFrogRuntimeException if the filesystem does not close
 */
public static void closeDfs(FileSystem dfs) throws SmartFrogRuntimeException {
    try {
        dfs.close();
    } catch (IOException e) {
        if (isFilesystemClosedException(e)) {
            LogFactory.getLog(DfsUtils.class).info("DFS has already closed", e);
        } else {
            throw (SmartFrogRuntimeException) SmartFrogRuntimeException
                    .forward(ERROR_FAILED_TO_CLOSE + dfs.getUri(), e);
        }
    }
}
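The isFilesystemClosedException helper is not shown in this listing. Hadoop's DFS client reports operations on a closed handle as IOException("Filesystem closed"), so a plausible, purely hypothetical reconstruction is a simple message check:

// Hypothetical reconstruction; the real helper is not part of this listing.
private static boolean isFilesystemClosedException(IOException e) {
    return "Filesystem closed".equals(e.getMessage());
}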
From source file:org.springframework.batch.integration.x.RemoteFileToHadoopTaskletTests.java
License:Apache License
@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testWrite() throws Exception {
    File file = new File(tmpDir, "foo.txt");
    file.delete();
    ByteArrayInputStream data = new ByteArrayInputStream("foobarbaz".getBytes());
    Session session = mock(Session.class);
    SessionFactory factory = mock(SessionFactory.class);
    when(factory.getSession()).thenReturn(session);
    when(session.readRaw("foo.txt")).thenReturn(data);
    when(session.finalizeRaw()).thenReturn(true);
    StepExecution stepExecution = new StepExecution("foo", null);
    ExecutionContext stepExecutionContext = new ExecutionContext();
    stepExecutionContext.putString("filePath", "foo.txt");
    stepExecution.setExecutionContext(stepExecutionContext);
    StepContext stepContext = new StepContext(stepExecution);
    ChunkContext chunkContext = new ChunkContext(stepContext);
    RemoteFileTemplate template = new RemoteFileTemplate(factory);
    template.setBeanFactory(mock(BeanFactory.class));
    template.afterPropertiesSet();
    // clean up from old tests
    FileSystem fs = FileSystem.get(configuration);
    Path p = new Path("/qux/foo.txt");
    fs.delete(p, true);
    assertFalse(fs.exists(p));
    RemoteFileToHadoopTasklet tasklet = new RemoteFileToHadoopTasklet(template, configuration, "/qux");
    assertEquals(RepeatStatus.FINISHED, tasklet.execute(null, chunkContext));
    assertTrue(fs.exists(p));
    FSDataInputStream stream = fs.open(p);
    byte[] out = new byte[9];
    stream.readFully(out);
    stream.close();
    assertEquals("foobarbaz", new String(out));
    fs.close();
}
From source file:org.springframework.data.hadoop.mapreduce.ExecutionUtils.java
License:Apache License
/**
 * Most jars don't close the file system.
 *
 * @param cfg the configuration whose file systems should be shut down
 */
static void shutdownFileSystem(Configuration cfg) {
    FileSystem fs;
    try {
        fs = FileSystem.get(cfg);
        if (fs != null) {
            fs.close();
        }
    } catch (Exception ex) {
        // best-effort shutdown: ignore
    }
    try {
        fs = FileSystem.getLocal(cfg);
        if (fs != null) {
            fs.close();
        }
    } catch (Exception ex) {
        // best-effort shutdown: ignore
    }
}
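Because FileSystem.get(cfg) usually hands back a shared cached instance, a close like the one above can break other code still holding that handle. When that risk matters, one option (a sketch under that assumption, not part of the source above) is to request an uncached instance and close only that:

// FileSystem.newInstance(conf) bypasses the shared cache, so closing it
// cannot affect callers that obtained their handle via FileSystem.get(conf).
FileSystem privateFs = FileSystem.newInstance(conf);
try {
    // ... use privateFs ...
} finally {
    privateFs.close();
}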
From source file:org.wso2.carbon.hdfs.dataaccess.DataAccessService.java
License:Open Source License
/**
 * Close the HDFS file system connection with the HDFS cluster.
 *
 * @param fileSystem the file system instance to close
 * @throws IOException
 */
public void unmountFileSystem(FileSystem fileSystem) throws IOException {
    fileSystem.close();
}
From source file:org.wso2.carbon.hdfs.mgt.HDFSAdmin.java
License:Open Source License
/**
 * Use this method to close the HDFS instance obtained for the user.
 */
public void closeHDFSInstance() {
    FileSystem hdfsFS = null;
    try {
        hdfsFS = TenantUserFSCache.getInstance()
                .getFSforUser(HDFSAdminHelper.getInstance().getCurrentUserHomeFolder());
        if (hdfsFS != null) {
            hdfsFS.close();
        }
    } catch (IOException e) {
        log.error("error occurred when closing file system instance", e);
    }
}
From source file:pack.block.blockstore.hdfs.HdfsMiniClusterUtil.java
License:Apache License
public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOGGER.info("Shutting down Mini DFS");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOGGER.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOGGER.error("error closing file system", e);
        }
        // This has got to be one of the worst hacks I have ever had to do.
        // This is needed to shutdown 2 thread pools that are not shutdown by
        // themselves.
        ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOGGER.info("Stopping ThreadPoolExecutor {}", thread.getName());
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target, "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOGGER.info("Waiting for thread pool to exit {}", thread.getName());
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}
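A hedged sketch of how a JUnit 4 test might call this helper; the cluster field and its @Before setup are assumptions for illustration, not part of the source above:

// Hypothetical teardown using the helper above.
private MiniDFSCluster cluster; // assumed to be started in a @Before method

@After
public void tearDown() {
    HdfsMiniClusterUtil.shutdownDfs(cluster);
}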
From source file:ras.test.hadoop.fs.InMemoryFileSystem.java
License:Apache License
/**
 * A simple helper method for testing, which creates a file consisting of a
 * supplied content string.
 *
 * @param fs
 *            The file system on which the file is to be created.
 * @param path
 *            The path of the file to be created. Must not be {@code null}.
 * @param contents
 *            the contents to be stored in the file. Must not be
 *            {@code null}.
 * @throws IOException
 */
public static void createFile(FileSystem fs, Path path, String contents) throws IOException {
    Validate.notNull(fs, "fs == null not allowed!");
    Validate.notNull(contents, "contents == null not allowed!");
    FSDataOutputStream out = fs.create(path);
    try {
        out.writeBytes(contents);
    } finally {
        out.close();
        fs.close();
    }
}
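Note that the finally block closes the supplied FileSystem along with the stream, so the handle is unusable after this call. A hedged usage sketch; the configuration and path are assumptions for illustration:

// Hypothetical caller: pass a dedicated (uncached) instance, because
// createFile closes the FileSystem it is given.
FileSystem fs = FileSystem.newInstance(new Configuration());
InMemoryFileSystem.createFile(fs, new Path("/test/data.txt"), "hello");
// fs is closed at this point; obtain a new instance for further work.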