List of usage examples for org.apache.hadoop.fs FileSystem close
@Override public void close() throws IOException
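Before the project-specific examples below, here is a minimal sketch of the typical close() lifecycle. It is an illustration only, not taken from any of the sources listed here; the hdfs://namenode:8020 URI is a placeholder you would replace with a real cluster address.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // FileSystem implements Closeable, so try-with-resources calls close() on exit.
        // The URI below is a placeholder; substitute your cluster's address.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf)) {
            fs.mkdirs(new Path("/tmp/example"));
        }
    }
}

Note that FileSystem.get() may return a cached instance shared across the JVM; closing it closes it for every caller of the same URI and scheme. The examples below exercise exactly this interplay between close() and the instance cache.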
From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemIpcCacheSelfTest.java
License:Apache License
/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
public void testIpcCache() throws Exception {
    Field cacheField = IgfsHadoopIpcIo.class.getDeclaredField("ipcCache");
    cacheField.setAccessible(true);

    Field activeCntField = IgfsHadoopIpcIo.class.getDeclaredField("activeCnt");
    activeCntField.setAccessible(true);

    Map<String, IgfsHadoopIpcIo> cache = (Map<String, IgfsHadoopIpcIo>) cacheField.get(null);

    String name = "igfs:" + getTestGridName(0) + "@";

    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
    cfg.setBoolean("fs.igfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(IgfsHadoopUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true);

    // Ensure that existing IO is reused.
    FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    IgfsHadoopIpcIo io = null;

    System.out.println("CACHE: " + cache);

    for (String key : cache.keySet()) {
        if (key.contains("10500")) {
            io = cache.get(key);

            break;
        }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
    FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger) activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    Field stopField = IgfsHadoopIpcIo.class.getDeclaredField("stopping");
    stopField.setAccessible(true);

    assert !(Boolean) stopField.get(io);

    // Ensure that IO is stopped when nobody else needs it.
    fs1.close();

    assert cache.isEmpty();
    assert (Boolean) stopField.get(io);
}
From source file:org.apache.ignite.igfs.IgniteHadoopFileSystemAbstractSelfTest.java
License:Apache License
/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
public void testIpcCache() throws Exception {
    HadoopIgfsEx hadoop = GridTestUtils.getFieldValue(fs, "rmtClient", "delegateRef", "value", "hadoop");

    if (hadoop instanceof HadoopIgfsOutProc) {
        FileSystem fsOther = null;

        try {
            Field field = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
            field.setAccessible(true);

            Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>) field.get(null);

            Configuration cfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem);

            // Disable caching in order to obtain a new FileSystem instance.
            cfg.setBoolean("fs.igfs.impl.disable.cache", true);

            // Initial cache size.
            int initSize = cache.size();

            // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
            fsOther = FileSystem.get(new URI(PRIMARY_URI), cfg);

            assert fs != fsOther;

            assertEquals(initSize, cache.size());

            fsOther.close();

            assertEquals(initSize, cache.size());

            Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
            stopField.setAccessible(true);

            HadoopIgfsIpcIo io = null;

            for (Map.Entry<String, HadoopIgfsIpcIo> ioEntry : cache.entrySet()) {
                if (endpoint.contains(ioEntry.getKey())) {
                    io = ioEntry.getValue();

                    break;
                }
            }

            assert io != null;
            assert !(Boolean) stopField.get(io);

            // Ensure that IO is stopped when nobody else needs it.
            fs.close();

            assertEquals(initSize - 1, cache.size());

            assert (Boolean) stopField.get(io);
        }
        finally {
            U.closeQuiet(fsOther);
        }
    }
}
From source file:org.apache.ignite.igfs.IgniteHadoopFileSystemAbstractSelfTest.java
License:Apache License
/** @throws Exception If failed. */
public void testCloseIfNotInitialized() throws Exception {
    final FileSystem fs = new IgniteHadoopFileSystem();

    // Check that closing an uninitialized file system does nothing harmful.
    fs.close();
}
From source file:org.apache.ignite.igfs.IgniteHadoopFileSystemIpcCacheSelfTest.java
License:Apache License
/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
public void testIpcCache() throws Exception {
    Field cacheField = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
    cacheField.setAccessible(true);

    Field activeCntField = HadoopIgfsIpcIo.class.getDeclaredField("activeCnt");
    activeCntField.setAccessible(true);

    Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>) cacheField.get(null);

    String name = "igfs:" + getTestGridName(0) + "@";

    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
    cfg.setBoolean("fs.igfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true);

    // Ensure that existing IO is reused.
    FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    HadoopIgfsIpcIo io = null;

    System.out.println("CACHE: " + cache);

    for (String key : cache.keySet()) {
        if (key.contains("10500")) {
            io = cache.get(key);

            break;
        }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
    FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger) activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
    stopField.setAccessible(true);

    assert !(Boolean) stopField.get(io);

    // Ensure that IO is stopped when nobody else needs it.
    fs1.close();

    assert cache.isEmpty();
    assert (Boolean) stopField.get(io);
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.client.HadoopClientProtocolMultipleServersSelfTest.java
License:Apache License
/**
 * @throws Exception If failed.
 */
@SuppressWarnings({ "ConstantConditions", "ThrowableResultOfMethodCallIgnored" })
public void testSingleAddress() throws Exception {
    try {
        // Don't use REST_PORT, to test that the connection fails when only this port is configured.
        restPort = REST_PORT + 1;

        startGrids(gridCount());

        GridTestUtils.assertThrowsAnyCause(log, new Callable<Object>() {
            @Override public Object call() throws Exception {
                checkJobSubmit(configSingleAddress());

                return null;
            }
        }, GridServerUnreachableException.class, "Failed to connect to any of the servers in list");
    }
    finally {
        FileSystem fs = FileSystem.get(configSingleAddress());

        fs.close();
    }
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopFIleSystemFactorySelfTest.java
License:Apache License
/**
 * Test custom factory.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
public void testCustomFactory() throws Exception {
    assert START_CNT.get() == 1;
    assert STOP_CNT.get() == 0;

    // Use IGFS directly.
    primary.mkdirs(IGFS_PATH_DUAL);

    assert primary.exists(IGFS_PATH_DUAL);
    assert secondary.exists(IGFS_PATH_DUAL);

    // Create remote instance.
    FileSystem fs = FileSystem.get(URI.create("igfs://primary@127.0.0.1:10500/"), baseConfiguration());

    assertEquals(1, START_CNT.get());
    assertEquals(0, STOP_CNT.get());

    // Check file system operations.
    assert fs.exists(PATH_DUAL);

    assert fs.delete(PATH_DUAL, true);
    assert !primary.exists(IGFS_PATH_DUAL);
    assert !secondary.exists(IGFS_PATH_DUAL);
    assert !fs.exists(PATH_DUAL);

    assert fs.mkdirs(PATH_DUAL);
    assert primary.exists(IGFS_PATH_DUAL);
    assert secondary.exists(IGFS_PATH_DUAL);
    assert fs.exists(PATH_DUAL);

    assert fs.mkdirs(PATH_PROXY);
    assert secondary.exists(IGFS_PATH_PROXY);
    assert fs.exists(PATH_PROXY);

    fs.close();

    assertEquals(1, START_CNT.get());
    assertEquals(0, STOP_CNT.get());

    // Stop primary node and ensure that base factory was notified.
    G.stop(primary.context().kernalContext().grid().name(), true);

    assertEquals(1, START_CNT.get());
    assertEquals(1, STOP_CNT.get());
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.igfs.IgniteHadoopFileSystemAbstractSelfTest.java
License:Apache License
/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
public void testIpcCache() throws Exception {
    HadoopIgfsEx hadoop = GridTestUtils.getFieldValue(fs, "rmtClient", "delegateRef", "value", "hadoop");

    if (hadoop instanceof HadoopIgfsOutProc) {
        FileSystem fsOther = null;

        try {
            Field field = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
            field.setAccessible(true);

            Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>) field.get(null);

            Configuration cfg = configuration(PRIMARY_AUTHORITY, skipEmbed, skipLocShmem);

            // Disable caching in order to obtain a new FileSystem instance.
            cfg.setBoolean("fs.igfs.impl.disable.cache", true);

            // Initial cache size.
            int initSize = cache.size();

            // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
            fsOther = FileSystem.get(new URI(PRIMARY_URI), cfg);

            assert fs != fsOther;

            assertEquals(initSize, cache.size());

            fsOther.close();

            assertEquals(initSize, cache.size());

            Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
            stopField.setAccessible(true);

            HadoopIgfsIpcIo io = null;

            for (Map.Entry<String, HadoopIgfsIpcIo> ioEntry : cache.entrySet()) {
                if (endpoint.contains(ioEntry.getKey())) {
                    io = ioEntry.getValue();

                    break;
                }
            }

            assert io != null;
            assert !(Boolean) stopField.get(io);

            // Ensure that IO is stopped when nobody else needs it.
            fs.close();

            assert initSize >= cache.size();
            assert (Boolean) stopField.get(io);
        }
        finally {
            U.closeQuiet(fsOther);
        }
    }
}
From source file:org.apache.ignite.internal.processors.hadoop.impl.igfs.IgniteHadoopFileSystemIpcCacheSelfTest.java
License:Apache License
/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
public void testIpcCache() throws Exception {
    Field cacheField = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");
    cacheField.setAccessible(true);

    Field activeCntField = HadoopIgfsIpcIo.class.getDeclaredField("activeCnt");
    activeCntField.setAccessible(true);

    Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>) cacheField.get(null);

    cache.clear(); // Avoid influence of previous tests in the same process.

    String name = "igfs:" + getTestIgniteInstanceName(0) + "@";

    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
    cfg.setBoolean("fs.igfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true);

    // Ensure that existing IO is reused.
    FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    HadoopIgfsIpcIo io = null;

    System.out.println("CACHE: " + cache);

    for (String key : cache.keySet()) {
        if (key.contains("10500")) {
            io = cache.get(key);

            break;
        }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
    FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger) activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");
    stopField.setAccessible(true);

    assert !(Boolean) stopField.get(io);

    // Ensure that IO is stopped when nobody else needs it.
    fs1.close();

    assert cache.isEmpty();
    assert (Boolean) stopField.get(io);
}
From source file:org.apache.ignite.loadtests.igfs.IgfsPerformanceBenchmark.java
License:Apache License
/**
 * Starts benchmark.
 *
 * @param args Program arguments.
 *      [0] - number of threads, default 1.
 *      [1] - operation (read or write), default is write.
 *      [2] - file length, default is 256MB.
 *      [3] - stream buffer size, default is 128K.
 *      [4] - fs config path.
 *      [5] - fs prefix.
 *      [6] - replication factor, default 3.
 * @throws Exception If failed.
 */
public static void main(String[] args) throws Exception {
    final int threadNum = intArgument(args, 0, 1);
    final int op = intArgument(args, 1, OP_WRITE);
    final long fileLen = longArgument(args, 2, 256 * 1024 * 1024);
    final int bufSize = intArgument(args, 3, 128 * 1024);
    final String cfgPath = argument(args, 4, HADOOP_FS_CFG);
    final String fsPrefix = argument(args, 5, FS_PREFIX);
    final short replication = (short) intArgument(args, 6, 3);

    final Path igfsHome = new Path(fsPrefix);

    final FileSystem fs = igfs(igfsHome, cfgPath);

    final AtomicLong progress = new AtomicLong();
    final AtomicInteger idx = new AtomicInteger();

    System.out.println("Warming up...");

    // warmUp(fs, igfsHome, op, fileLen);

    System.out.println("Finished warm up.");

    // Pre-create input files so the read benchmark has data to read.
    if (op == OP_READ) {
        for (int i = 0; i < threadNum; i++)
            benchmarkWrite(fs, new Path(igfsHome, "in-" + i), fileLen, bufSize, replication, null);
    }

    long total = 0;

    long start = System.currentTimeMillis();

    IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
        @Override public void run() {
            String fileIdx = op == OP_READ ? String.valueOf(idx.getAndIncrement()) : UUID.randomUUID().toString();

            try {
                for (int i = 0; i < 200; i++) {
                    if (op == OP_WRITE)
                        benchmarkWrite(fs, new Path(igfsHome, "out-" + fileIdx), fileLen, bufSize, replication, progress);
                    else
                        benchmarkRead(fs, new Path(igfsHome, "in-" + fileIdx), bufSize, progress);
                }

                System.out.println("Finished " + (op == OP_WRITE ? "writing" : "reading") + " data.");
            }
            catch (Exception e) {
                System.out.println("Failed to process stream: " + e);

                e.printStackTrace();
            }
        }
    }, threadNum, "test-runner");

    while (!fut.isDone()) {
        U.sleep(1000);

        long written = progress.getAndSet(0);

        total += written;

        int mbytesPerSec = (int) (written / (1024 * 1024));

        System.out.println((op == OP_WRITE ? "Write" : "Read") + " rate [threads=" + threadNum +
            ", bufSize=" + bufSize + ", MBytes/s=" + mbytesPerSec + ']');
    }

    long now = System.currentTimeMillis();

    System.out.println((op == OP_WRITE ? "Written" : "Read") + " " + total + " bytes in " + (now - start) +
        "ms, avg rate is " + (total * 1000 / ((now - start) * 1024 * 1024)) + "MBytes/s");

    fs.close();
}
From source file:org.apache.kylin.monitor.ApiRequestParser.java
License:Apache License
public void parseRequestInit() throws IOException {
    logger.info("parse api request initializing...");

    FileSystem fs = null;

    try {
        Configuration conf = new Configuration();

        fs = FileSystem.get(conf);

        org.apache.hadoop.fs.Path path = new org.apache.hadoop.fs.Path(
                ApiRequestParser.REQUEST_PARSE_RESULT_PATH);

        if (!fs.exists(path)) {
            fs.create(path);

            // Need to close before getting the FileSystem again.
            fs.close();

            this.writeResultToHdfs(ApiRequestParser.REQUEST_PARSE_RESULT_PATH,
                    ApiRequestParser.KYLIN_REQUEST_CSV_HEADER);
        }
    } catch (Exception e) {
        // Guard against an NPE when FileSystem.get() itself failed.
        if (fs != null)
            fs.close();

        logger.info("Failed to init:", e);
    }
}
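A recurring detail in the examples above is the fs.&lt;scheme&gt;.impl.disable.cache flag, which the Ignite tests set so that each FileSystem.get() call returns a fresh instance and close() only affects that instance. The following is a minimal sketch of that pattern, not taken from any of the sources above; the hdfs://namenode:8020 URI is a placeholder for a real cluster address.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DisableCacheSketch {
    public static void main(String[] args) throws Exception {
        Configuration cfg = new Configuration();

        // With caching disabled for the scheme, each get() creates a new instance,
        // so closing one does not invalidate the others (the pattern the tests rely on).
        cfg.setBoolean("fs.hdfs.impl.disable.cache", true);

        URI uri = URI.create("hdfs://namenode:8020/"); // Placeholder address.

        FileSystem fs1 = FileSystem.get(uri, cfg);
        FileSystem fs2 = FileSystem.get(uri, cfg);

        assert fs1 != fs2; // Distinct instances, unlike the default cached behavior.

        fs2.close(); // fs1 remains usable.
        fs1.close();
    }
}

With the flag left at its default of false, both get() calls would return the same cached object and the first close() would shut it down for every user of that URI in the JVM, which is exactly the lifecycle the Kylin example above works around by closing and re-getting the FileSystem.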