Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.addResource, drawn from Apache Ignite's IGFS Hadoop integration tests and utilities.

Prototype

public void addResource(URL url) 
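
This is the overload exercised by every example on this page. Configuration also provides addResource overloads for other resource types (signatures from the Hadoop API):

public void addResource(String name)
public void addResource(Path file)
public void addResource(InputStream in)
public void addResource(Configuration conf)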

Document

Add a configuration resource.
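
As a minimal, self-contained sketch of the call (the resource name core-site.xml and the property fs.defaultFS are illustrative assumptions, not taken from the examples below):

import java.net.URL;

import org.apache.hadoop.conf.Configuration;

public class AddResourceExample {
    public static void main(String[] args) {
        // Start from an empty configuration; 'false' skips loading the default resources.
        Configuration cfg = new Configuration(false);

        // Locate an extra configuration file on the classpath (the name is hypothetical).
        URL resource = AddResourceExample.class.getClassLoader().getResource("core-site.xml");

        if (resource != null) {
            // Merge the file's properties into this configuration; resources added
            // later override earlier values for the same key.
            cfg.addResource(resource);
        }

        // Properties from the added resource are visible through the normal getters.
        System.out.println(cfg.get("fs.defaultFS", "<unset>"));
    }
}

Passing the merged configuration to FileSystem.get(uri, cfg) is the pattern each example below follows.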

Usage

From source file:org.apache.ignite.igfs.IgfsHadoopDualAbstractSelfTest.java

License:Apache License

/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary, FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    GridCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache()
            .cache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
                in0.read(readBuf);
            } finally {
                U.closeQuiet(in0);
            }

            return null;
        }
    }, IOException.class, "Failed to read data due to secondary file system exception: /dir/subdir/file");
}

From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemAbstractSelfTest.java

License:Apache License

/**
 * Gets instance of Hadoop local file system.
 *
 * @param home File system home.
 * @return File system.
 * @throws IOException If failed.
 */
private FileSystem local(Path home) throws IOException {
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));

    return FileSystem.get(home.toUri(), cfg);
}

From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemIpcCacheSelfTest.java

License:Apache License

/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
public void testIpcCache() throws Exception {
    Field cacheField = IgfsHadoopIpcIo.class.getDeclaredField("ipcCache");

    cacheField.setAccessible(true);

    Field activeCntField = IgfsHadoopIpcIo.class.getDeclaredField("activeCnt");

    activeCntField.setAccessible(true);

    Map<String, IgfsHadoopIpcIo> cache = (Map<String, IgfsHadoopIpcIo>) cacheField.get(null);

    String name = "igfs:" + getTestGridName(0) + "@";

    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
    cfg.setBoolean("fs.igfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(IgfsHadoopUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true);

    // Ensure that existing IO is reused.
    FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    IgfsHadoopIpcIo io = null;

    System.out.println("CACHE: " + cache);

    for (String key : cache.keySet()) {
        if (key.contains("10500")) {
            io = cache.get(key);

            break;
        }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
    FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger) activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    Field stopField = IgfsHadoopIpcIo.class.getDeclaredField("stopping");

    stopField.setAccessible(true);

    assert !(Boolean) stopField.get(io);

    // Ensure that IO is stopped when nobody else needs it.
    fs1.close();

    assert cache.isEmpty();

    assert (Boolean) stopField.get(io);
}

From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemLoggerStateSelfTest.java

License:Apache License

/**
 * Instantiate new file system.
 *
 * @return New file system.
 * @throws Exception If failed.
 */
private IgfsHadoopFileSystem fileSystem() throws Exception {
    Configuration fsCfg = new Configuration();

    fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));

    fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);

    if (logging)
        fsCfg.setBoolean(String.format(PARAM_IGFS_LOG_ENABLED, "igfs:igfs-grid@"), logging);

    fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs:igfs-grid@"), U.getIgniteHome());

    return (IgfsHadoopFileSystem) FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
}

From source file:org.apache.ignite.igfs.IgfsHadoopFileSystemSecondaryModeSelfTest.java

License:Apache License

/**
 * Perform initial startup.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("NullableProblems")
private void startUp() throws Exception {
    startUpSecondary();

    IgfsConfiguration igfsCfg = new IgfsConfiguration();

    igfsCfg.setDataCacheName("partitioned");
    igfsCfg.setMetaCacheName("replicated");
    igfsCfg.setName("igfs");
    igfsCfg.setBlockSize(512 * 1024);
    igfsCfg.setDefaultMode(mode);
    igfsCfg.setPathModes(pathModes);
    igfsCfg.setIpcEndpointConfiguration(new HashMap<String, String>() {
        {
            put("type", "tcp");
            put("port", "10500");
        }
    });

    igfsCfg.setManagementPort(-1);
    igfsCfg.setSecondaryFileSystem(
            new IgfsHadoopFileSystemWrapper("igfs://igfs-secondary:igfs-grid-secondary@127.0.0.1:11500/",
                    "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));

    CacheConfiguration cacheCfg = defaultCacheConfiguration();

    cacheCfg.setName("partitioned");
    cacheCfg.setCacheMode(PARTITIONED);
    cacheCfg.setDistributionMode(CacheDistributionMode.PARTITIONED_ONLY);
    cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
    cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
    cacheCfg.setBackups(0);
    cacheCfg.setQueryIndexEnabled(false);
    cacheCfg.setAtomicityMode(TRANSACTIONAL);

    CacheConfiguration metaCacheCfg = defaultCacheConfiguration();

    metaCacheCfg.setName("replicated");
    metaCacheCfg.setCacheMode(REPLICATED);
    metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
    metaCacheCfg.setQueryIndexEnabled(false);
    metaCacheCfg.setAtomicityMode(TRANSACTIONAL);

    IgniteConfiguration cfg = new IgniteConfiguration();

    cfg.setGridName("igfs-grid");

    TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();

    discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));

    cfg.setDiscoverySpi(discoSpi);
    cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
    cfg.setIgfsConfiguration(igfsCfg);

    cfg.setLocalHost("127.0.0.1");

    G.start(cfg);

    Configuration fsCfg = new Configuration();

    fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));

    fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);

    fs = (IgfsHadoopFileSystem) FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
}

From source file:org.apache.ignite.igfs.IgfsLoad.java

License:Apache License

/**
 * Executes read/write/delete operations.
 *
 * @param url File system URL.
 * @param hadoopCfg Hadoop configuration.
 * @param primaryOnly If {@code true}, files are created only in the directory named 'primary'.
 * @param threads Number of threads.
 * @param files Number of files.
 * @param reads Number of reads.
 * @param writes Number of writes.
 * @param deletes Number of deletes.
 * @param minSize Minimum file size.
 * @param maxSize Maximum file size.
 * @param delay Delay between operations.
 * @throws Exception If some file system operation failed.
 */
@SuppressWarnings("IfMayBeConditional")
public void runLoad(String url, String hadoopCfg, final boolean primaryOnly, int threads, int files,
        final int reads, final int writes, final int deletes, final int minSize, final int maxSize,
        final long delay) throws Exception {
    Path fsPath = new Path(url);

    Configuration cfg = new Configuration(true);

    cfg.addResource(U.resolveIgniteUrl(hadoopCfg));

    final FileSystem fs = FileSystem.get(fsPath.toUri(), cfg);

    Path workDir = new Path(fsPath, "/fsload");

    fs.delete(workDir, true);

    fs.mkdirs(workDir, FsPermission.getDefault());

    final Path[] dirs;

    if (primaryOnly)
        dirs = new Path[] { mkdir(fs, workDir, DIR_PRIMARY_MODE) };
    else
        dirs = new Path[] { mkdir(fs, workDir, DIR_PRIMARY_MODE), mkdir(fs, workDir, DIR_PROXY_MODE),
                mkdir(fs, workDir, DIR_DUAL_SYNC_MODE), mkdir(fs, workDir, DIR_DUAL_ASYNC_MODE) };

    try {
        ExecutorService exec = Executors.newFixedThreadPool(threads);

        Collection<Future<?>> futs = new ArrayList<>(threads);

        for (int i = 0; i < threads; i++) {
            final int filesPerThread;

            if (i == 0 && files % threads != 0)
                filesPerThread = files / threads + files % threads;
            else
                filesPerThread = files / threads;

            futs.add(exec.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    runLoad(fs, dirs, filesPerThread, reads, writes, deletes, minSize, maxSize, delay);

                    return null;
                }
            }));
        }

        exec.shutdown();

        for (Future<?> fut : futs) {
            try {
                fut.get();
            } catch (ExecutionException e) {
                X.error("Error during execution: " + e);

                e.getCause().printStackTrace();
            }
        }
    } finally {
        try {
            fs.delete(workDir, true);
        } catch (IOException ignored) {
            // Ignore.
        }
    }
}

From source file:org.apache.ignite.igfs.IgfsNearOnlyMultiNodeSelfTest.java

License:Apache License

/**
 * Gets the configuration of the concrete file system.
 *
 * @return Configuration of the concrete file system.
 */
protected Configuration getFileSystemConfig() {
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));

    return cfg;
}

From source file:org.apache.ignite.igfs.IgniteHadoopFileSystemIpcCacheSelfTest.java

License:Apache License

/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
public void testIpcCache() throws Exception {
    Field cacheField = HadoopIgfsIpcIo.class.getDeclaredField("ipcCache");

    cacheField.setAccessible(true);

    Field activeCntField = HadoopIgfsIpcIo.class.getDeclaredField("activeCnt");

    activeCntField.setAccessible(true);

    Map<String, HadoopIgfsIpcIo> cache = (Map<String, HadoopIgfsIpcIo>) cacheField.get(null);

    String name = "igfs:" + getTestGridName(0) + "@";

    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(HADOOP_FS_CFG));
    cfg.setBoolean("fs.igfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(HadoopIgfsUtils.PARAM_IGFS_ENDPOINT_NO_EMBED, name), true);

    // Ensure that existing IO is reused.
    FileSystem fs1 = FileSystem.get(new URI("igfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    HadoopIgfsIpcIo io = null;

    System.out.println("CACHE: " + cache);

    for (String key : cache.keySet()) {
        if (key.contains("10500")) {
            io = cache.get(key);

            break;
        }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
    FileSystem fs2 = FileSystem.get(new URI("igfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger) activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger) activeCntField.get(io)).get());

    Field stopField = HadoopIgfsIpcIo.class.getDeclaredField("stopping");

    stopField.setAccessible(true);

    assert !(Boolean) stopField.get(io);

    // Ensure that IO is stopped when nobody else needs it.
    fs1.close();

    assert cache.isEmpty();

    assert (Boolean) stopField.get(io);
}

From source file:org.apache.ignite.igfs.IgniteHadoopFileSystemLoggerStateSelfTest.java

License:Apache License

/**
 * Instantiate new file system.
 *
 * @return New file system.
 * @throws Exception If failed.
 */
private IgniteHadoopFileSystem fileSystem() throws Exception {
    Configuration fsCfg = new Configuration();

    fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));

    fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);

    if (logging)
        fsCfg.setBoolean(String.format(PARAM_IGFS_LOG_ENABLED, "igfs:igfs-grid@"), logging);

    fsCfg.setStrings(String.format(PARAM_IGFS_LOG_DIR, "igfs:igfs-grid@"), U.getIgniteHome());

    return (IgniteHadoopFileSystem) FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
}

From source file:org.apache.ignite.igfs.IgniteHadoopFileSystemSecondaryFileSystemInitializationSelfTest.java

License:Apache License

/**
 * Perform initial startup.
 *
 * @param initDfltPathModes Whether to initialize default path modes.
 * @throws Exception If failed.
 */
@SuppressWarnings({ "NullableProblems", "unchecked" })
private void startUp(boolean initDfltPathModes) throws Exception {
    startUpSecondary();

    FileSystemConfiguration igfsCfg = new FileSystemConfiguration();

    igfsCfg.setDataCacheName("partitioned");
    igfsCfg.setMetaCacheName("replicated");
    igfsCfg.setName("igfs");
    igfsCfg.setBlockSize(512 * 1024);
    igfsCfg.setInitializeDefaultPathModes(initDfltPathModes);

    IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();

    endpointCfg.setType(IgfsIpcEndpointType.TCP);
    endpointCfg.setPort(10500);

    igfsCfg.setIpcEndpointConfiguration(endpointCfg);

    igfsCfg.setManagementPort(-1);
    igfsCfg.setSecondaryFileSystem(new IgniteHadoopIgfsSecondaryFileSystem(
            "igfs://igfs-secondary:igfs-grid-secondary@127.0.0.1:11500/",
            "modules/core/src/test/config/hadoop/core-site-loopback-secondary.xml"));

    CacheConfiguration cacheCfg = defaultCacheConfiguration();

    cacheCfg.setName("partitioned");
    cacheCfg.setCacheMode(PARTITIONED);
    cacheCfg.setNearConfiguration(null);
    cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
    cacheCfg.setAffinityMapper(new IgfsGroupDataBlocksKeyMapper(128));
    cacheCfg.setBackups(0);
    cacheCfg.setAtomicityMode(TRANSACTIONAL);

    CacheConfiguration metaCacheCfg = defaultCacheConfiguration();

    metaCacheCfg.setName("replicated");
    metaCacheCfg.setCacheMode(REPLICATED);
    metaCacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
    metaCacheCfg.setAtomicityMode(TRANSACTIONAL);

    IgniteConfiguration cfg = new IgniteConfiguration();

    cfg.setGridName("igfs-grid");

    TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();

    discoSpi.setIpFinder(new TcpDiscoveryVmIpFinder(true));

    cfg.setDiscoverySpi(discoSpi);
    cfg.setCacheConfiguration(metaCacheCfg, cacheCfg);
    cfg.setFileSystemConfiguration(igfsCfg);

    cfg.setLocalHost("127.0.0.1");

    G.start(cfg);

    Configuration fsCfg = new Configuration();

    fsCfg.addResource(U.resolveIgniteUrl("modules/core/src/test/config/hadoop/core-site-loopback.xml"));

    fsCfg.setBoolean("fs.igfs.impl.disable.cache", true);

    fs = (IgniteHadoopFileSystem) FileSystem.get(new URI("igfs://igfs:igfs-grid@/"), fsCfg);
}