Example usage for org.apache.hadoop.fs FileSystem FS_DEFAULT_NAME_KEY

Introduction

This page collects example usages of the org.apache.hadoop.fs.FileSystem field FS_DEFAULT_NAME_KEY.

Prototype

public static final String FS_DEFAULT_NAME_KEY
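
In Hadoop, this constant holds the configuration key "fs.defaultFS" (the successor to the deprecated "fs.default.name"), which names the default filesystem URI. As a minimal sketch of typical usage, assuming a Hadoop client classpath and a hypothetical namenode address hdfs://localhost:9000:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsDefaultNameKeyExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Point the default filesystem at a namenode; the address below is a
        // placeholder and must match your cluster.
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
        FileSystem fs = FileSystem.get(conf);
        System.out.println("Default FS: " + conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
        System.out.println("Home directory: " + fs.getHomeDirectory());
    }
}

FileSystem.get(conf) resolves the filesystem named by this key; several of the Drill examples below instead set it to FileSystem.DEFAULT_FS ("file:///") to obtain a local filesystem.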

Usage

From source file: org.apache.drill.exec.dotdrill.TestDotDrillUtil.java

License: Apache License

@BeforeClass
public static void setup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
    dfs = new DrillFileSystem(conf);
    tempDir = dirTestWatcher.getTmpDir();
    tempPath = new Path(tempDir.getAbsolutePath());
}

From source file: org.apache.drill.exec.ExecTest.java

License: Apache License

/**
 * Creates instance of local file system.
 *
 * @return local file system
 */
public static FileSystem getLocalFileSystem() throws IOException {
    Configuration configuration = new Configuration();
    configuration.set(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS);
    return FileSystem.get(configuration);
}

From source file: org.apache.drill.exec.expr.fn.registry.RemoteFunctionRegistry.java

License: Apache License

/**
 * Creates if absent and validates three udf areas: STAGING, REGISTRY and TMP.
 * The udf areas root is generated from {@link ExecConstants#UDF_DIRECTORY_ROOT};
 * if not set, the user home directory is used instead.
 */
private void prepareAreas(DrillConfig config) {
    logger.info("Preparing three remote udf areas: staging, registry and tmp.");
    Configuration conf = new Configuration();
    if (config.hasPath(ExecConstants.UDF_DIRECTORY_FS)) {
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY, config.getString(ExecConstants.UDF_DIRECTORY_FS));
    }

    try {
        this.fs = FileSystem.get(conf);
    } catch (IOException e) {
        DrillRuntimeException.format(e, "Error during file system %s setup",
                conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
    }

    String root = fs.getHomeDirectory().toUri().getPath();
    if (config.hasPath(ExecConstants.UDF_DIRECTORY_ROOT)) {
        root = config.getString(ExecConstants.UDF_DIRECTORY_ROOT);
    }

    this.registryArea = createArea(fs, root, config.getString(ExecConstants.UDF_DIRECTORY_REGISTRY));
    this.stagingArea = createArea(fs, root, config.getString(ExecConstants.UDF_DIRECTORY_STAGING));
    this.tmpArea = createArea(fs, root, config.getString(ExecConstants.UDF_DIRECTORY_TMP));
}

From source file: org.apache.drill.exec.hive.TestHiveStorage.java

License: Apache License

@Test
public void queryingTablesInNonDefaultFS() throws Exception {
    // Update the default FS setting in the Hive test storage plugin to a non-local FS
    hiveTest.updatePluginConfig(getDrillbitContext().getStorage(),
            ImmutableMap.of(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9001"));

    testBuilder().sqlQuery("SELECT * FROM hive.`default`.kv LIMIT 1").unOrdered()
            .baselineColumns("key", "value").baselineValues(1, " key_1").go();
}

From source file: org.apache.drill.exec.impersonation.BaseTestImpersonation.java

License: Apache License

protected static void addMiniDfsBasedStorage(final Map<String, WorkspaceConfig> workspaces) throws Exception {
    // Create an HDFS-based storage plugin based on the local storage plugin and add it to the plugin registry
    // (the connection string for the mini DFS varies for each run).
    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs_test")
            .getConfig();

    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
    miniDfsPluginConfig.connection = dfsConf.get(FileSystem.FS_DEFAULT_NAME_KEY);

    createAndAddWorkspace("tmp", "/tmp", (short) 0777, processUser, processUser, workspaces);

    miniDfsPluginConfig.workspaces = workspaces;
    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
    miniDfsPluginConfig.setEnabled(true);

    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);
}

From source file: org.apache.drill.exec.impersonation.TestImpersonationDisabledWithMiniDFS.java

License: Apache License

@BeforeClass
public static void addMiniDfsBasedStorage() throws Exception {
    startMiniDfsCluster(TestImpersonationDisabledWithMiniDFS.class.getSimpleName(), false);

    // Create an HDFS-based storage plugin based on the local storage plugin and add it to the plugin registry
    // (the connection string for the mini DFS varies for each run).
    final StoragePluginRegistry pluginRegistry = getDrillbitContext().getStorage();
    final FileSystemConfig lfsPluginConfig = (FileSystemConfig) pluginRegistry.getPlugin("dfs_test")
            .getConfig();

    final FileSystemConfig miniDfsPluginConfig = new FileSystemConfig();
    miniDfsPluginConfig.connection = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);

    Map<String, WorkspaceConfig> workspaces = Maps.newHashMap(lfsPluginConfig.workspaces);
    createAndAddWorkspace(dfsCluster.getFileSystem(), "dfstemp", "/tmp", (short) 0777, processUser, processUser,
            workspaces);

    miniDfsPluginConfig.workspaces = workspaces;
    miniDfsPluginConfig.formats = ImmutableMap.copyOf(lfsPluginConfig.formats);
    miniDfsPluginConfig.setEnabled(true);

    pluginRegistry.createOrUpdate(MINIDFS_STORAGE_PLUGIN_NAME, miniDfsPluginConfig, true);

    // Create test table in minidfs.tmp schema for use in test queries
    test(String.format("CREATE TABLE %s.dfstemp.dfsRegion AS SELECT * FROM cp.`region.json`",
            MINIDFS_STORAGE_PLUGIN_NAME));
}

From source file: org.apache.drill.exec.physical.impl.trace.TestTraceOutputDump.java

License: Apache License

@Test
public void testFilter(@Injectable final DrillbitContext bitContext,
        @Injectable UserClientConnection connection) throws Throwable {
    mockDrillbitContext(bitContext);

    final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
    final PhysicalPlan plan = reader.readPhysicalPlan(
            Files.toString(FileUtils.getResourceAsFile("/trace/simple_trace.json"), Charsets.UTF_8));
    final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
    final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(),
            connection, registry);
    final SimpleRootExec exec = new SimpleRootExec(
            ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));

    while (exec.next()) {
    }

    exec.close();

    if (context.getFailureCause() != null) {
        throw context.getFailureCause();
    }
    assertTrue(!context.isFailed());

    final FragmentHandle handle = context.getHandle();

    /* Form the file name to which the trace output will dump the record batches */
    final String qid = QueryIdHelper.getQueryId(handle.getQueryId());
    final int majorFragmentId = handle.getMajorFragmentId();
    final int minorFragmentId = handle.getMinorFragmentId();

    final String logLocation = c.getString(ExecConstants.TRACE_DUMP_DIRECTORY);
    System.out.println("Found log location: " + logLocation);

    final String filename = String.format("%s//%s_%d_%d_mock-scan", logLocation, qid, majorFragmentId,
            minorFragmentId);
    System.out.println("File Name: " + filename);

    final Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, c.getString(ExecConstants.TRACE_DUMP_FILESYSTEM));

    final FileSystem fs = FileSystem.get(conf);
    final Path path = new Path(filename);
    assertTrue("Trace file does not exist", fs.exists(path));
    final FSDataInputStream in = fs.open(path);

    final VectorAccessibleSerializable wrap = new VectorAccessibleSerializable(context.getAllocator());
    wrap.readFromStream(in);
    final VectorAccessible container = wrap.get();

    /* Assert there are no selection vectors */
    assertTrue(wrap.getSv2() == null);

    /* Assert there is only one record */
    assertTrue(container.getRecordCount() == 1);

    /* Read the integer value and assert that it is Integer.MIN_VALUE */
    final int value = (int) container.iterator().next().getValueVector().getAccessor().getObject(0);
    assertTrue(value == Integer.MIN_VALUE);
}

From source file: org.apache.drill.exec.physical.impl.trace.TraceRecordBatch.java

License: Apache License

public TraceRecordBatch(Trace pop, RecordBatch incoming, FragmentContext context)
        throws ExecutionSetupException {
    super(pop, context, incoming);
    this.traceTag = pop.traceTag;
    logLocation = context.getConfig().getString(ExecConstants.TRACE_DUMP_DIRECTORY);
    localAllocator = context.getNewChildAllocator("trace", 200, 0, Long.MAX_VALUE);
    String fileName = getFileName();

    /* Create the log file we will dump to and initialize the file descriptors */
    try {
        Configuration conf = new Configuration();
        conf.set(FileSystem.FS_DEFAULT_NAME_KEY,
                context.getConfig().getString(ExecConstants.TRACE_DUMP_FILESYSTEM));
        FileSystem fs = FileSystem.get(conf);

        /* create the file */
        fos = fs.create(new Path(fileName));
    } catch (IOException e) {
        throw new ExecutionSetupException(
                "Unable to create file: " + fileName + " check permissions or if directory exists", e);
    }
}

From source file: org.apache.drill.exec.physical.impl.writer.TestCorruptParquetDateCorrection.java

License: Apache License

@BeforeClass
public static void initFs() throws Exception {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "local");
    fs = FileSystem.get(conf);
    path = new Path(getDfsTestTmpSchemaLocation());

    // Move files into temp directory, rewrite the metadata cache file to contain the appropriate absolute
    // path
    copyDirectoryIntoTempSpace(CORRUPTED_PARTITIONED_DATES_1_2_PATH);
    copyMetaDataCacheToTempReplacingInternalPaths(
            "parquet/4203_corrupt_dates/drill.parquet.metadata_1_2.requires_replace.txt",
            PARTITIONED_1_2_FOLDER);
    copyDirectoryIntoTempSpace(CORRUPTED_PARTITIONED_DATES_1_2_PATH,
            MIXED_CORRUPTED_AND_CORRECT_PARTITIONED_FOLDER);
    copyDirectoryIntoTempSpace(CORRECT_PARTITIONED_DATES_1_9_PATH,
            MIXED_CORRUPTED_AND_CORRECT_PARTITIONED_FOLDER);
    copyDirectoryIntoTempSpace(CORRUPTED_PARTITIONED_DATES_1_4_0_PATH,
            MIXED_CORRUPTED_AND_CORRECT_PARTITIONED_FOLDER);
}

From source file: org.apache.drill.exec.physical.impl.writer.TestParquetWriter.java

License: Apache License

@BeforeClass
public static void initFs() throws Exception {
    Configuration conf = new Configuration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "local");

    fs = FileSystem.get(conf);
    test(String.format("alter session set `%s` = true", PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY));
}