Example usage for org.apache.hadoop.fs FileSystem getWorkingDirectory

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem getWorkingDirectory.

Prototype

public abstract Path getWorkingDirectory();

Document

Get the current working directory for the given FileSystem
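
The project examples below mostly follow one pattern: read the working directory and combine it with the FileSystem URI to fully qualify a relative Path. Here is a minimal standalone sketch of that pattern (it is not taken from any of the listed projects; the class name and the relative path are illustrative).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetWorkingDirectoryExample {
    public static void main(String[] args) throws IOException {
        // Resolves to the local file system unless fs.defaultFS points at a cluster.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // The working directory is the base against which relative paths are resolved.
        Path workingDir = fs.getWorkingDirectory();
        System.out.println("Working directory: " + workingDir);

        // The recurring pattern in the usage examples below: qualify a relative
        // path against the file system's URI and working directory.
        Path qualified = new Path("some/relative/path").makeQualified(fs.getUri(), workingDir);
        System.out.println("Qualified path: " + qualified);
    }
}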

Usage

From source file:org.apache.nifi.processors.hadoop.PutHDFS.java

License:Apache License

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final FileSystem hdfs = getFileSystem();
    final Configuration configuration = getConfiguration();
    final UserGroupInformation ugi = getUserGroupInformation();

    if (configuration == null || hdfs == null || ugi == null) {
        getLogger().error("HDFS not configured properly");
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
        return;
    }

    ugi.doAs(new PrivilegedAction<Object>() {
        @Override
        public Object run() {
            Path tempDotCopyFile = null;
            FlowFile putFlowFile = flowFile;
            try {
                final String dirValue = context.getProperty(DIRECTORY).evaluateAttributeExpressions(putFlowFile)
                        .getValue();
                final Path configuredRootDirPath = new Path(dirValue);

                final String conflictResponse = context.getProperty(CONFLICT_RESOLUTION).getValue();

                final Double blockSizeProp = context.getProperty(BLOCK_SIZE).asDataSize(DataUnit.B);
                final long blockSize = blockSizeProp != null ? blockSizeProp.longValue()
                        : hdfs.getDefaultBlockSize(configuredRootDirPath);

                final Double bufferSizeProp = context.getProperty(BUFFER_SIZE).asDataSize(DataUnit.B);
                final int bufferSize = bufferSizeProp != null ? bufferSizeProp.intValue()
                        : configuration.getInt(BUFFER_SIZE_KEY, BUFFER_SIZE_DEFAULT);

                final Integer replicationProp = context.getProperty(REPLICATION_FACTOR).asInteger();
                final short replication = replicationProp != null ? replicationProp.shortValue()
                        : hdfs.getDefaultReplication(configuredRootDirPath);

                final CompressionCodec codec = getCompressionCodec(context, configuration);

                final String filename = codec != null
                        ? putFlowFile.getAttribute(CoreAttributes.FILENAME.key()) + codec.getDefaultExtension()
                        : putFlowFile.getAttribute(CoreAttributes.FILENAME.key());

                final Path tempCopyFile = new Path(configuredRootDirPath, "." + filename);
                final Path copyFile = new Path(configuredRootDirPath, filename);

                // Create destination directory if it does not exist
                try {
                    if (!hdfs.getFileStatus(configuredRootDirPath).isDirectory()) {
                        throw new IOException(
                                configuredRootDirPath.toString() + " already exists and is not a directory");
                    }
                } catch (FileNotFoundException fe) {
                    if (!hdfs.mkdirs(configuredRootDirPath)) {
                        throw new IOException(configuredRootDirPath.toString() + " could not be created");
                    }
                    changeOwner(context, hdfs, configuredRootDirPath, flowFile);
                }

                final boolean destinationExists = hdfs.exists(copyFile);

                // If destination file already exists, resolve that based on processor configuration
                if (destinationExists) {
                    switch (conflictResponse) {
                    case REPLACE_RESOLUTION:
                        if (hdfs.delete(copyFile, false)) {
                            getLogger().info("deleted {} in order to replace with the contents of {}",
                                    new Object[] { copyFile, putFlowFile });
                        }
                        break;
                    case IGNORE_RESOLUTION:
                        session.transfer(putFlowFile, REL_SUCCESS);
                        getLogger().info(
                                "transferring {} to success because file with same name already exists",
                                new Object[] { putFlowFile });
                        return null;
                    case FAIL_RESOLUTION:
                        session.transfer(session.penalize(putFlowFile), REL_FAILURE);
                        getLogger().warn(
                                "penalizing {} and routing to failure because file with same name already exists",
                                new Object[] { putFlowFile });
                        return null;
                    default:
                        break;
                    }
                }

                // Write FlowFile to temp file on HDFS
                final StopWatch stopWatch = new StopWatch(true);
                session.read(putFlowFile, new InputStreamCallback() {

                    @Override
                    public void process(InputStream in) throws IOException {
                        OutputStream fos = null;
                        Path createdFile = null;
                        try {
                            if (conflictResponse.equals(APPEND_RESOLUTION_AV.getValue()) && destinationExists) {
                                fos = hdfs.append(copyFile, bufferSize);
                            } else {
                                fos = hdfs.create(tempCopyFile, true, bufferSize, replication, blockSize);
                            }
                            if (codec != null) {
                                fos = codec.createOutputStream(fos);
                            }
                            createdFile = tempCopyFile;
                            BufferedInputStream bis = new BufferedInputStream(in);
                            StreamUtils.copy(bis, fos);
                            bis = null;
                            fos.flush();
                        } finally {
                            try {
                                if (fos != null) {
                                    fos.close();
                                }
                            } catch (RemoteException re) {
                                // when talking to remote HDFS clusters, we don't notice problems until fos.close()
                                if (createdFile != null) {
                                    try {
                                        hdfs.delete(createdFile, false);
                                    } catch (Throwable ignore) {
                                    }
                                }
                                throw re;
                            } catch (Throwable ignore) {
                            }
                            fos = null;
                        }
                    }

                });
                stopWatch.stop();
                final String dataRate = stopWatch.calculateDataRate(putFlowFile.getSize());
                final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
                tempDotCopyFile = tempCopyFile;

                if (!conflictResponse.equals(APPEND_RESOLUTION_AV.getValue())
                        || (conflictResponse.equals(APPEND_RESOLUTION_AV.getValue()) && !destinationExists)) {
                    boolean renamed = false;
                    for (int i = 0; i < 10; i++) { // try to rename multiple times.
                        if (hdfs.rename(tempCopyFile, copyFile)) {
                            renamed = true;
                            break;// rename was successful
                        }
                        Thread.sleep(200L);// try waiting to let whatever might cause rename failure to resolve
                    }
                    if (!renamed) {
                        hdfs.delete(tempCopyFile, false);
                        throw new ProcessException("Copied file to HDFS but could not rename dot file "
                                + tempCopyFile + " to its final filename");
                    }

                    changeOwner(context, hdfs, copyFile, flowFile);
                }

                getLogger().info("copied {} to HDFS at {} in {} milliseconds at a rate of {}",
                        new Object[] { putFlowFile, copyFile, millis, dataRate });

                final String newFilename = copyFile.getName();
                final String hdfsPath = copyFile.getParent().toString();
                putFlowFile = session.putAttribute(putFlowFile, CoreAttributes.FILENAME.key(), newFilename);
                putFlowFile = session.putAttribute(putFlowFile, ABSOLUTE_HDFS_PATH_ATTRIBUTE, hdfsPath);
                final Path qualifiedPath = copyFile.makeQualified(hdfs.getUri(), hdfs.getWorkingDirectory());
                session.getProvenanceReporter().send(putFlowFile, qualifiedPath.toString());

                session.transfer(putFlowFile, REL_SUCCESS);

            } catch (final Throwable t) {
                if (tempDotCopyFile != null) {
                    try {
                        hdfs.delete(tempDotCopyFile, false);
                    } catch (Exception e) {
                        getLogger().error("Unable to remove temporary file {} due to {}",
                                new Object[] { tempDotCopyFile, e });
                    }
                }
                getLogger().error("Failed to write to HDFS due to {}", new Object[] { t });
                session.transfer(session.penalize(putFlowFile), REL_FAILURE);
                context.yield();
            }

            return null;
        }
    });
}

From source file:org.apache.nifi.processors.hadoop.PutHDFSTest.java

License:Apache License

@Test
public void testPutFileWithException() throws IOException {
    // Refer to comment in the BeforeClass method for an explanation
    assumeTrue(isNotWindows());

    String dirName = "target/testPutFileWrongPermissions";
    File file = new File(dirName);
    file.mkdirs();
    Configuration config = new Configuration();
    FileSystem fs = FileSystem.get(config);
    Path p = new Path(dirName).makeQualified(fs.getUri(), fs.getWorkingDirectory());

    final KerberosProperties testKerberosProperties = kerberosProperties;
    TestRunner runner = TestRunners.newTestRunner(new PutHDFS() {
        @Override
        protected void changeOwner(ProcessContext context, FileSystem hdfs, Path name, FlowFile flowFile) {
            throw new ProcessException("Forcing Exception to get thrown in order to verify proper handling");
        }

        @Override
        protected KerberosProperties getKerberosProperties(File kerberosConfigFile) {
            return testKerberosProperties;
        }
    });
    runner.setProperty(PutHDFS.DIRECTORY, dirName);
    runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace");

    try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1");) {
        Map<String, String> attributes = new HashMap<String, String>();
        attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1");
        runner.enqueue(fis, attributes);
        runner.run();
    }

    List<MockFlowFile> failedFlowFiles = runner
            .getFlowFilesForRelationship(new Relationship.Builder().name("failure").build());
    assertFalse(failedFlowFiles.isEmpty());
    assertTrue(failedFlowFiles.get(0).isPenalized());

    fs.delete(p, true);
}

From source file:org.apache.oozie.service.TestAuthorizationService.java

License:Apache License

private void _testAuthorizationService(boolean useDefaultGroup) throws Exception {
    init(useDefaultGroup, true);
    Reader reader = IOUtils.getResourceAsReader("wf-ext-schema-valid.xml", -1);
    Writer writer = new FileWriter(new File(getTestCaseDir(), "workflow.xml"));
    IOUtils.copyCharStream(reader, writer);

    final DagEngine engine = new DagEngine(getTestUser());
    Configuration jobConf = new XConfiguration();
    jobConf.set(OozieClient.APP_PATH, getTestCaseFileUri("workflow.xml"));
    jobConf.set(OozieClient.USER_NAME, getTestUser());
    if (useDefaultGroup) {
        jobConf.set(OozieClient.GROUP_NAME, getTestGroup());
    } else {
        jobConf.set(OozieClient.GROUP_NAME, getTestGroup() + ",foo");
    }

    jobConf.set(OozieClient.LOG_TOKEN, "t");

    jobConf.set("external-status", "ok");
    jobConf.set("signal-value", "based_on_action_status");

    final String jobId = engine.submitJob(jobConf, true);

    HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
    URI uri = getFileSystem().getUri();
    Configuration fsConf = has.createJobConf(uri.getAuthority());
    FileSystem fileSystem = has.createFileSystem(getTestUser(), uri, fsConf);

    Path path = new Path(fileSystem.getWorkingDirectory(), UUID.randomUUID().toString());
    Path fsTestDir = fileSystem.makeQualified(path);
    System.out.println(XLog.format("Setting FS testcase work dir[{0}]", fsTestDir));
    fileSystem.delete(fsTestDir, true);
    if (!fileSystem.mkdirs(path)) {
        throw new IOException(XLog.format("Could not create FS testcase dir [{0}]", fsTestDir));
    }

    String appPath = fsTestDir.toString() + "/app";

    Path jobXmlPath = new Path(appPath, "workflow.xml");
    fileSystem.create(jobXmlPath).close();
    fileSystem.setOwner(jobXmlPath, getTestUser(), getTestGroup());

    FsPermission permissions = new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE);
    fileSystem.setPermission(jobXmlPath, permissions);

    AuthorizationService as = services.get(AuthorizationService.class);
    assertNotNull(as);
    as.authorizeForGroup(getTestUser(), getTestGroup());
    assertNotNull(as.getDefaultGroup(getTestUser()));

    as.authorizeForApp(getTestUser2(), getTestGroup(), appPath, jobConf);

    try {
        as.authorizeForApp(getTestUser3(), getTestGroup(), appPath, jobConf);
        fail();
    } catch (AuthorizationException ex) {
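        // expected: getTestUser3() is not authorized for this app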
    }

    as.authorizeForJob(getTestUser(), jobId, false);
    as.authorizeForJob(getTestUser(), jobId, true);
    if (!useDefaultGroup) {
        as.authorizeForJob("foo", jobId, true);
    }
    try {
        as.authorizeForJob("bar", jobId, true);
        fail();
    } catch (AuthorizationException ex) {
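        // expected: user "bar" is not authorized for this job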
    }
}

From source file:org.apache.oozie.util.ClasspathUtils.java

License:Apache License

private static void addToClasspathIfNotJar(Path[] paths, URI[] withLinks, Configuration conf,
        Map<String, String> environment, String classpathEnvVar) throws IOException {
    if (paths != null) {
        HashMap<Path, String> linkLookup = new HashMap<Path, String>();
        if (withLinks != null) {
            for (URI u : withLinks) {
                Path p = new Path(u);
                FileSystem remoteFS = p.getFileSystem(conf);
                p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
                String name = (null == u.getFragment()) ? p.getName() : u.getFragment();
                if (!name.toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
                    linkLookup.put(p, name);
                }
            }
        }

        for (Path p : paths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            String name = linkLookup.get(p);
            if (name == null) {
                name = p.getName();
            }
            if (!name.toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
                MRApps.addToEnvironment(environment, classpathEnvVar,
                        ApplicationConstants.Environment.PWD.$() + Path.SEPARATOR + name, conf);
            }
        }
    }
}

From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigInputFormat.java

License:Apache License

@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public List<InputSplit> getSplits(JobContext jobcontext) throws IOException, InterruptedException {

    Configuration conf = jobcontext.getConfiguration();

    ArrayList<FileSpec> inputs;
    ArrayList<ArrayList<OperatorKey>> inpTargets;
    PigContext pigContext;
    try {
        inputs = (ArrayList<FileSpec>) ObjectSerializer.deserialize(conf.get("pig.inputs"));
        inpTargets = (ArrayList<ArrayList<OperatorKey>>) ObjectSerializer
                .deserialize(conf.get("pig.inpTargets"));
        pigContext = (PigContext) ObjectSerializer.deserialize(conf.get("pig.pigContext"));
        PigContext.setPackageImportList(
                (ArrayList<String>) ObjectSerializer.deserialize(conf.get("udf.import.list")));
        MapRedUtil.setupUDFContext(conf);
    } catch (Exception e) {
        int errCode = 2094;
        String msg = "Unable to deserialize object.";
        throw new ExecException(msg, errCode, PigException.BUG, e);
    }

    ArrayList<InputSplit> splits = new ArrayList<InputSplit>();
    for (int i = 0; i < inputs.size(); i++) {
        try {
            Path path = new Path(inputs.get(i).getFileName());

            FileSystem fs;
            boolean isFsPath = true;
            try {
                fs = path.getFileSystem(conf);
            } catch (Exception e) {
                // If an application specific
                // scheme was used
                // (e.g.: "hbase://table") we will fail
                // getting the file system. That's
                // ok, we just use the dfs in that case.
                fs = new Path("/").getFileSystem(conf);
                isFsPath = false;
            }

            // if the execution is against Mapred DFS, set
            // working dir to /user/<userid>
            if (!Utils.isLocal(pigContext, conf)) {
                fs.setWorkingDirectory(jobcontext.getWorkingDirectory());
            }

            // first pass input location to the loader - for this send a
            // clone of the configuration we have - this is so that if the
            // loader (or the inputformat of the loader) decide to store the
            // input location into the configuration (for example,
            // FileInputFormat stores this in mapred.input.dir in the conf),
            // then for different inputs, the loaders don't end up
            // over-writing the same conf.
            FuncSpec loadFuncSpec = inputs.get(i).getFuncSpec();
            LoadFunc loadFunc = (LoadFunc) PigContext.instantiateFuncFromSpec(loadFuncSpec);
            boolean combinable = !(loadFunc instanceof MergeJoinIndexer || loadFunc instanceof IndexableLoadFunc
                    || (loadFunc instanceof CollectableLoadFunc && loadFunc instanceof OrderedLoadFunc));
            if (combinable)
                combinable = !conf.getBoolean("pig.noSplitCombination", false);
            JobConf confClone = new JobConf(conf);
            Job inputSpecificJob = new Job(confClone);
            // Pass loader signature to LoadFunc and to InputFormat through
            // the conf
            passLoadSignature(loadFunc, i, inputSpecificJob.getConfiguration());
            loadFunc.setLocation(inputs.get(i).getFileName(), inputSpecificJob);
            // The above setLocation call could write to the conf within
            // the inputSpecificJob - use this updated conf

            // get the InputFormat from it and ask for splits
            InputFormat inpFormat = loadFunc.getInputFormat();
            List<InputSplit> oneInputSplits = inpFormat.getSplits(
                    HadoopShims.createJobContext(inputSpecificJob.getConfiguration(), jobcontext.getJobID()));
            List<InputSplit> oneInputPigSplits = getPigSplits(oneInputSplits, i, inpTargets.get(i),
                    HadoopShims.getDefaultBlockSize(fs, isFsPath ? path : fs.getWorkingDirectory()), combinable,
                    confClone);
            splits.addAll(oneInputPigSplits);
        } catch (ExecException ee) {
            throw ee;
        } catch (Exception e) {
            int errCode = 2118;
            String msg = "Unable to create input splits for: " + inputs.get(i).getFileName();
            if (e.getMessage() != null && (!e.getMessage().isEmpty())) {
                throw new ExecException(e.getMessage(), errCode, PigException.BUG, e);
            } else {
                throw new ExecException(msg, errCode, PigException.BUG, e);
            }
        }
    }

    // XXX hadoop 20 new API integration: get around a hadoop 20 bug by
    // passing total # of splits to each split so that it can be retrieved
    // in the RecordReader method when called by mapreduce framework later.
    int n = splits.size();
    // also passing the multi-input flag to the back-end so that
    // the multi-input record counters can be created
    int m = inputs.size();

    boolean disableCounter = conf.getBoolean("pig.disable.counter", false);
    if ((m > 1) && disableCounter) {
        log.info("Disable Pig custom input counters");
    }

    for (InputSplit split : splits) {
        ((PigSplit) split).setTotalSplits(n);
        if (m > 1)
            ((PigSplit) split).setMultiInputs(true);
        ((PigSplit) split).setDisableCounter(disableCounter);
    }

    return splits;
}

From source file:org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigInputSplitFormat.java

License:Apache License

@SuppressWarnings({ "unchecked", "rawtypes" })
@Override
public List<InputSplit> getSplits(JobContext jobcontext) throws IOException {

    Configuration conf = jobcontext.getConfiguration();

    ArrayList<FileSpec> inputs;
    ArrayList<ArrayList<OperatorKey>> inpTargets;
    PigContext pigContext;
    try {
        inputs = (ArrayList<FileSpec>) ObjectSerializer.deserialize(conf.get("pig.inputs"));
        inpTargets = (ArrayList<ArrayList<OperatorKey>>) ObjectSerializer
                .deserialize(conf.get("pig.inpTargets"));
        pigContext = (PigContext) ObjectSerializer.deserialize(conf.get("pig.pigContext"));
        PigContext.setPackageImportList(
                (ArrayList<String>) ObjectSerializer.deserialize(conf.get("udf.import.list")));
        MapRedUtil.setupUDFContext(conf);
    } catch (Exception e) {
        int errCode = 2094;
        String msg = "Unable to deserialize object.";
        throw new ExecException(msg, errCode, PigException.BUG, e);
    }

    ArrayList<InputSplit> splits = new ArrayList<InputSplit>();
    for (int i = 0; i < inputs.size(); i++) {
        try {
            Path path = new Path(inputs.get(i).getFileName());

            FileSystem fs;
            boolean isFsPath = true;
            try {
                fs = path.getFileSystem(conf);
            } catch (Exception e) {
                // If an application specific
                // scheme was used
                // (e.g.: "hbase://table") we will fail
                // getting the file system. That's
                // ok, we just use the dfs in that case.
                fs = new Path("/").getFileSystem(conf);
                isFsPath = false;
            }

            // if the execution is against Mapred DFS, set
            // working dir to /user/<userid>
            if (!Utils.isLocal(pigContext, conf)) {
                fs.setWorkingDirectory(jobcontext.getWorkingDirectory());
            }

            // first pass input location to the loader - for this send a
            // clone of the configuration we have - this is so that if the
            // loader (or the inputformat of the loader) decide to store the
            // input location into the configuration (for example,
            // FileInputFormat stores this in mapred.input.dir in the conf),
            // then for different inputs, the loaders don't end up
            // over-writing the same conf.
            FuncSpec loadFuncSpec = inputs.get(i).getFuncSpec();
            LoadFunc loadFunc = (LoadFunc) PigContext.instantiateFuncFromSpec(loadFuncSpec);
            boolean combinable = !(loadFunc instanceof MergeJoinIndexer || loadFunc instanceof IndexableLoadFunc
                    || (loadFunc instanceof CollectableLoadFunc && loadFunc instanceof OrderedLoadFunc));
            if (combinable)
                combinable = !conf.getBoolean("pig.noSplitCombination", false);
            Configuration confClone = new Configuration(conf);
            Job inputSpecificJob = new Job(confClone);
            // Pass loader signature to LoadFunc and to InputFormat through
            // the conf
            passLoadSignature(loadFunc, i, inputSpecificJob.getConfiguration());
            loadFunc.setLocation(inputs.get(i).getFileName(), inputSpecificJob);
            // The above setLocation call could write to the conf within
            // the inputSpecificJob - use this updated conf

            // get the InputFormat from it and ask for splits
            InputFormat inpFormat = loadFunc.getInputFormat();
            // List<InputSplit> oneInputSplits = inpFormat.getSplits(
            // HadoopShims.createJobContext(inputSpecificJob.getConfiguration(),
            // jobcontext.getJobID()));

            List<InputSplit> oneInputSplits = getSplitsSample(jobcontext);

            List<InputSplit> oneInputPigSplits = getPigSplits(oneInputSplits, i, inpTargets.get(i),
                    HadoopShims.getDefaultBlockSize(fs, isFsPath ? path : fs.getWorkingDirectory()), combinable,
                    confClone);
            splits.addAll(oneInputPigSplits);
        } catch (ExecException ee) {
            throw ee;
        } catch (Exception e) {
            int errCode = 2118;
            String msg = "Unable to create input splits for: " + inputs.get(i).getFileName();
            if (e.getMessage() != null && (!e.getMessage().isEmpty())) {
                throw new ExecException(e.getMessage(), errCode, PigException.BUG, e);
            } else {
                throw new ExecException(msg, errCode, PigException.BUG, e);
            }
        }
    }

    // XXX hadoop 20 new API integration: get around a hadoop 20 bug by
    // passing total # of splits to each split so that it can be retrieved
    // in the RecordReader method when called by mapreduce framework later.
    int n = splits.size();
    // also passing the multi-input flag to the back-end so that
    // the multi-input record counters can be created
    int m = inputs.size();

    boolean disableCounter = conf.getBoolean("pig.disable.counter", false);
    if ((m > 1) && disableCounter) {
        log.info("Disable Pig custom input counters");
    }

    for (InputSplit split : splits) {
        ((PigSplit) split).setTotalSplits(n);
        if (m > 1)
            ((PigSplit) split).setMultiInputs(true);
        ((PigSplit) split).setDisableCounter(disableCounter);
    }
    // shuffle --> return splits
    return splits;
}

From source file:org.apache.pig.impl.util.JarManager.java

License:Apache License

/**
 * Add the qualified path name of jars containing the given classes 
 *
 * @param fs
 *            FileSystem object
 * @param jars
 *            the resolved path names to be added to this set
 * @param classes
 *            classes to find
 */
private static void addQualifiedJarsName(FileSystem fs, Set<String> jars, Class<?>... classes) {
    URI fsUri = fs.getUri();
    Path workingDir = fs.getWorkingDirectory();
    for (Class<?> clazz : classes) {
        String jarName = findContainingJar(clazz);
        if (jarName == null) {
            log.warn("Could not find jar for class " + clazz);
            continue;
        }
        jars.add(new Path(jarName).makeQualified(fsUri, workingDir).toString());
    }
}

From source file:org.apache.sentry.tests.e2e.solr.AbstractSolrSentryTestBase.java

License:Apache License

public static File setupSentry() throws Exception {
    File sentrySite = File.createTempFile("sentry-site", "xml");
    sentrySite.deleteOnExit();
    File authProviderDir = new File(RESOURCES_DIR, "sentry");
    String authProviderName = "test-authz-provider.ini";
    FileSystem clusterFs = dfsCluster.getFileSystem();
    clusterFs.copyFromLocalFile(false, new Path(authProviderDir.toString(), authProviderName),
            new Path(authProviderName));

    // need to write sentry-site at execution time because we don't know
    // the location of sentry.solr.provider.resource beforehand
    StringBuilder sentrySiteData = new StringBuilder();
    sentrySiteData.append("<configuration>\n");
    addPropertyToSentry(sentrySiteData, "sentry.provider",
            "org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider");
    addPropertyToSentry(sentrySiteData, "sentry.solr.provider.resource",
            clusterFs.getWorkingDirectory() + File.separator + authProviderName);
    sentrySiteData.append("</configuration>\n");
    FileUtils.writeStringToFile(sentrySite, sentrySiteData.toString());
    return sentrySite;
}

From source file:org.apache.slider.common.tools.TestSliderUtils.java

License:Apache License

@Test
public void testGetMetaInfoStreamFromZip() throws Exception {
    String zipFileName = TestUtility.createAppPackage(folder, "testpkg", "test.zip",
            "target/test-classes/org/apache/slider/common/tools/test");
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.getLocal(configuration);
    log.info("fs working dir is {}", fs.getWorkingDirectory().toString());
    SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);

    InputStream stream = SliderUtils.getApplicationResourceInputStream(sliderFileSystem.getFileSystem(),
            new Path(zipFileName), "metainfo.xml");
    Assert.assertTrue(stream != null);
    Assert.assertTrue(stream.available() > 0);
}

From source file:org.apache.slider.providers.agent.TestAgentUtils.java

License:Apache License

@Test
public void testGetApplicationMetainfo() throws Exception {
    String zipFileName = TestUtility.createAppPackage(folder, "testpkg", "test.zip",
            "target/test-classes/org/apache/slider/common/tools/test");
    Configuration configuration = new Configuration();
    FileSystem fs = FileSystem.getLocal(configuration);
    log.info("fs working dir is {}", fs.getWorkingDirectory().toString());
    SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);

    // Without an accompanying metainfo file, read metainfo from the zip file
    Metainfo metainfo = AgentUtils.getApplicationMetainfo(sliderFileSystem, zipFileName, false);
    Assert.assertNotNull(metainfo.getApplication());
    Assert.assertEquals("STORM", metainfo.getApplication().getName());

    // With an accompanying metainfo file, read metainfo from the accompanying file
    String acompanyFileName = zipFileName + ".metainfo.xml";
    File f = new File(acompanyFileName);
    try (BufferedWriter writer = new BufferedWriter(new FileWriter(f))) {
        writer.write(metainfo_str);
    }
    metainfo = AgentUtils.getApplicationMetainfo(sliderFileSystem, zipFileName, false);
    Assert.assertNotNull(metainfo.getApplication());
    Assert.assertEquals("MYTESTAPPLICATION", metainfo.getApplication().getName());
}