Example usage for org.apache.hadoop.conf Configuration getBoolean

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property exists, or if the specified value is not a valid boolean, then defaultValue is returned.
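
As a quick illustration before the usage examples below, here is a minimal sketch of reading a flag with a fallback default. The property name my.feature.enabled is hypothetical, chosen only for this example:

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Property not set, so the supplied default (false) is returned.
        boolean before = conf.getBoolean("my.feature.enabled", false);

        // Set the property and read it back; the stored "true"/"false"
        // string is parsed into a boolean.
        conf.setBoolean("my.feature.enabled", true);
        boolean after = conf.getBoolean("my.feature.enabled", false);

        System.out.println(before + " -> " + after); // prints: false -> true
    }
}

The examples that follow all use the same pattern: a constant property name paired with a constant default, so the code behaves predictably when the configuration key is absent.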

Usage

From source file:cmd.infer.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);

    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Tool infer = new InferDriver(configuration);
    infer.run(new String[] { args[0], args[1] });

    return 0;
}

From source file:cmd.rdf2adjacencylist.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERWRITE_OUTPUT,
            Constants.OPTION_OVERWRITE_OUTPUT_DEFAULT);

    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Tool tool = new Rdf2AdjacencyListDriver(configuration);
    tool.run(new String[] { args[0], args[1] });

    return 0;
}

From source file:cmd.stats.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);

    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Tool stats = new StatsDriver(configuration);
    stats.run(new String[] { args[0], args[1] });

    return 0;
}

From source file:cmd.tdbloader4.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    configuration.set(Constants.RUN_ID, String.valueOf(System.currentTimeMillis()));
    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);
    boolean copyToLocal = configuration.getBoolean(Constants.OPTION_COPY_TO_LOCAL,
            Constants.OPTION_COPY_TO_LOCAL_DEFAULT);
    boolean verify = configuration.getBoolean(Constants.OPTION_VERIFY, Constants.OPTION_VERIFY_DEFAULT);
    boolean runLocal = configuration.getBoolean(Constants.OPTION_RUN_LOCAL, Constants.OPTION_RUN_LOCAL_DEFAULT);

    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_1), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_2), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_3), true);
        fs.delete(new Path(args[1] + OUTPUT_PATH_POSTFIX_4), true);
    }

    if ((copyToLocal) || (runLocal)) {
        File path = new File(args[1]);
        path.mkdirs();
    }

    Tool first = new FirstDriver(configuration);
    int status = first.run(new String[] { args[0], args[1] + OUTPUT_PATH_POSTFIX_1 });
    if (status != 0) {
        return status;
    }

    createOffsetsFile(fs, args[1] + OUTPUT_PATH_POSTFIX_1, args[1] + OUTPUT_PATH_POSTFIX_1);
    Path offsets = new Path(args[1] + OUTPUT_PATH_POSTFIX_1, Constants.OFFSETS_FILENAME);
    DistributedCache.addCacheFile(offsets.toUri(), configuration);

    Tool second = new SecondDriver(configuration);
    status = second.run(new String[] { args[0], args[1] + OUTPUT_PATH_POSTFIX_2 });
    if (status != 0) {
        return status;
    }

    Tool third = new ThirdDriver(configuration);
    status = third.run(new String[] { args[1] + OUTPUT_PATH_POSTFIX_2, args[1] + OUTPUT_PATH_POSTFIX_3 });
    if (status != 0) {
        return status;
    }

    Tool fourth = new FourthDriver(configuration);
    status = fourth.run(new String[] { args[1] + OUTPUT_PATH_POSTFIX_3, args[1] + OUTPUT_PATH_POSTFIX_4 });
    if (status != 0) {
        return status;
    }

    if (copyToLocal) {
        Tool download = new download(configuration);
        download.run(
                new String[] { args[1] + OUTPUT_PATH_POSTFIX_2, args[1] + OUTPUT_PATH_POSTFIX_4, args[1] });
    }

    if (verify) {
        DatasetGraphTDB dsgMem = load(args[0]);
        Location location = new Location(args[1]);

        if (!copyToLocal) {
            // TODO: this is a sort of a cheat and it could go away (if it turns out to be too slow)!
            download.fixNodeTable2(location);
        }

        DatasetGraphTDB dsgDisk = SetupTDB.buildDataset(location);
        boolean isomorphic = isomorphic(dsgMem, dsgDisk);
        System.out.println("> " + isomorphic);
    }

    return status;
}

From source file:cn.easyhbase.client.hbase.HBaseAsyncOperationFactory.java

License:Apache License

public static HBaseAsyncOperation create(Configuration configuration) throws IOException {
    boolean enableAsyncMethod = configuration.getBoolean(ENABLE_ASYNC_METHOD, DEFAULT_ENABLE_ASYNC_METHOD);
    LOGGER.info("hbase.client.async.enable: " + enableAsyncMethod);
    if (!enableAsyncMethod) {
        return DisabledHBaseAsyncOperation.INSTANCE;
    }

    int queueSize = configuration.getInt(ASYNC_IN_QUEUE_SIZE, DEFAULT_ASYNC_IN_QUEUE_SIZE);

    if (configuration.get(ASYNC_PERIODIC_FLUSH_TIME, null) == null) {
        configuration.setInt(ASYNC_PERIODIC_FLUSH_TIME, DEFAULT_ASYNC_PERIODIC_FLUSH_TIME);
    }

    if (configuration.get(ASYNC_RETRY_COUNT, null) == null) {
        configuration.setInt(ASYNC_RETRY_COUNT, DEFAULT_ASYNC_RETRY_COUNT);
    }

    return new HBaseAsyncTemplate(configuration, queueSize);
}

From source file:cn.easyhbase.client.hbase.HBaseAsyncOperationFactory.java

License:Apache License

public static HBaseAsyncOperation create(Connection connection, Configuration configuration)
        throws IOException {
    boolean enableAsyncMethod = configuration.getBoolean(ENABLE_ASYNC_METHOD, DEFAULT_ENABLE_ASYNC_METHOD);
    if (!enableAsyncMethod) {
        return DisabledHBaseAsyncOperation.INSTANCE;
    }

    int queueSize = configuration.getInt(ASYNC_IN_QUEUE_SIZE, DEFAULT_ASYNC_IN_QUEUE_SIZE);

    if (configuration.get(ASYNC_PERIODIC_FLUSH_TIME, null) == null) {
        configuration.setInt(ASYNC_PERIODIC_FLUSH_TIME, DEFAULT_ASYNC_PERIODIC_FLUSH_TIME);
    }

    if (configuration.get(ASYNC_RETRY_COUNT, null) == null) {
        configuration.setInt(ASYNC_RETRY_COUNT, DEFAULT_ASYNC_RETRY_COUNT);
    }

    return new HBaseAsyncTemplate(connection, configuration, queueSize);
}

From source file:cn.edu.hfut.dmic.webcollectorcluster.fetcher.Fetcher.java

public Fetcher(Configuration conf) {
    super(conf);
    try {
        isContentStored = conf.getBoolean("fetcher.store.content", false);
        String requestFactoryClass = conf.get("plugin.request.factory.class");
        String parseFactoryClass = conf.get("plugin.parser.factory.class");
        String generatorFactoryClass = conf.get("plugin.generator.factory.class");
        String handlerFactoryClass = conf.get("plugin.fetchhandler.factory.class");
        requestFactory = (RequestFactory) Class.forName(requestFactoryClass).newInstance();
        parserFactory = (ParserFactory) Class.forName(parseFactoryClass).newInstance();
        generatorFactory = (GeneratorFactory) Class.forName(generatorFactoryClass).newInstance();
        HandlerFactory handlerFactory = (HandlerFactory) Class.forName(handlerFactoryClass).newInstance();
        setHandler(handlerFactory.createHandler());
    } catch (Exception ex) {
        LogUtils.getLogger().info("Exception", ex);
    }

}

From source file:co.cask.cdap.app.runtime.spark.SparkRuntimeContextConfig.java

License:Apache License

/**
 * Returns {@code true} if running in local mode.
 */
static boolean isLocal(Configuration hConf) {
    return !hConf.getBoolean(HCONF_ATTR_CLUSTER_MODE, false);
}

From source file:co.cask.cdap.internal.app.runtime.batch.dataset.partitioned.DynamicPartitioningOutputCommitter.java

License:Apache License

@Override
public void commitJob(JobContext context) throws IOException {
    Configuration configuration = context.getConfiguration();
    MapReduceClassLoader classLoader = MapReduceClassLoader.getFromConfiguration(configuration);
    BasicMapReduceTaskContext taskContext = classLoader.getTaskContextProvider().get(this.taskContext);

    String outputDatasetName = configuration.get(Constants.Dataset.Partitioned.HCONF_ATTR_OUTPUT_DATASET);
    PartitionedFileSet outputDataset = taskContext.getDataset(outputDatasetName);
    Partitioning partitioning = outputDataset.getPartitioning();

    Set<PartitionKey> partitionsToAdd = new HashSet<>();
    Set<String> relativePaths = new HashSet<>();
    // Go over all files in the temporary directory and keep track of partitions to add for them
    FileStatus[] allCommittedTaskPaths = getAllCommittedTaskPaths(context);
    for (FileStatus committedTaskPath : allCommittedTaskPaths) {
        FileSystem fs = committedTaskPath.getPath().getFileSystem(configuration);
        RemoteIterator<LocatedFileStatus> fileIter = fs.listFiles(committedTaskPath.getPath(), true);
        while (fileIter.hasNext()) {
            Path path = fileIter.next().getPath();
            String relativePath = getRelative(committedTaskPath.getPath(), path);

            int lastPathSepIdx = relativePath.lastIndexOf(Path.SEPARATOR);
            if (lastPathSepIdx == -1) {
                // this shouldn't happen because each relative path should consist of at least one partition key and
                // the output file name
                LOG.warn("Skipping path '{}'. It's relative path '{}' has fewer than two parts", path,
                        relativePath);/*ww w  .j av a  2s. c om*/
                continue;
            }
            // relativePath = "../key1/key2/part-m-00000"
            // relativeDir = "../key1/key2"
            // fileName = "part-m-00000"
            String relativeDir = relativePath.substring(0, lastPathSepIdx);
            String fileName = relativePath.substring(lastPathSepIdx + 1);

            Path finalDir = new Path(FileOutputFormat.getOutputPath(context), relativeDir);
            Path finalPath = new Path(finalDir, fileName);
            if (fs.exists(finalPath)) {
                throw new FileAlreadyExistsException("Final output path " + finalPath + " already exists");
            }
            PartitionKey partitionKey = getPartitionKey(partitioning, relativeDir);
            partitionsToAdd.add(partitionKey);
            relativePaths.add(relativeDir);
        }
    }

    // We need to copy to the parent of the FileOutputFormat's outputDir, since we added a _temporary_jobId suffix to
    // the original outputDir.
    Path finalOutput = FileOutputFormat.getOutputPath(context);
    FileSystem fs = finalOutput.getFileSystem(configuration);
    for (FileStatus stat : getAllCommittedTaskPaths(context)) {
        mergePaths(fs, stat, finalOutput);
    }

    // compute the metadata to be written to every output partition
    Map<String, String> metadata = ConfigurationUtil.getNamedConfigurations(this.taskContext.getConfiguration(),
            PartitionedFileSetArguments.OUTPUT_PARTITION_METADATA_PREFIX);

    // create all the necessary partitions
    for (PartitionKey partitionKey : partitionsToAdd) {
        PartitionOutput partitionOutput = outputDataset.getPartitionOutput(partitionKey);
        partitionOutput.setMetadata(metadata);
        partitionOutput.addPartition();
    }

    // close the TaskContext, which flushes dataset operations
    try {
        taskContext.flushOperations();
    } catch (Exception e) {
        Throwables.propagateIfPossible(e, IOException.class);
        throw new IOException(e);
    }

    // delete the job-specific _temporary folder and create a _done file in the o/p folder
    cleanupJob(context);

    // mark all the final output paths with a _SUCCESS file, if configured to do so (default = true)
    if (configuration.getBoolean(SUCCESSFUL_JOB_OUTPUT_DIR_MARKER, true)) {
        for (String relativePath : relativePaths) {
            Path pathToMark = new Path(finalOutput, relativePath);
            Path markerPath = new Path(pathToMark, SUCCEEDED_FILE_NAME);
            fs.createNewFile(markerPath);
        }
    }
}

From source file:co.cask.hydrator.plugin.db.batch.sink.ETLDBOutputFormat.java

License:Apache License

private Connection getConnection(Configuration conf) {
    Connection connection;
    try {
        String url = conf.get(DBConfiguration.URL_PROPERTY);
        try {
            // throws SQLException if no suitable driver is found
            DriverManager.getDriver(url);
        } catch (SQLException e) {
            if (driverShim == null) {
                if (driver == null) {
                    ClassLoader classLoader = conf.getClassLoader();
                    @SuppressWarnings("unchecked")
                    Class<? extends Driver> driverClass = (Class<? extends Driver>) classLoader
                            .loadClass(conf.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
                    driver = driverClass.newInstance();

                    // De-register the default driver that gets registered when driver class is loaded.
                    DBUtils.deregisterAllDrivers(driverClass);
                }

                driverShim = new JDBCDriverShim(driver);
                DriverManager.registerDriver(driverShim);
                LOG.debug("Registered JDBC driver via shim {}. Actual Driver {}.", driverShim, driver);
            }
        }

        if (conf.get(DBConfiguration.USERNAME_PROPERTY) == null) {
            connection = DriverManager.getConnection(url);
        } else {
            connection = DriverManager.getConnection(url, conf.get(DBConfiguration.USERNAME_PROPERTY),
                    conf.get(DBConfiguration.PASSWORD_PROPERTY));
        }

        boolean autoCommitEnabled = conf.getBoolean(AUTO_COMMIT_ENABLED, false);
        if (autoCommitEnabled) {
            // hack to work around jdbc drivers like the hive driver that throw exceptions on commit
            connection = new NoOpCommitConnection(connection);
        } else {
            connection.setAutoCommit(false);
        }
        connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    return connection;
}