Example usage for org.apache.hadoop.conf Configuration setBoolean

Introduction

On this page you can find example usage of the org.apache.hadoop.conf.Configuration method setBoolean.

Prototype

public void setBoolean(String name, boolean value) 

Document

Set the value of the name property to a boolean.
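
Before the project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of how a value written with setBoolean can be read back with getBoolean; the property name used is hypothetical:

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Set a boolean property (the property name is made up for illustration).
        conf.setBoolean("example.feature.enabled", true);

        // Read it back; the second argument is the default returned when the property is unset.
        boolean enabled = conf.getBoolean("example.feature.enabled", false);
        System.out.println("example.feature.enabled = " + enabled);
    }
}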

Usage

From source file: io.amient.kafka.hadoop.CheckpointManager.java

License: Apache License

public static void configureUseZooKeeper(Configuration conf, String kafkaGroupId) {
    conf.setBoolean(CONFIG_CHECKPOINTS_ZOOKEEPER, true);
    conf.set(CONFIG_KAFKA_GROUP_ID, kafkaGroupId);
}

From source file: io.covert.dns.storage.accumulo.mutgen.EdgeMutationGeneratorFactory.java

License: Apache License

public static void configure(Job job, String table, String dataType, boolean bidirectional,
        boolean univariateStats, Multimap<String, String> edges) {
    Configuration conf = job.getConfiguration();

    conf.set("edge.mutation.generator.table", table);
    conf.set("edge.mutation.generator.data.type", dataType);

    conf.setBoolean("edge.mutation.generator.bidirection", bidirectional);
    conf.setBoolean("edge.mutation.generator.univar.stats", univariateStats);

    StringBuilder s = new StringBuilder();
    boolean first = true;
    for (String name1 : edges.keySet()) {
        for (String name2 : edges.get(name1)) {
            if (first) {
                first = false;
                s.append(name1).append(":").append(name2);
            } else {
                s.append(",").append(name1).append(":").append(name2);
            }
        }
    }
    conf.set("edge.mutation.generator.edges", s.toString());
}

From source file: io.dataapps.chlorine.hadoop.HDFSScanMR.java

License: Apache License

public static Job makeJob(Configuration conf, Path in, Path out, String matchPath, long scanSince,
        String chlorineConfigFilePath, String queue, String maskPath) throws IOException {
    conf.setBoolean("mapred.output.compress", false);
    conf.setLong("scanSince", scanSince);
    conf.set("matchPath", matchPath);
    conf.set("maskPath", maskPath);
    conf.set("inputPath", in.toString());
    if (queue != null) {
        conf.set("mapred.job.queue.name", queue);
    }
    conf.set("fs.permissions.umask-mode", "007");
    conf.setInt("input_path_depth", in.depth());
    Job job = Job.getInstance(conf, "Chlorine_HDFS_Scan");
    job.setJarByClass(HDFSScanMR.class);
    if (chlorineConfigFilePath != null) {
        try {
            job.addCacheFile(new URI(chlorineConfigFilePath));
            conf.set("finder_file", (new File(chlorineConfigFilePath)).getName());
        } catch (URISyntaxException e) {
            LOG.error(e);
        }
    }
    job.setMapperClass(DeepScanMapper.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(TextInputFormat.class);
    TextInputFormat.addInputPath(job, in);
    TextInputFormat.setInputDirRecursive(job, true);
    TextInputFormat.setInputPathFilter(job, NewFilesFilter.class);
    FileOutputFormat.setOutputPath(job, out);
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
    return job;
}

From source file: io.divolte.server.hdfs.HdfsFlusher.java

License: Apache License

public HdfsFlusher(final ValidatedConfiguration vc, final Schema schema) {
    Objects.requireNonNull(vc);

    final FileSystem hadoopFs;
    final Configuration hdfsConfiguration = new Configuration();
    final short hdfsReplication = (short) vc.configuration().hdfsFlusher.hdfs.replication;

    /*
     * The HDFS client creates a JVM shutdown hook, which interferes with our own server shutdown hook.
     * This config option disables the built-in shutdown hook. We call FileSystem.closeAll() ourselves
     * in the server shutdown hook instead.
     */
    hdfsConfiguration.setBoolean("fs.automatic.close", false);
    try {
        hadoopFs = vc.configuration().hdfsFlusher.hdfs.uri.map(uri -> {
            try {
                return FileSystem.get(new URI(uri), hdfsConfiguration);
            } catch (IOException | URISyntaxException e) {
                /*
                 * It is possible to create a FileSystem instance when HDFS is not available (e.g. NameNode down).
                 * This exception only occurs when there is a configuration error in the URI (e.g. wrong scheme).
                 * So we fail to start up in this case. Below we create the actual HDFS connection, by opening
                 * files. If that fails, we do startup and initiate the regular retry cycle.
                 */
                logger.error("Could not initialize HDFS filesystem.", e);
                throw new RuntimeException("Could not initialize HDFS filesystem", e);
            }
        }).orElse(FileSystem.get(hdfsConfiguration));
    } catch (IOException ioe) {
        /*
         * It is possible to create a FileSystem instance when HDFS is not available (e.g. NameNode down).
         * This exception only occurs when there is a configuration error in the URI (e.g. wrong scheme).
         * So we fail to start up in this case. Below we create the actual HDFS connection, by opening
         * files. If that fails, we do startup and initiate the regular retry cycle.
         */
        logger.error("Could not initialize HDFS filesystem.", ioe);
        throw new RuntimeException("Could not initialize HDFS filesystem", ioe);
    }

    fileStrategy = FileCreateAndSyncStrategy.create(vc, hadoopFs, hdfsReplication,
            Objects.requireNonNull(schema));
    lastHdfsResult = fileStrategy.setup();
}

From source file: io.hops.transaction.TestTransaction.java

License: Apache License

@BeforeClass
public static void setupCluster() throws Exception {
    Configuration conf = new HdfsConfiguration();

    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 10);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).storagesPerDatanode(1).build();

    cluster.waitActive();

}

From source file: io.pravega.local.LocalHDFSEmulator.java

License: Open Source License

public void start() throws IOException {
    baseDir = Files.createTempDirectory(baseDirName).toFile().getAbsoluteFile();
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.setBoolean("dfs.permissions.enabled", true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
}

From source file: io.pravega.segmentstore.storage.impl.hdfs.HDFSClusterHelpers.java

License: Open Source License

/**
 * Creates a MiniDFSCluster at the given Path.
 *
 * @param path The path to create at.
 * @return A MiniDFSCluster.
 * @throws IOException If an Exception occurred.
 */
public static MiniDFSCluster createMiniDFSCluster(String path) throws IOException {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, path);
    conf.setBoolean("dfs.permissions.enabled", true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    return builder.build();
}

From source file: io.prestosql.plugin.hive.HdfsConfigurationInitializer.java

License: Apache License

public void initializeConfiguration(Configuration config) {
    copy(resourcesConfiguration, config);

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, NoOpDNSToSwitchMapping.class,
            DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass(HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, SocksSocketFactory.class,
                SocketFactory.class);
        config.set(HADOOP_SOCKS_SERVER_KEY, socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings(DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get(DFS_DOMAIN_SOCKET_PATH_KEY, "").trim().isEmpty()) {
        config.setBooleanIfUnset(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    }

    config.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, toIntExact(dfsTimeout.toMillis()));
    config.setInt(IPC_PING_INTERVAL_KEY, toIntExact(ipcPingInterval.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_TIMEOUT_KEY, toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, dfsConnectMaxRetries);

    if (isHdfsWireEncryptionEnabled) {
        config.set(HADOOP_RPC_PROTECTION, "privacy");
        config.setBoolean("dfs.encrypt.data.transfer", true);
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);

    config.setInt(LineRecordReader.MAX_LINE_LENGTH, textMaxLineLength);

    configureCompression(config, compressionCodec);

    s3ConfigurationUpdater.updateConfiguration(config);
    gcsConfigurationInitialize.updateConfiguration(config);
}

From source file: io.prestosql.plugin.hive.HdfsConfigurationInitializer.java

License: Apache License

public static void configureCompression(Configuration config, HiveCompressionCodec compressionCodec) {
    boolean compression = compressionCodec != HiveCompressionCodec.NONE;
    config.setBoolean(COMPRESSRESULT.varname, compression);
    config.setBoolean("mapred.output.compress", compression);
    config.setBoolean(FileOutputFormat.COMPRESS, compression);
    // For DWRF
    com.facebook.hive.orc.OrcConf.setVar(config, HIVE_ORC_COMPRESSION,
            compressionCodec.getOrcCompressionKind().name());
    // For ORC
    OrcConf.COMPRESS.setString(config, compressionCodec.getOrcCompressionKind().name());
    // For RCFile and Text
    if (compressionCodec.getCodec().isPresent()) {
        config.set("mapred.output.compression.codec", compressionCodec.getCodec().get().getName());
        config.set(FileOutputFormat.COMPRESS_CODEC, compressionCodec.getCodec().get().getName());
    } else {
        config.unset("mapred.output.compression.codec");
        config.unset(FileOutputFormat.COMPRESS_CODEC);
    }
    // For Parquet
    config.set(ParquetOutputFormat.COMPRESSION, compressionCodec.getParquetCompressionCodec().name());
    // For SequenceFile
    config.set(FileOutputFormat.COMPRESS_TYPE, BLOCK.toString());
}

From source file: io.prestosql.plugin.hive.HiveUtil.java

License: Apache License

public static void setReadColumns(Configuration configuration, List<Integer> readHiveColumnIndexes) {
    configuration.set(READ_COLUMN_IDS_CONF_STR, Joiner.on(',').join(readHiveColumnIndexes));
    configuration.setBoolean(READ_ALL_COLUMNS, false);
}