Example usage for org.apache.hadoop.conf Configuration getBoolean

List of usage examples for org.apache.hadoop.conf Configuration getBoolean

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Source Link

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
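
A minimal sketch of the call; the property name "example.feature.enabled" is made up purely for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetBooleanSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "example.feature.enabled" is an illustrative key, not a real Hadoop property.
        conf.setBoolean("example.feature.enabled", true);
        // Returns the stored value, or the supplied default (false) when the key is absent.
        boolean enabled = conf.getBoolean("example.feature.enabled", false);
        System.out.println("enabled = " + enabled); // prints "enabled = true"
    }
}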

Usage

From source file:idgs.ConfVar.java

License:Open Source License

public static Boolean getBoolVar(Configuration conf, ConfVar variable) {
    require(variable.valClass == Boolean.class);
    return conf.getBoolean(variable.varname, variable.defaultBoolVal);
}
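
The snippet above hides getBoolean behind a typed accessor keyed by an enum. The ConfVar constants themselves are not shown on this page, so the following is only a self-contained analogue of the pattern, not the real idgs.ConfVar:

import org.apache.hadoop.conf.Configuration;

public class ConfVarStyleSketch {
    // Illustrative enum mirroring the varname/defaultBoolVal fields used by getBoolVar above.
    enum Flag {
        COMPRESSION_ENABLED("example.compression.enabled", true);

        final String varname;
        final boolean defaultBoolVal;

        Flag(String varname, boolean defaultBoolVal) {
            this.varname = varname;
            this.defaultBoolVal = defaultBoolVal;
        }
    }

    static boolean getBoolVar(Configuration conf, Flag flag) {
        return conf.getBoolean(flag.varname, flag.defaultBoolVal);
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        System.out.println(getBoolVar(conf, Flag.COMPRESSION_ENABLED)); // true unless overridden
    }
}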

From source file:InvertedIndex.NLineRecordReader.java

License:Apache License

public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
    FileSplit split = (FileSplit) genericSplit;
    Configuration job = context.getConfiguration();
    this.job = job;
    this.context = context;
    this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength", Integer.MAX_VALUE);
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();
    this.path = file;
    this.length = split.getLength();
    compressionCodecs = new CompressionCodecFactory(job);
    final CompressionCodec codec = compressionCodecs.getCodec(file);

    // open the file and seek to the start of the split
    FileSystem fs = file.getFileSystem(job);
    FSDataInputStream fileIn = fs.open(split.getPath());
    boolean skipFirstLine = false;
    if (codec != null) {
        if (0 == split.getLength() && job.getBoolean("mapred.ignore.badcompress", false)) {
            if (null != context && context instanceof TaskInputOutputContext) {
                ((TaskInputOutputContext) context).getCounter("Input Counter", "Gzip File length is zero")
                        .increment(1);
            }
            if (null != this.path) {
                LOG.warn("Skip 0-length Zip file: " + this.path.toString());
            }
            in = new NLineReader(fileIn, job);
        } else {
            try {
                in = new NLineReader(codec.createInputStream(fileIn), job);
                end = Long.MAX_VALUE;
            } catch (IOException e) {
                if (isIgnoreBadCompress(job, e)) {
                    in = new NLineReader(fileIn, job);
                    end = start;
                    LOG.warn("Skip Bad Compress File: " + this.path.toString());
                    LOG.warn("initialize line read error", e);
                    ((TaskInputOutputContext) context).getCounter("Input Counter", "Skip Bad Zip File")
                            .increment(1);
                    ((TaskInputOutputContext) context).getCounter("Input Counter", "Total Skip Bad Zip Length")
                            .increment(this.length);
                } else {
                    throw e;
                }
            }
        }
    } else {
        if (start != 0) {
            skipFirstLine = true;
            --start;
            fileIn.seek(start);
        }
        in = new NLineReader(fileIn, job);
    }
    if (skipFirstLine) { // skip first line and re-establish "start".
        start += in.readLine(new Text(), 0, (int) Math.min((long) Integer.MAX_VALUE, end - start));
    }
    this.pos = start;
}
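
On the job-submission side, the two properties this record reader consults can be set before the reader runs. The keys below are the ones read above; the chosen values are only an illustration:

import org.apache.hadoop.conf.Configuration;

public class NLineReaderConfSketch {
    public static void main(String[] args) {
        Configuration job = new Configuration();
        // Cap the maximum line length the reader will buffer (value is illustrative).
        job.setInt("mapred.linerecordreader.maxlength", 1024 * 1024);
        // Ask the reader to tolerate zero-length or corrupt compressed splits instead of failing.
        job.setBoolean("mapred.ignore.badcompress", true);
        System.out.println(job.getBoolean("mapred.ignore.badcompress", false)); // true
    }
}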

From source file:InvertedIndex.NLineRecordReader.java

License:Apache License

public boolean isIgnoreBadCompress(Configuration job, Throwable exception) {
    if (null != job && job.getBoolean("mapred.ignore.badcompress", false)) {
        String exceptionStr = StringUtils.stringifyException(exception);
        String[] keywordsBL = job.getStrings("mapred.ignore.badcompress.keywords.blacklist",
                "Could not obtain block");
        if (null != keywordsBL) {
            for (String keyword : keywordsBL) {
                if (null != keyword && exceptionStr.contains(keyword)) {
                    return false;
                }
            }
        }

        String[] keywords = job.getStrings("mapred.ignore.badcompress.keywords",
                "org.apache.hadoop.io.compress.DecompressorStream",
                "org.apache.hadoop.io.compress.MultiMemberGZIPInputStream",
                "org.apache.hadoop.io.compress.GzipCodec$GzipInputStream",
                "com.hadoop.compression.lzo.LzopCodec$LzopInputStream");

        if (null != keywords) {
            for (String keyword : keywords) {
                if (null != keyword && exceptionStr.contains(keyword)) {
                    return true;
                }
            }
        }
    }
    return false;
}
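
The keyword lists consulted above are plain comma-separated properties, so they can be tuned on the same Configuration. A sketch with illustrative values; exception texts matching the blacklist are never ignored, while matches on the whitelist are treated as skippable decompression failures:

import org.apache.hadoop.conf.Configuration;

public class BadCompressKeywordsSketch {
    public static void main(String[] args) {
        Configuration job = new Configuration();
        job.setBoolean("mapred.ignore.badcompress", true);
        // Blacklist: exceptions containing these strings must still fail the task.
        job.setStrings("mapred.ignore.badcompress.keywords.blacklist", "Could not obtain block");
        // Whitelist: exceptions containing these strings are skipped as bad compressed input.
        job.setStrings("mapred.ignore.badcompress.keywords",
                "org.apache.hadoop.io.compress.DecompressorStream");
        for (String keyword : job.getStrings("mapred.ignore.badcompress.keywords")) {
            System.out.println(keyword);
        }
    }
}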

From source file:io.amient.kafka.hadoop.CheckpointManager.java

License:Apache License

public CheckpointManager(Configuration conf, KafkaZkUtils zkUtils) throws IOException {
    fs = FileSystem.get(conf);
    this.conf = conf;
    this.zkUtils = zkUtils;
    useZkCheckpoints = conf.getBoolean(CONFIG_CHECKPOINTS_ZOOKEEPER, false);
    hdfsCheckpointDir = new Path(conf.get("mapreduce.output.fileoutputformat.outputdir"), "_OFFSETS");
}
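
A hedged sketch of driving this constructor's boolean switch. It assumes CONFIG_CHECKPOINTS_ZOOKEEPER is accessible as a public constant of CheckpointManager (only its name, not its literal key, appears above), and the output directory value is illustrative:

import org.apache.hadoop.conf.Configuration;
import io.amient.kafka.hadoop.CheckpointManager;

public class CheckpointConfSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumption: the constant is publicly visible on CheckpointManager.
        conf.setBoolean(CheckpointManager.CONFIG_CHECKPOINTS_ZOOKEEPER, true);
        // The HDFS checkpoint path is derived from the job output directory (illustrative value).
        conf.set("mapreduce.output.fileoutputformat.outputdir", "/tmp/kafka-hadoop-out");
        System.out.println(conf.getBoolean(CheckpointManager.CONFIG_CHECKPOINTS_ZOOKEEPER, false)); // true
    }
}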

From source file:io.covert.dns.storage.accumulo.mutgen.EdgeMutationGeneratorFactory.java

License:Apache License

@Override
public MutationGenerator create(Configuration conf) throws Exception {

    Multimap<String, String> edges = HashMultimap.create();
    for (String edge : conf.get("edge.mutation.generator.edges").split(",")) {
        String names[] = edge.split(":", 2);
        edges.put(names[0], names[1]);
    }

    System.out.println(edges);

    return new EdgeMutationGenerator(conf.get("edge.mutation.generator.table"),
            conf.get("edge.mutation.generator.data.type"), edges,
            conf.getBoolean("edge.mutation.generator.bidirection", true),
            conf.getBoolean("edge.mutation.generator.univar.stats", true));
}
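
The factory reads several string keys plus two boolean switches that both default to true. A sketch of the Configuration it expects, with illustrative values:

import org.apache.hadoop.conf.Configuration;

public class EdgeMutationConfSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("edge.mutation.generator.table", "dns_edges");              // illustrative table name
        conf.set("edge.mutation.generator.data.type", "dns");                // illustrative data type
        conf.set("edge.mutation.generator.edges", "qname:rdata,qname:ns");   // comma-separated "name:name" pairs
        // Both flags default to true when absent; set them explicitly to change the behaviour.
        conf.setBoolean("edge.mutation.generator.bidirection", false);
        conf.setBoolean("edge.mutation.generator.univar.stats", true);
        System.out.println(conf.getBoolean("edge.mutation.generator.bidirection", true)); // false
    }
}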

From source file:io.hops.erasure_coding.ErasureCodingManager.java

License:Apache License

public ErasureCodingManager(FSNamesystem namesystem, Configuration conf) {
    super(conf);
    this.namesystem = namesystem;
    this.parityFolder = conf.get(DFSConfigKeys.PARITY_FOLDER, DFSConfigKeys.DEFAULT_PARITY_FOLDER);
    this.recheckInterval = conf.getInt(DFSConfigKeys.RECHECK_INTERVAL_KEY,
            DFSConfigKeys.DEFAULT_RECHECK_INTERVAL);
    this.activeEncodingLimit = conf.getInt(DFSConfigKeys.ACTIVE_ENCODING_LIMIT_KEY,
            DFSConfigKeys.DEFAULT_ACTIVE_ENCODING_LIMIT);
    this.activeRepairLimit = conf.getInt(DFSConfigKeys.ACTIVE_REPAIR_LIMIT_KEY,
            DFSConfigKeys.DEFAULT_ACTIVE_REPAIR_LIMIT);
    this.activeParityRepairLimit = conf.getInt(DFSConfigKeys.ACTIVE_PARITY_REPAIR_LIMIT_KEY,
            DFSConfigKeys.DEFAULT_ACTIVE_PARITY_REPAIR_LIMIT);
    this.repairDelay = conf.getInt(DFSConfigKeys.REPAIR_DELAY_KEY, DFSConfigKeys.DEFAULT_REPAIR_DELAY_KEY);
    this.parityRepairDelay = conf.getInt(DFSConfigKeys.PARITY_REPAIR_DELAY_KEY,
            DFSConfigKeys.DEFAULT_PARITY_REPAIR_DELAY);
    this.deletionLimit = conf.getInt(DFSConfigKeys.DELETION_LIMIT_KEY, DFSConfigKeys.DEFAULT_DELETION_LIMIT);
    enabled = conf.getBoolean(DFSConfigKeys.ERASURE_CODING_ENABLED_KEY,
            DFSConfigKeys.DEFAULT_ERASURE_CODING_ENABLED_KEY);
}

From source file:io.hops.erasure_coding.ErasureCodingManager.java

License:Apache License

public static boolean isErasureCodingEnabled(Configuration conf) {
    return conf.getBoolean(DFSConfigKeys.ERASURE_CODING_ENABLED_KEY,
            DFSConfigKeys.DEFAULT_ERASURE_CODING_ENABLED_KEY);
}
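
A sketch of toggling the flag that both methods above read. It assumes the HopsFS build keeps DFSConfigKeys in the usual org.apache.hadoop.hdfs package and that ERASURE_CODING_ENABLED_KEY is its Hops-specific boolean key, as the snippets suggest:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import io.hops.erasure_coding.ErasureCodingManager;

public class ErasureCodingToggleSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Enable erasure coding via the same key the manager reads.
        conf.setBoolean(DFSConfigKeys.ERASURE_CODING_ENABLED_KEY, true);
        System.out.println(ErasureCodingManager.isErasureCodingEnabled(conf)); // true
    }
}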

From source file:io.hops.memcache.PathMemcache.java

License:Apache License

public void setConfiguration(Configuration conf) throws IOException {
    numberOfConnections = conf.getInt(DFSConfigKeys.DFS_MEMCACHE_CONNECTION_POOL_SIZE,
            DFSConfigKeys.DFS_MEMCACHE_CONNECTION_POOL_SIZE_DEFAULT);
    server = conf.get(DFSConfigKeys.DFS_MEMCACHE_SERVER, DFSConfigKeys.DFS_MEMCACHE_SERVER_DEFAULT);
    keyExpiry = conf.getInt(DFSConfigKeys.DFS_MEMCACHE_KEY_EXPIRY_IN_SECONDS,
            DFSConfigKeys.DFS_MEMCACHE_KEY_EXPIRY_IN_SECONDS_DEFAULT);
    keyPrefix = conf.get(DFSConfigKeys.DFS_MEMCACHE_KEY_PREFIX, DFSConfigKeys.DFS_MEMCACHE_KEY_PREFIX_DEFAULT);
    isEnabled = conf.getBoolean(DFSConfigKeys.DFS_MEMCACHE_ENABLED, DFSConfigKeys.DFS_MEMCACHE_ENABLED_DEFAULT);
    if (isEnabled) {
        start();
    }
}
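
A sketch of the properties this cache reads, again assuming the Hops-specific constants are reachable through DFSConfigKeys; the server address and pool size are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class MemcacheConfSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_MEMCACHE_ENABLED, true);           // switch the cache on
        conf.set(DFSConfigKeys.DFS_MEMCACHE_SERVER, "localhost:11211");      // illustrative server address
        conf.setInt(DFSConfigKeys.DFS_MEMCACHE_CONNECTION_POOL_SIZE, 4);     // illustrative pool size
        System.out.println(conf.getBoolean(DFSConfigKeys.DFS_MEMCACHE_ENABLED, false)); // true
    }
}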

From source file:io.hops.metadata.HdfsStorageFactory.java

License:Apache License

public static void setConfiguration(Configuration conf) throws IOException {
    IDsMonitor.getInstance().setConfiguration(conf);
    PathMemcache.getInstance().setConfiguration(conf);
    LockFactory.getInstance().setConfiguration(conf);
    NDCWrapper.enableNDC(
            conf.getBoolean(DFSConfigKeys.DFS_NDC_ENABLED_KEY, DFSConfigKeys.DFS_NDC_ENABLED_DEFAULT));
    TransactionsStats.getInstance().setConfiguration(
            conf.getBoolean(DFSConfigKeys.DFS_TRANSACTION_STATS_ENABLED,
                    DFSConfigKeys.DFS_TRANSACTION_STATS_ENABLED_DEFAULT),
            conf.get(DFSConfigKeys.DFS_TRANSACTION_STATS_DIR, DFSConfigKeys.DFS_TRANSACTION_STATS_DIR_DEFAULT));
    if (!isDALInitialized) {
        HdfsVariables.registerDefaultValues();
        addToClassPath(conf.get(DFSConfigKeys.DFS_STORAGE_DRIVER_JAR_FILE,
                DFSConfigKeys.DFS_STORAGE_DRIVER_JAR_FILE_DEFAULT));
        dStorageFactory = DalDriver.load(conf.get(DFSConfigKeys.DFS_STORAGE_DRIVER_CLASS,
                DFSConfigKeys.DFS_STORAGE_DRIVER_CLASS_DEFAULT));
        dStorageFactory.setConfiguration(getMetadataClusterConfiguration(conf));
        initDataAccessWrappers();
        EntityManager.addContextInitializer(getContextInitializer());
        isDALInitialized = true;
    }
}
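
Both getBoolean calls above gate optional diagnostics. A sketch of enabling them, under the same assumption that the Hops-specific keys live in DFSConfigKeys; the stats directory is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class HdfsStorageDiagSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_NDC_ENABLED_KEY, true);            // per-transaction NDC logging
        conf.setBoolean(DFSConfigKeys.DFS_TRANSACTION_STATS_ENABLED, true);  // collect transaction statistics
        conf.set(DFSConfigKeys.DFS_TRANSACTION_STATS_DIR, "/tmp/tx-stats");  // illustrative output directory
        System.out.println(conf.getBoolean(DFSConfigKeys.DFS_NDC_ENABLED_KEY, false)); // true
    }
}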

From source file:io.hops.metadata.util.RMUtilities.java

License:Apache License

public static org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode getRMNode(final String id,
        final RMContext context, final Configuration conf) throws IOException {
    LightWeightRequestHandler getRMNodeHandler = new LightWeightRequestHandler(YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            connector.beginTransaction();
            connector.readLock();
            org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = null;
            RMNodeDataAccess rmnodeDA = (RMNodeDataAccess) RMStorageFactory
                    .getDataAccess(RMNodeDataAccess.class);
            RMNode hopRMNode = (RMNode) rmnodeDA.findByNodeId(id);
            if (hopRMNode != null) {
                ResourceDataAccess resDA = (ResourceDataAccess) RMStorageFactory
                        .getDataAccess(ResourceDataAccess.class);
                NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory.getDataAccess(NodeDataAccess.class);
                //Retrieve resource of RMNode
                Resource res = (Resource) resDA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                        Resource.RMNODE);

                NodeId nodeId = ConverterUtils.toNodeId(id);
                //Retrieve and Initialize NodeBase for RMNode
                org.apache.hadoop.net.Node node = null;
                if (hopRMNode.getNodeId() != null) {
                    Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                    node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                    if (hopNode.getParent() != null) {
                        node.setParent(new NodeBase(hopNode.getParent()));
                    }
                    node.setLevel(hopNode.getLevel());
                }
                //Retrieve nextHeartbeat
                NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                        .getDataAccess(NextHeartbeatDataAccess.class);
                boolean nextHeartbeat = nextHBDA.findEntry(id);
                //Create Resource
                ResourceOption resourceOption = null;
                if (res != null) {
                    resourceOption = ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource
                            .newInstance(res.getMemory(), res.getVirtualCores()),
                            hopRMNode.getOvercommittimeout());
                }
                rmNode = new RMNodeImpl(nodeId, context, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                        hopRMNode.getHttpPort(), node, resourceOption, hopRMNode.getNodemanagerVersion(),
                        hopRMNode.getHealthReport(), hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                        conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));

                ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                // *** Recover maps/lists of RMNode ***
                //Use a cache for retrieved ContainerStatus
                Map<String, ContainerStatus> hopContainerStatuses = new HashMap<String, ContainerStatus>();
                //1. Recover JustLaunchedContainers
                JustLaunchedContainersDataAccess jlcDA = (JustLaunchedContainersDataAccess) RMStorageFactory
                        .getDataAccess(JustLaunchedContainersDataAccess.class);
                ContainerStatusDataAccess containerStatusDA = (ContainerStatusDataAccess) RMStorageFactory
                        .getDataAccess(ContainerStatusDataAccess.class);
                List<JustLaunchedContainers> hopJlcList = jlcDA.findByRMNode(id);
                if (hopJlcList != null && !hopJlcList.isEmpty()) {
                    Map<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus> justLaunchedContainers = new HashMap<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    for (JustLaunchedContainers hop : hopJlcList) {
                        //Create ContainerId
                        org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                .toContainerId(hop.getContainerId());
                        //Find and create ContainerStatus
                        if (!hopContainerStatuses.containsKey(hop.getContainerId())) {
                            hopContainerStatuses.put(hop.getContainerId(),
                                    (ContainerStatus) containerStatusDA.findEntry(hop.getContainerId(), id));
                        }
                        org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                .newInstance(cid,
                                        ContainerState.valueOf(
                                                hopContainerStatuses.get(hop.getContainerId()).getState()),
                                        hopContainerStatuses.get(hop.getContainerId()).getDiagnostics(),
                                        hopContainerStatuses.get(hop.getContainerId()).getExitstatus());
                        justLaunchedContainers.put(cid, conStatus);
                    }
                    ((RMNodeImpl) rmNode).setJustLaunchedContainers(justLaunchedContainers);
                }
                //2. Return ContainerIdToClean
                ContainerIdToCleanDataAccess cidToCleanDA = (ContainerIdToCleanDataAccess) RMStorageFactory
                        .getDataAccess(ContainerIdToCleanDataAccess.class);
                List<ContainerId> cidToCleanList = cidToCleanDA.findByRMNode(id);
                if (cidToCleanList != null && !cidToCleanList.isEmpty()) {
                    Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean = new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
                    for (ContainerId hop : cidToCleanList) {
                        //Create ContainerId
                        containersToClean.add(ConverterUtils.toContainerId(hop.getContainerId()));
                    }
                    ((RMNodeImpl) rmNode).setContainersToClean(containersToClean);
                }
                //3. Finished Applications
                FinishedApplicationsDataAccess finishedAppsDA = (FinishedApplicationsDataAccess) RMStorageFactory
                        .getDataAccess(FinishedApplicationsDataAccess.class);
                List<FinishedApplications> hopFinishedAppsList = finishedAppsDA.findByRMNode(id);
                if (hopFinishedAppsList != null && !hopFinishedAppsList.isEmpty()) {
                    List<ApplicationId> finishedApps = new ArrayList<ApplicationId>();
                    for (FinishedApplications hop : hopFinishedAppsList) {
                        finishedApps.add(ConverterUtils.toApplicationId(hop.getApplicationId()));
                    }
                    ((RMNodeImpl) rmNode).setFinishedApplications(finishedApps);
                }

                //4. UpdadedContainerInfo
                UpdatedContainerInfoDataAccess uciDA = (UpdatedContainerInfoDataAccess) RMStorageFactory
                        .getDataAccess(UpdatedContainerInfoDataAccess.class);
                //Retrieve all UpdatedContainerInfo entries for this particular RMNode
                Map<Integer, List<UpdatedContainerInfo>> hopUpdatedContainerInfoMap = uciDA.findByRMNode(id);
                if (hopUpdatedContainerInfoMap != null && !hopUpdatedContainerInfoMap.isEmpty()) {
                    ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>();
                    for (int uciId : hopUpdatedContainerInfoMap.keySet()) {
                        for (UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoMap.get(uciId)) {
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> newlyAllocated = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> completed = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            //Retrieve containerstatus entries for the particular updatedcontainerinfo
                            org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                    .toContainerId(hopUCI.getContainerId());
                            if (!hopContainerStatuses.containsKey(hopUCI.getContainerId())) {
                                hopContainerStatuses.put(hopUCI.getContainerId(),
                                        (ContainerStatus) containerStatusDA.findEntry(hopUCI.getContainerId(),
                                                id));
                            }
                            org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                    .newInstance(cid,
                                            ContainerState.valueOf(hopContainerStatuses
                                                    .get(hopUCI.getContainerId()).getState()),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getDiagnostics(),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getExitstatus());
                            //Check ContainerStatus state to add it to appropriate list
                            if (conStatus != null) {
                                if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_RUNNING)) {
                                    newlyAllocated.add(conStatus);
                                } else if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_COMPLETED)) {
                                    completed.add(conStatus);
                                }
                            }
                            org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci = new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                    newlyAllocated, completed, hopUCI.getUpdatedContainerInfoId());
                            updatedContainerInfoQueue.add(uci);
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
                            //Update uci counter
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfoId(hopRMNode.getUciId());
                        }
                    }
                }

                //5. Retrieve latestNodeHeartBeatResponse
                NodeHBResponseDataAccess hbDA = (NodeHBResponseDataAccess) RMStorageFactory
                        .getDataAccess(NodeHBResponseDataAccess.class);
                NodeHBResponse hopHB = (NodeHBResponse) hbDA.findById(id);
                if (hopHB != null) {
                    NodeHeartbeatResponse hb = new NodeHeartbeatResponsePBImpl(
                            YarnServerCommonServiceProtos.NodeHeartbeatResponseProto
                                    .parseFrom(hopHB.getResponse()));
                    ((RMNodeImpl) rmNode).setLatestNodeHBResponse(hb);
                }
            }
            connector.commit();
            return rmNode;
        }
    };
    return (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) getRMNodeHandler.handle();
}
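
Within this long recovery routine the single getBoolean call decides whether the rebuilt RMNodeImpl runs with the Hops distributed resource tracker. A sketch of setting that flag, using the same YarnConfiguration constants the snippet reads:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class DistributedRtFlagSketch {
    public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // HOPS_DISTRIBUTED_RT_ENABLED is a Hops extension of YarnConfiguration (see the snippet above).
        conf.setBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED, true);
        System.out.println(conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED)); // true
    }
}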