Example usage for org.apache.hadoop.conf Configuration getBoolean

List of usage examples for org.apache.hadoop.conf Configuration getBoolean

Introduction

On this page you can find example usage of org.apache.hadoop.conf.Configuration.getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property exists, or if the stored value is not a valid boolean, the supplied defaultValue is returned.
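
A minimal usage sketch (the property name my.feature.enabled and the class GetBooleanExample are made up for illustration and do not appear in the sources below): getBoolean parses the stored string and falls back to the supplied default when the property is missing or not a valid boolean.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("my.feature.enabled", true);

        // Property is present, so the stored value is returned
        boolean enabled = conf.getBoolean("my.feature.enabled", false); // true

        // Property is unset, so the default is returned instead
        boolean missing = conf.getBoolean("my.other.flag", true); // true

        System.out.println("enabled=" + enabled + ", missing=" + missing);
    }
}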

Usage

From source file:io.hops.metadata.util.RMUtilities.java

License:Apache License

/**
 * Recovers the inactive-nodes map of RMContextImpl.
 *
 * @param rmContext the RMContext being recovered
 * @param state the persisted RMState used to recover each node
 * @param conf the configuration (read for HOPS_DISTRIBUTED_RT_ENABLED)
 * @return the recovered map of inactive RMNodes, keyed by host name
 * @throws java.lang.Exception
 */
//For testing TODO move to test
public static Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> getRMContextInactiveNodes(
        final RMContext rmContext, final RMState state, final Configuration conf) throws Exception {
    LightWeightRequestHandler getRMContextInactiveNodesHandler = new LightWeightRequestHandler(
            YARNOperationType.TEST) {
        @Override
        public Object performTask() throws StorageException {
            connector.beginTransaction();
            connector.writeLock();
            ConcurrentMap<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> inactiveNodes = new ConcurrentHashMap<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode>();
            //Retrieve rmctxnodes table entries
            RMContextInactiveNodesDataAccess rmctxInactiveNodesDA = (RMContextInactiveNodesDataAccess) RMStorageFactory
                    .getDataAccess(RMContextInactiveNodesDataAccess.class);
            ResourceDataAccess DA = (ResourceDataAccess) YarnAPIStorageFactory
                    .getDataAccess(ResourceDataAccess.class);
            RMNodeDataAccess rmDA = (RMNodeDataAccess) RMStorageFactory.getDataAccess(RMNodeDataAccess.class);
            List<RMContextInactiveNodes> hopRMContextInactiveNodes = rmctxInactiveNodesDA.findAll();
            if (hopRMContextInactiveNodes != null && !hopRMContextInactiveNodes.isEmpty()) {
                for (RMContextInactiveNodes key : hopRMContextInactiveNodes) {

                    NodeId nodeId = ConverterUtils.toNodeId(key.getRmnodeid());
                    //retrieve RMNode in order to create a new FiCaSchedulerNode
                    RMNode hopRMNode = (RMNode) rmDA.findByNodeId(key.getRmnodeid());
                    //Retrieve resource of RMNode
                    Resource res = (Resource) DA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                            Resource.RMNODE);
                    //Retrieve the persisted Node entry for this RMNode
                    NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory
                            .getDataAccess(NodeDataAccess.class);
                    //Initialize a NodeBase for the RMNode
                    org.apache.hadoop.net.Node node = null;
                    if (hopRMNode.getNodeId() != null) {
                        Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                        node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                        if (hopNode.getParent() != null) {
                            node.setParent(new NodeBase(hopNode.getParent()));
                        }
                        node.setLevel(hopNode.getLevel());
                    }
                    //Retrieve nextHeartbeat
                    NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                            .getDataAccess(NextHeartbeatDataAccess.class);
                    boolean nextHeartbeat = nextHBDA.findEntry(key.getRmnodeid());
                    org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = new RMNodeImpl(nodeId,
                            rmContext, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                            hopRMNode.getHttpPort(), node,
                            ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource.newInstance(
                                    res.getMemory(), res.getVirtualCores()), hopRMNode.getOvercommittimeout()),
                            hopRMNode.getNodemanagerVersion(), hopRMNode.getHealthReport(),
                            hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                            conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                    YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));
                    ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                    alreadyRecoveredRMContextInactiveNodes.put(rmNode.getNodeID().getHost(), rmNode);
                    inactiveNodes.put(rmNode.getNodeID().getHost(), rmNode);

                }
            }
            connector.commit();
            return inactiveNodes;
        }
    };
    try {
        if (alreadyRecoveredRMContextInactiveNodes.isEmpty()) {
            Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode> result = (Map<String, org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode>) getRMContextInactiveNodesHandler
                    .handle();
            for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode node : result.values()) {
                node.recover(state);
            }
            return result;
        } else {
            return alreadyRecoveredRMContextInactiveNodes;
        }
    } catch (IOException ex) {
        LOG.error("HOP", ex);
    }
    return null;
}

From source file:io.hops.resolvingcache.Cache.java

License:Apache License

protected void setConfiguration(Configuration conf) throws IOException {
    isEnabled = conf.getBoolean(DFSConfigKeys.DFS_RESOLVING_CACHE_ENABLED,
            DFSConfigKeys.DFS_RESOLVING_CACHE_ENABLED_DEFAULT);

    if (isEnabled) {
        start();
    }
}

From source file:io.hops.tensorflow.TimelineHandler.java

License:Apache License

public void startClient(final Configuration conf) throws YarnException, IOException, InterruptedException {
    try {
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
                        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
                    // Creating the Timeline Client
                    timelineClient = TimelineClient.createTimelineClient();
                    timelineClient.init(conf);
                    timelineClient.start();
                } else {
                    timelineClient = null;
                    LOG.warn("Timeline service is not enabled");
                }
                return null;
            }
        });
    } catch (UndeclaredThrowableException e) {
        throw new YarnException(e.getCause());
    }
}

From source file:io.hops.transaction.lock.LockFactory.java

License:Apache License

public void setConfiguration(Configuration conf) {
    Lock.enableSetPartitionKey(conf.getBoolean(DFSConfigKeys.DFS_SET_PARTITION_KEY_ENABLED,
            DFSConfigKeys.DFS_SET_PARTITION_KEY_ENABLED_DEFAULT));
    BaseINodeLock.setDefaultLockType(getPrecedingPathLockType(conf));
}

From source file:io.prestosql.plugin.hive.s3.PrestoS3FileSystem.java

License:Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    this.multiPartUploadMinFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE,
            defaults.getS3MultipartMinFileSize().toBytes());
    this.multiPartUploadMinPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE,
            defaults.getS3MultipartMinPartSize().toBytes());
    this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    verify((pinS3ClientToCurrentRegion && conf.get(S3_ENDPOINT) == null) || !pinS3ClientToCurrentRegion,
            "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region");
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    this.s3AclType = PrestoS3AclType.valueOf(conf.get(S3_ACL_TYPE, defaults.getS3AclType().name()));
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.credentialsProvider = createAwsCredentialsProvider(uri, conf);
    this.s3 = createAmazonS3Client(conf, configuration);
}

From source file:io.transwarp.flume.sink.HDFSCompressedDataStream.java

License:Apache License

@Override
public void open(String filePath, CompressionCodec codec, CompressionType cType) throws IOException {
    Configuration conf = new Configuration();
    Path dstPath = new Path(filePath);
    FileSystem hdfs = dstPath.getFileSystem(conf);
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }
    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) && hdfs.isFile(dstPath)) {
        fsOut = hdfs.append(dstPath);
        appending = true;
    } else {
        fsOut = hdfs.create(dstPath);
    }
    if (compressor == null) {
        compressor = CodecPool.getCompressor(codec, conf);
    }
    cmpOut = codec.createOutputStream(fsOut, compressor);
    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, cmpOut);
    if (appending && !serializer.supportsReopen()) {
        cmpOut.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    registerCurrentStream(fsOut, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
    isFinished = false;
}

From source file:ir.co.bayan.simorq.zal.extractor.nutch.OPICScoringFilter.java

License:Apache License

@Override
public void setConf(Configuration conf) {
    this.conf = conf;
    scorePower = conf.getFloat("indexer.score.power", 0.5f);
    internalScoreFactor = conf.getFloat("db.score.link.internal", 1.0f);
    externalScoreFactor = conf.getFloat("db.score.link.external", 1.0f);
    countFiltered = conf.getBoolean("db.score.count.filtered", false);
}

From source file:it.crs4.pydoop.mapreduce.pipes.PipesMapper.java

License:Apache License

@Override
public void run(Context context) throws IOException, InterruptedException {
    setup(context);
    Configuration conf = context.getConfiguration();
    InputSplit split = context.getInputSplit();
    // FIXME: do we really need to be so convoluted?
    InputFormat<K1, V1> inputFormat;
    try {
        inputFormat = (InputFormat<K1, V1>) ReflectionUtils.newInstance(context.getInputFormatClass(), conf);
    } catch (ClassNotFoundException ce) {
        throw new RuntimeException("class not found", ce);
    }
    RecordReader<K1, V1> input = inputFormat.createRecordReader(split, context);
    input.initialize(split, context);
    boolean isJavaInput = Submitter.getIsJavaRecordReader(conf);
    try {
        // FIXME: what happens for a java mapper and no java record reader?
        DummyRecordReader fakeInput = (!isJavaInput && !Submitter.getIsJavaMapper(conf))
                ? (DummyRecordReader) input
                : null;
        application = new Application<K1, V1, K2, V2>(context, fakeInput);
    } catch (InterruptedException ie) {
        throw new RuntimeException("interrupted", ie);
    }
    DownwardProtocol<K1, V1> downlink = application.getDownlink();
    // FIXME: InputSplit is not Writable, but still, this is ugly...
    downlink.runMap((FileSplit) context.getInputSplit(), context.getNumReduceTasks(), isJavaInput);
    boolean skipping = conf.getBoolean(context.SKIP_RECORDS, false);
    boolean sent_input_types = false;
    try {
        if (isJavaInput) {
            // FIXME
            while (input.nextKeyValue()) {
                if (!sent_input_types) {
                    sent_input_types = true;
                    NullWritable n = NullWritable.get();
                    String kclass_name = n.getClass().getName();
                    String vclass_name = n.getClass().getName();
                    if (input.getCurrentKey() != null) {
                        kclass_name = input.getCurrentKey().getClass().getName();
                    }
                    if (input.getCurrentValue() != null) {
                        vclass_name = input.getCurrentValue().getClass().getName();
                    }
                    downlink.setInputTypes(kclass_name, vclass_name);
                }
                downlink.mapItem(input.getCurrentKey(), input.getCurrentValue());
                if (skipping) {
                    //flush the streams on every record input if running in skip mode
                    //so that we don't buffer other records surrounding a bad record.
                    downlink.flush();
                }
            }
            downlink.endOfInput();
        }
        application.waitForFinish();
    } catch (Throwable t) {
        application.abort(t);
    } finally {
        cleanup(context);
    }
}

From source file:it.crs4.pydoop.mapreduce.pipes.CommandLineParser.java

License:Apache License

/**
 * Check whether the job is using a Java RecordReader
 * @param conf the configuration to check
 * @return is it a Java RecordReader?
 */
public static boolean getIsJavaRecordReader(Configuration conf) {
    return conf.getBoolean(Submitter.IS_JAVA_RR, false);
}

From source file:it.crs4.pydoop.mapreduce.pipes.CommandLineParser.java

License:Apache License

/**
 * Check whether the job is using a Java Mapper.
 * @param conf the configuration to check
 * @return is it a Java Mapper?
 */
public static boolean getIsJavaMapper(Configuration conf) {
    return conf.getBoolean(Submitter.IS_JAVA_MAP, false);
}