Example usage for org.apache.hadoop.conf Configuration getBoolean

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean ("true" or "false", case-insensitive), then defaultValue is returned.
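
A minimal, self-contained sketch of the call; the property names here are hypothetical and used only for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "my.feature.enabled" is a hypothetical property name.
        conf.set("my.feature.enabled", "true");

        // true: the property is set to a valid boolean string.
        boolean enabled = conf.getBoolean("my.feature.enabled", false);

        // false: the property is unset, so the default applies.
        boolean missing = conf.getBoolean("some.unset.property", false);

        System.out.println(enabled + " " + missing);
    }
}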

Usage

From source file: com.marklogic.mapreduce.ContentWriter.java

License: Apache License

public ContentWriter(Configuration conf, Map<String, ContentSource> forestSourceMap, boolean fastLoad,
        AssignmentManager am) {
    super(conf, null);

    this.fastLoad = fastLoad;

    this.forestSourceMap = forestSourceMap;

    this.am = am;

    permsMap = new HashMap<String, ContentPermission[]>();

    int srcMapSize = forestSourceMap.size();
    forestIds = new String[srcMapSize];
    // key order in key set is guaranteed by LinkedHashMap,
    // i.e., the order keys are inserted
    forestIds = forestSourceMap.keySet().toArray(forestIds);
    hostId = (int) (Math.random() * srcMapSize);

    // arraySize is the number of forests in fast load mode; 1 otherwise.
    int arraySize = fastLoad ? srcMapSize : 1;
    sessions = new Session[arraySize];
    stmtCounts = new int[arraySize];

    outputDir = conf.get(OUTPUT_DIRECTORY);
    batchSize = conf.getInt(BATCH_SIZE, DEFAULT_BATCH_SIZE);

    pendingUris = new HashMap[arraySize];
    for (int i = 0; i < arraySize; i++) {
        pendingUris[i] = new HashMap<Content, DocumentURI>();
    }

    if (fastLoad && (am.getPolicy().getPolicyKind() == AssignmentPolicy.Kind.STATISTICAL
            || am.getPolicy().getPolicyKind() == AssignmentPolicy.Kind.RANGE)) {
        countBased = true;
        if (batchSize > 1) {
            forestContents = new Content[1][batchSize];
            counts = new int[1];
        }
        sfId = -1;
    } else {
        if (batchSize > 1) {
            forestContents = new Content[arraySize][batchSize];
            counts = new int[arraySize];
        }
        sfId = 0;
    }

    String[] perms = conf.getStrings(OUTPUT_PERMISSION);
    List<ContentPermission> permissions = null;
    if (perms != null && perms.length > 0) {
        int i = 0;
        while (i + 1 < perms.length) {
            String roleName = perms[i++];
            if (roleName == null || roleName.isEmpty()) {
                LOG.error("Illegal role name: " + roleName);
                continue;
            }
            String perm = perms[i].trim();
            ContentCapability capability = null;
            if (perm.equalsIgnoreCase(ContentCapability.READ.toString())) {
                capability = ContentCapability.READ;
            } else if (perm.equalsIgnoreCase(ContentCapability.EXECUTE.toString())) {
                capability = ContentCapability.EXECUTE;
            } else if (perm.equalsIgnoreCase(ContentCapability.INSERT.toString())) {
                capability = ContentCapability.INSERT;
            } else if (perm.equalsIgnoreCase(ContentCapability.UPDATE.toString())) {
                capability = ContentCapability.UPDATE;
            } else {
                LOG.error("Illegal permission: " + perm);
            }
            if (capability != null) {
                if (permissions == null) {
                    permissions = new ArrayList<ContentPermission>();
                }
                permissions.add(new ContentPermission(capability, roleName));
            }
            i++;
        }
    }

    options = new ContentCreateOptions();
    String[] collections = conf.getStrings(OUTPUT_COLLECTION);
    if (collections != null) {
        for (int i = 0; i < collections.length; i++) {
            collections[i] = collections[i].trim();
        }
        options.setCollections(collections);
    }

    options.setQuality(conf.getInt(OUTPUT_QUALITY, 0));
    if (permissions != null) {
        options.setPermissions(permissions.toArray(new ContentPermission[permissions.size()]));
    }
    String contentTypeStr = conf.get(CONTENT_TYPE, DEFAULT_CONTENT_TYPE);
    ContentType contentType = ContentType.valueOf(contentTypeStr);
    if (contentType == ContentType.UNKNOWN) {
        formatNeeded = true;
    } else {
        options.setFormat(contentType.getDocumentFormat());
    }

    options.setLanguage(conf.get(OUTPUT_CONTENT_LANGUAGE));
    String repairLevel = conf.get(OUTPUT_XML_REPAIR_LEVEL, DEFAULT_OUTPUT_XML_REPAIR_LEVEL).toLowerCase();
    options.setNamespace(conf.get(OUTPUT_CONTENT_NAMESPACE));
    if (DocumentRepairLevel.DEFAULT.toString().equals(repairLevel)) {
        options.setRepairLevel(DocumentRepairLevel.DEFAULT);
    } else if (DocumentRepairLevel.NONE.toString().equals(repairLevel)) {
        options.setRepairLevel(DocumentRepairLevel.NONE);
    } else if (DocumentRepairLevel.FULL.toString().equals(repairLevel)) {
        options.setRepairLevel(DocumentRepairLevel.FULL);
    }

    streaming = conf.getBoolean(OUTPUT_STREAMING, false);
    tolerateErrors = conf.getBoolean(OUTPUT_TOLERATE_ERRORS, false);

    String encoding = conf.get(MarkLogicConstants.OUTPUT_CONTENT_ENCODING);
    if (encoding != null) {
        options.setEncoding(encoding);
    }

    options.setTemporalCollection(conf.get(TEMPORAL_COLLECTION));

    needCommit = txnSize > 1 || (batchSize > 1 && tolerateErrors);
    if (needCommit) {
        commitUris = new ArrayList[arraySize];
        for (int i = 0; i < arraySize; i++) {
            commitUris[i] = new ArrayList<DocumentURI>(txnSize * batchSize);
        }
    }
}
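
The permission list parsed above is a flat sequence of role/capability pairs, and the two getBoolean calls near the end read the streaming and error-tolerance flags with a false default. Below is a hedged sketch of the corresponding job setup, assuming the constants live in MarkLogicConstants as the snippet's imports suggest; the role name "app-user" is illustrative:

import org.apache.hadoop.conf.Configuration;
import com.marklogic.mapreduce.MarkLogicConstants;

public class ContentWriterConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Role/capability pairs: app-user may read, app-user may update.
        conf.setStrings(MarkLogicConstants.OUTPUT_PERMISSION,
                "app-user", "read", "app-user", "update");
        // The two flags read via getBoolean(..., false) in the constructor.
        conf.setBoolean(MarkLogicConstants.OUTPUT_STREAMING, false);
        conf.setBoolean(MarkLogicConstants.OUTPUT_TOLERATE_ERRORS, true);
    }
}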

From source file: com.marklogic.mapreduce.PropertyOpType.java

License: Apache License

public String getQuery(Configuration conf) {
    boolean alwaysCreate = conf.getBoolean(MarkLogicConstants.OUTPUT_PROPERTY_ALWAYS_CREATE, false);

    StringBuilder buf = new StringBuilder();
    buf.append("xquery version \"1.0-ml\"; \n");
    buf.append("declare variable $");
    buf.append(PropertyWriter.DOCURI_VARIABLE_NAME);
    buf.append(" as xs:string external;\n");
    buf.append("declare variable $");
    buf.append(PropertyWriter.NODE_VARIABLE_NAME);
    buf.append(" as element() external;\n");
    if (!alwaysCreate) {
        buf.append("let $exist := fn:exists(fn:doc($");
        buf.append(PropertyWriter.DOCURI_VARIABLE_NAME);
        buf.append("))\nreturn if ($exist) then \n");
    }
    buf.append(getFunctionName());
    buf.append("($");
    buf.append(PropertyWriter.DOCURI_VARIABLE_NAME);
    buf.append(", $");
    buf.append(PropertyWriter.NODE_VARIABLE_NAME);
    if (!alwaysCreate) {
        buf.append(") else ()");
    } else {
        buf.append(")");
    }

    return buf.toString();
}
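
The flag read by getBoolean here decides whether the generated XQuery guards the property operation with an existence check. A sketch of the toggle; the query shapes in the comments are paraphrased from the builder above, with $URI and $NODE standing in for the variable names PropertyWriter defines:

import org.apache.hadoop.conf.Configuration;
import com.marklogic.mapreduce.MarkLogicConstants;

public class PropertyQuerySketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Left unset, getBoolean(..., false) returns false and getQuery emits
        // an existence-guarded call:
        //   let $exist := fn:exists(fn:doc($URI))
        //   return if ($exist) then op($URI, $NODE) else ()

        conf.setBoolean(MarkLogicConstants.OUTPUT_PROPERTY_ALWAYS_CREATE, true);
        // Now getQuery emits the unguarded call: op($URI, $NODE)
    }
}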

From source file: com.marklogic.mapreduce.utilities.InternalUtilities.java

License: Apache License

/**
 * Get input content source.
 *
 * @param conf job configuration
 * @param host host to connect to
 * @return content source
 * @throws IOException 
 * @throws XccConfigException
 */
public static ContentSource getInputContentSource(Configuration conf, String host)
        throws XccConfigException, IOException {
    String user = conf.get(INPUT_USERNAME, "");
    String password = conf.get(INPUT_PASSWORD, "");
    String port = conf.get(INPUT_PORT, "8000");
    String db = conf.get(INPUT_DATABASE_NAME);
    int portInt = Integer.parseInt(port);
    boolean useSsl = conf.getBoolean(INPUT_USE_SSL, false);
    if (useSsl) {
        Class<? extends SslConfigOptions> sslOptionClass = conf.getClass(INPUT_SSL_OPTIONS_CLASS, null,
                SslConfigOptions.class);
        if (sslOptionClass != null) {
            SslConfigOptions sslOptions = (SslConfigOptions) ReflectionUtils.newInstance(sslOptionClass, conf);

            // construct content source
            return getSecureContentSource(host, portInt, user, password, db, sslOptions);
        }
    }
    return ContentSourceFactory.newContentSource(host, portInt, user, password, db);
}
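
The useSsl flag defaults to false, so secure connections are opt-in; when enabled, the options class is resolved with conf.getClass. A hedged setup sketch: MySslOptions is a hypothetical SslConfigOptions implementation, and the constant names are assumed to live in MarkLogicConstants as in the connector source above:

import org.apache.hadoop.conf.Configuration;
import com.marklogic.mapreduce.MarkLogicConstants;

public class SslInputSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(MarkLogicConstants.INPUT_USE_SSL, true);
        // MySslOptions is hypothetical; it must implement SslConfigOptions.
        conf.setClass(MarkLogicConstants.INPUT_SSL_OPTIONS_CLASS,
                MySslOptions.class, SslConfigOptions.class);
    }
}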

From source file: com.marklogic.mapreduce.utilities.InternalUtilities.java

License: Apache License

/**
 * Get output content source.
 *
 * @param conf job configuration
 * @param hostName host name
 * @return content source
 * @throws IOException
 * @throws XccConfigException
 */
public static ContentSource getOutputContentSource(Configuration conf, String hostName)
        throws XccConfigException, IOException {
    String user = conf.get(OUTPUT_USERNAME, "");
    String password = conf.get(OUTPUT_PASSWORD, "");
    String port = conf.get(OUTPUT_PORT, "8000");
    String db = conf.get(OUTPUT_DATABASE_NAME);
    int portInt = Integer.parseInt(port);
    boolean useSsl = conf.getBoolean(OUTPUT_USE_SSL, false);
    if (useSsl) {
        Class<? extends SslConfigOptions> sslOptionClass = conf.getClass(OUTPUT_SSL_OPTIONS_CLASS, null,
                SslConfigOptions.class);
        if (sslOptionClass != null) {
            SslConfigOptions sslOptions = (SslConfigOptions) ReflectionUtils.newInstance(sslOptionClass, conf);

            // construct content source
            return getSecureContentSource(hostName, portInt, user, password, db, sslOptions);
        }
    }
    return ContentSourceFactory.newContentSource(hostName, portInt, user, password, db);
}

From source file: com.mellanox.r4h.DFSClient.java

License: Apache License

/**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler} as its RetryInvocationHandler. Otherwise one of nameNodeUri or
 * rpcNamenode must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats)
        throws IOException {
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
    traceSampler = new SamplerBuilder(TraceUtils.wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
            .build();
    // Copy only the required DFSClient configuration
    this.dfsClientConf = new DFSClientConfBridge2_7(conf);
    if (this.dfsClientConf.isUseLegacyBlockReaderLocal()) {
        LOG.debug("Using legacy short-circuit local reads.");
    }
    this.conf = conf;
    this.stats = stats;
    this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
    this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

    this.ugi = UserGroupInformation.getCurrentUser();

    this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
    this.clientName = "DFSClient_" + dfsClientConf.getTaskId() + "_" + DFSUtil.getRandom().nextInt() + "_"
            + Thread.currentThread().getId();
    provider = DFSUtil.createKeyProvider(conf);
    if (LOG.isDebugEnabled()) {
        if (provider == null) {
            LOG.debug("No KeyProvider found.");
        } else {
            LOG.debug("Found KeyProvider: " + provider.toString());
        }
    }
    int numResponseToDrop = conf.getInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
    NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
    AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
    if (numResponseToDrop > 0) {
        // This case is used for testing.
        LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to "
                + numResponseToDrop + ", this hacked client will proactively drop responses");
        proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, nameNodeUri, ClientProtocol.class,
                numResponseToDrop, nnFallbackToSimpleAuth);
    }

    if (proxyInfo != null) {
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    } else if (rpcNamenode != null) {
        // This case is used for testing.
        Preconditions.checkArgument(nameNodeUri == null);
        this.namenode = rpcNamenode;
        dtService = null;
    } else {
        Preconditions.checkArgument(nameNodeUri != null, "null URI");
        proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class,
                nnFallbackToSimpleAuth);
        this.dtService = proxyInfo.getDelegationTokenService();
        this.namenode = proxyInfo.getProxy();
    }

    String localInterfaces[] = conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
    localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
    if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
        LOG.debug("Using local interfaces [" + Joiner.on(',').join(localInterfaces) + "] with addresses ["
                + Joiner.on(',').join(localInterfaceAddrs) + "]");
    }

    Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
    Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ? null
            : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
    Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ? null
            : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
    this.defaultReadCachingStrategy = new CachingStrategy(readDropBehind, readahead);
    this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
            dfsClientConf);
    this.hedgedReadThresholdMillis = conf.getLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
    int numThreads = conf.getInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
            DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
    if (numThreads > 0) {
        this.initThreadsNumForHedgedReads(numThreads);
    }
    this.saslClient = new SaslDataTransferClient(conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
            TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
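
The readDropBehind and writeDropBehind lines above illustrate a useful getBoolean idiom: getBoolean alone cannot distinguish "unset" from "false", so the code first probes with conf.get(name) == null and only reads the boolean when the property exists, preserving null as "not configured". A standalone sketch of the pattern; the property name is illustrative:

import org.apache.hadoop.conf.Configuration;

public class TriStateBooleanSketch {
    // Returns null when the property is unset, otherwise its boolean value.
    static Boolean getOptionalBoolean(Configuration conf, String name) {
        return (conf.get(name) == null) ? null : conf.getBoolean(name, false);
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        System.out.println(getOptionalBoolean(conf, "example.drop.behind.reads")); // null (unset)
        conf.setBoolean("example.drop.behind.reads", true);
        System.out.println(getOptionalBoolean(conf, "example.drop.behind.reads")); // true
    }
}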

From source file: com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License: Apache License

public static boolean isJobVerbose(final Configuration conf) {
    return conf.getBoolean(JOB_VERBOSE, false);
}

From source file: com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License: Apache License

public static boolean isJobBackground(final Configuration conf) {
    return conf.getBoolean(JOB_BACKGROUND, false);
}

From source file: com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License: Apache License

public static boolean getLazyBSON(final Configuration conf) {
    return conf.getBoolean(INPUT_LAZY_BSON, false);
}

From source file: com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License: Apache License

/**
 * If TRUE, splits will be queried using $lt/$gt instead of $max and $min. This allows the database's query optimizer to choose the best
 * index instead of being forced to use the one in the $max/$min keys. This will only work if the key used for splitting is *not* a
 * compound key. Make sure that all values under the splitting key are of the same type, or this will cause incomplete results.
 */
public static boolean isRangeQueryEnabled(final Configuration conf) {
    return conf.getBoolean(SPLITS_USE_RANGEQUERY, false);
}

From source file: com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License: Apache License

/**
 * If TRUE, splits will be read by connecting to the individual shard servers. Only use this with care: the issue has to do with
 * chunks moving or relocating during balancing phases.
 */
public static boolean canReadSplitsFromShards(final Configuration conf) {
    return conf.getBoolean(SPLITS_USE_SHARDS, false);
}
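
Each of these helpers is the same one-liner: read a named flag with a false default, so every option is opt-in. A hedged setup sketch, assuming the key constants (JOB_VERBOSE, SPLITS_USE_RANGEQUERY, and so on) are public on MapredMongoConfigUtil as the getters suggest:

import org.apache.hadoop.conf.Configuration;
import com.mongodb.hadoop.util.MapredMongoConfigUtil;

public class MongoFlagSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(MapredMongoConfigUtil.JOB_VERBOSE, true);
        conf.setBoolean(MapredMongoConfigUtil.SPLITS_USE_RANGEQUERY, true);

        boolean verbose = MapredMongoConfigUtil.isJobVerbose(conf);           // true
        boolean lazy = MapredMongoConfigUtil.getLazyBSON(conf);               // false (unset)
        boolean shards = MapredMongoConfigUtil.canReadSplitsFromShards(conf); // false (unset)
        System.out.println(verbose + " " + lazy + " " + shards);
    }
}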