List of usage examples for org.apache.hadoop.conf Configuration getLong
public long getLong(String name, long defaultValue)
Gets the value of the name property as a long. If the property is not set, the supplied defaultValue is returned.
Parameters: name - the property name; defaultValue - the value to return if the property is not set.
Returns: the property value as a long, or defaultValue if the property is not set.
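Before the project examples, a minimal self-contained sketch of the call itself, assuming hadoop-common is on the classpath; the property name "example.flush.interval.ms" and the defaults used here are illustrative placeholders, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;

public class GetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Property is not set yet, so the default (10000L) is returned.
        long flushMs = conf.getLong("example.flush.interval.ms", 10000L);
        System.out.println("flush interval = " + flushMs + " ms");

        // Once the property is set, the configured value wins over the default.
        conf.setLong("example.flush.interval.ms", 60000L);
        System.out.println("flush interval = " + conf.getLong("example.flush.interval.ms", 10000L) + " ms");
    }
}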
From source file:org.apache.rya.indexing.accumulo.ConfigUtils.java
License:Apache License
public static long getWriterMaxMemory(final Configuration conf) {
    return conf.getLong(CLOUDBASE_WRITER_MAX_MEMORY, WRITER_MAX_MEMORY);
}
From source file:org.apache.rya.mongodb.batch.MongoDbBatchWriterUtils.java
License:Apache License
/**
 * The time to wait in milliseconds to flush all statements out that are
 * queued for insertion if the queue has not filled up to its capacity.
 * @param conf the {@link Configuration} to check.
 * @return the configured value or the default value.
 */
public static long getConfigBatchFlushTimeMs(final Configuration conf) {
    return conf.getLong(BATCH_FLUSH_TIME_MS_TAG, MongoDbBatchWriterConfig.DEFAULT_BATCH_FLUSH_TIME_MS);
}
From source file:org.apache.rya.prospector.mr.ProspectorReducer.java
License:Apache License
@Override
public void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    final Configuration conf = context.getConfiguration();
    final long now = conf.getLong("DATE", System.currentTimeMillis());
    truncatedDate = DateUtils.truncate(new Date(now), Calendar.MINUTE);
    this.plans = ProspectorUtils.planMap(manager.getPlans());
}
From source file:org.apache.sentry.api.generic.thrift.SentryGenericServiceClientDefaultImpl.java
License:Apache License
/**
 * Initialize client with the given configuration, using specified transport pool
 * implementation for obtaining transports.
 * @param conf Sentry Configuration
 * @param transportPool source of connected transports
 */
SentryGenericServiceClientDefaultImpl(Configuration conf, SentryTransportPool transportPool) {
    //TODO(kalyan) need to find appropriate place to add it
    // if (kerberos) {
    //   // since the client uses hadoop-auth, we need to set kerberos in
    //   // hadoop-auth if we plan to use kerberos
    //   conf.set(HADOOP_SECURITY_AUTHENTICATION, SentryConstants.KERBEROS_MODE);
    // }
    maxMessageSize = conf.getLong(ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE,
            ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE_DEFAULT);
    this.transportPool = transportPool;
}
From source file:org.apache.sentry.api.service.thrift.SentryPolicyServiceClientDefaultImpl.java
License:Apache License
/**
 * Initialize the sentry configurations.
 */
public SentryPolicyServiceClientDefaultImpl(Configuration conf, SentryTransportPool transportPool)
        throws IOException {
    maxMessageSize = conf.getLong(ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE,
            ClientConfig.SENTRY_POLICY_CLIENT_THRIFT_MAX_MESSAGE_SIZE_DEFAULT);
    this.transportPool = transportPool;
}
From source file:org.apache.sentry.core.common.transport.SentryHDFSClientTransportConfig.java
License:Apache License
@Override
public long getSentryRpcConnRetryDelayInMs(Configuration conf) {
    return conf.getLong(SERVER_RPC_CONN_RETRY_DELAY_MS, SERVER_RPC_CONN_RETRY_DELAY_MS_DEFAULT);
}
From source file:org.apache.sentry.core.common.transport.SentryHDFSClientTransportConfig.java
License:Apache License
@Override
public long getMinEvictableTimeSec(Configuration conf) {
    return conf.getLong(SENTRY_POOL_MIN_EVICTION_TIME_SEC, SENTRY_POOL_MIN_EVICTION_TIME_SEC_DEFAULT);
}
From source file:org.apache.sentry.core.common.transport.SentryHDFSClientTransportConfig.java
License:Apache License
@Override
public long getTimeBetweenEvictionRunsSec(Configuration conf) {
    return conf.getLong(SENTRY_POOL_EVICTION_INTERVAL_SEC, SENTRY_POOL_EVICTION_INTERVAL_SEC_DEFAULT);
}
From source file:org.apache.sentry.hdfs.SentryAuthorizationProvider.java
License:Apache License
@Override
public synchronized void start() {
    if (started) {
        throw new IllegalStateException("Provider already started");
    }
    started = true;
    try {
        if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false)) {
            throw new RuntimeException("HDFS ACLs must be enabled");
        }
        defaultAuthzProvider = new DefaultAuthorizationProvider();
        defaultAuthzProvider.start();
        // Configuration is read from hdfs-sentry.xml and NN configuration, in
        // that order of precedence.
        Configuration conf = new Configuration(this.conf);
        conf.addResource(SentryAuthorizationConstants.CONFIG_FILE);
        user = conf.get(SentryAuthorizationConstants.HDFS_USER_KEY,
                SentryAuthorizationConstants.HDFS_USER_DEFAULT);
        group = conf.get(SentryAuthorizationConstants.HDFS_GROUP_KEY,
                SentryAuthorizationConstants.HDFS_GROUP_DEFAULT);
        permission = FsPermission.createImmutable(
                (short) conf.getLong(SentryAuthorizationConstants.HDFS_PERMISSION_KEY,
                        SentryAuthorizationConstants.HDFS_PERMISSION_DEFAULT));
        originalAuthzAsAcl = conf.getBoolean(SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_KEY,
                SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT);
        LOG.info("Starting");
        LOG.info("Config: hdfs-user[{}] hdfs-group[{}] hdfs-permission[{}] "
                + "include-hdfs-authz-as-acl[{}]",
                new Object[] { user, group, permission, originalAuthzAsAcl });
        if (authzInfo == null) {
            authzInfo = new SentryAuthorizationInfo(conf);
        }
        authzInfo.start();
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
From source file:org.apache.sentry.hdfs.SentryHDFSServiceClientDefaultImpl.java
License:Apache License
SentryHDFSServiceClientDefaultImpl(Configuration conf, SentryTransportPool transportPool) {
    maxMessageSize = conf.getLong(ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE,
            ClientConfig.SENTRY_HDFS_THRIFT_MAX_MESSAGE_SIZE_DEFAULT);
    useCompactTransport = conf.getBoolean(ClientConfig.USE_COMPACT_TRANSPORT,
            ClientConfig.USE_COMPACT_TRANSPORT_DEFAULT);
    this.transportPool = transportPool;
}