Example usage for org.apache.hadoop.conf Configuration getLong

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.getLong, taken from open source projects.

Prototype

public long getLong(String name, long defaultValue) 

Document

Get the value of the name property as a long; if no such property exists, the provided default value is returned.
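
A minimal, self-contained sketch of the call; the property name and default value below are illustrative, not taken from any of the projects listed:

import org.apache.hadoop.conf.Configuration;

public class GetLongExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The property is unset, so the default is returned.
        long timeoutMs = conf.getLong("example.timeout.ms", 30000L);
        System.out.println(timeoutMs); // prints 30000

        // Once the property is set, getLong parses the stored value instead.
        conf.setLong("example.timeout.ms", 60000L);
        System.out.println(conf.getLong("example.timeout.ms", 30000L)); // prints 60000
    }
}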

Usage

From source file: org.apache.sentry.hdfs.SentryINodeAttributesProvider.java

License: Apache License

@Override
public void start() {
    if (started) {
        throw new IllegalStateException("Provider already started");
    }
    started = true;
    try {
        if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false)) {
            throw new RuntimeException("HDFS ACLs must be enabled");
        }
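        // A local copy shadows the provider's conf field so the Sentry
        // configuration file can be layered on without mutating the original.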
        Configuration conf = new Configuration(this.conf);
        conf.addResource(SentryAuthorizationConstants.CONFIG_FILE, true);
        user = conf.get(SentryAuthorizationConstants.HDFS_USER_KEY,
                SentryAuthorizationConstants.HDFS_USER_DEFAULT);
        group = conf.get(SentryAuthorizationConstants.HDFS_GROUP_KEY,
                SentryAuthorizationConstants.HDFS_GROUP_DEFAULT);
        permission = FsPermission
                .createImmutable((short) conf.getLong(SentryAuthorizationConstants.HDFS_PERMISSION_KEY,
                        SentryAuthorizationConstants.HDFS_PERMISSION_DEFAULT));
        originalAuthzAsAcl = conf.getBoolean(SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_KEY,
                SentryAuthorizationConstants.INCLUDE_HDFS_AUTHZ_AS_ACL_DEFAULT);

        LOG.info("Starting");
        LOG.info("Config: hdfs-user[{}] hdfs-group[{}] hdfs-permission[{}] " + "include-hdfs-authz-as-acl[{}]",
                new Object[] { user, group, permission, originalAuthzAsAcl });

        if (authzInfo == null) {
            authzInfo = new SentryAuthorizationInfo(conf);
        }
        authzInfo.start();
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
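
The snippet above reads the permission bits with getLong and narrows the result to short for FsPermission.createImmutable. A minimal sketch of the same pattern, assuming a hypothetical property name and an octal default:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionFromConf {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 0771 is an octal literal (rwxrwx--x); the key is illustrative.
        short mode = (short) conf.getLong("example.hdfs.permission", 0771);
        System.out.println(FsPermission.createImmutable(mode)); // prints rwxrwx--x
    }
}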

From source file: org.apache.sentry.provider.db.generic.UpdatableCache.java

License: Apache License

UpdatableCache(Configuration conf, String componentType, String serviceName,
        TSentryPrivilegeConverter tSentryPrivilegeConverter) {
    this.conf = conf;
    this.componentType = componentType;
    this.serviceName = serviceName;
    this.tSentryPrivilegeConverter = tSentryPrivilegeConverter;

    // check caching configuration
    this.cacheTtlNs = TimeUnit.MILLISECONDS.toNanos(conf.getLong(ApiConstants.ClientConfig.CACHE_TTL_MS,
            ApiConstants.ClientConfig.CACHING_TTL_MS_DEFAULT));
    this.allowedUpdateFailuresCount = conf.getInt(
            ApiConstants.ClientConfig.CACHE_UPDATE_FAILURES_BEFORE_PRIV_REVOKE,
            ApiConstants.ClientConfig.CACHE_UPDATE_FAILURES_BEFORE_PRIV_REVOKE_DEFAULT);
}
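
The constructor converts a millisecond TTL from the configuration into nanoseconds once, up front, rather than on every cache read. A sketch of that conversion with an illustrative key and default, not Sentry's actual constants:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TtlFromConf {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Key and default are illustrative.
        long ttlMs = conf.getLong("example.cache.ttl.ms", 30000L);
        long ttlNs = TimeUnit.MILLISECONDS.toNanos(ttlMs);
        System.out.println(ttlNs); // prints 30000000000
    }
}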

From source file: org.apache.sentry.service.thrift.SentryService.java

License: Apache License

public SentryService(Configuration conf) throws Exception {
    this.conf = conf;
    int port = conf.getInt(ServerConfig.RPC_PORT, ServerConfig.RPC_PORT_DEFAULT);
    if (port == 0) {
        port = findFreePort();
        conf.setInt(ServerConfig.RPC_PORT, port);
    }
    this.address = NetUtils
            .createSocketAddr(conf.get(ServerConfig.RPC_ADDRESS, ServerConfig.RPC_ADDRESS_DEFAULT), port);
    LOGGER.info("Configured on address {}", address);
    kerberos = ServerConfig.SECURITY_MODE_KERBEROS
            .equalsIgnoreCase(conf.get(ServerConfig.SECURITY_MODE, ServerConfig.SECURITY_MODE_KERBEROS).trim());
    maxThreads = conf.getInt(ServerConfig.RPC_MAX_THREADS, ServerConfig.RPC_MAX_THREADS_DEFAULT);
    minThreads = conf.getInt(ServerConfig.RPC_MIN_THREADS, ServerConfig.RPC_MIN_THREADS_DEFAULT);
    maxMessageSize = conf.getLong(ServerConfig.SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE,
            ServerConfig.SENTRY_POLICY_SERVER_THRIFT_MAX_MESSAGE_SIZE_DEFAULT);
    if (kerberos) {
        // Use Hadoop libraries to replace the _HOST placeholder with the actual hostname
        try {
            String rawPrincipal = Preconditions.checkNotNull(conf.get(ServerConfig.PRINCIPAL),
                    ServerConfig.PRINCIPAL + " is required");
            principal = SecurityUtil.getServerPrincipal(rawPrincipal, address.getAddress());
        } catch (IOException io) {
            throw new RuntimeException("Can't translate kerberos principal'", io);
        }
        LOGGER.info("Using kerberos principal: {}", principal);

        principalParts = SaslRpcServer.splitKerberosName(principal);
        Preconditions.checkArgument(principalParts.length == 3,
                "Kerberos principal should have 3 parts: " + principal);
        keytab = Preconditions.checkNotNull(conf.get(ServerConfig.KEY_TAB),
                ServerConfig.KEY_TAB + " is required");
        File keytabFile = new File(keytab);
        Preconditions.checkState(keytabFile.isFile() && keytabFile.canRead(),
                "Keytab %s does not exist or is not readable.", keytab);
    } else {
        principal = null;
        principalParts = null;
        keytab = null;
    }
    ThreadFactory sentryServiceThreadFactory = new ThreadFactoryBuilder()
            .setNameFormat(SENTRY_SERVICE_THREAD_NAME).build();
    serviceExecutor = Executors.newSingleThreadExecutor(sentryServiceThreadFactory);
    this.sentryStore = getSentryStore(conf);
    sentryStore.setPersistUpdateDeltas(SentryServiceUtil.isHDFSSyncEnabled(conf));
    this.leaderMonitor = LeaderStatusMonitor.getLeaderStatusMonitor(conf);

    status = Status.NOT_STARTED;

    // Enable signal handler for HA leader/follower status if configured
    String sigName = conf.get(ServerConfig.SERVER_HA_STANDBY_SIG);
    if ((sigName != null) && !sigName.isEmpty()) {
        LOGGER.info("Registering signal handler {} for HA", sigName);
        try {
            registerSigListener(sigName, this);
        } catch (Exception e) {
            LOGGER.error("Failed to register signal", e);
        }
    }
}

From source file: org.apache.sentry.service.thrift.SentryService.java

License: Apache License

private void startHMSFollower(Configuration conf) throws Exception {
    boolean syncPolicyStore = SentryServiceUtil.isSyncPolicyStoreEnabled(conf);

    if ((!SentryServiceUtil.isHDFSSyncEnabled(conf)) && (!syncPolicyStore)) {
        LOGGER.info("HMS follower is not started because HDFS sync is disabled and perm sync is disabled");
        return;
    }

    String metastoreURI = SentryServiceUtil.getHiveMetastoreURI();
    if (metastoreURI == null) {
        LOGGER.info("Metastore uri is not configured. Do not start HMSFollower");
        return;
    }

    LOGGER.info("Starting HMSFollower to HMS {}", metastoreURI);

    Preconditions.checkState(hmsFollower == null);
    Preconditions.checkState(hmsFollowerExecutor == null);
    Preconditions.checkState(hiveConnectionFactory == null);

    hiveConnectionFactory = new HiveSimpleConnectionFactory(conf, new HiveConf());
    hiveConnectionFactory.init();
    hmsFollower = new HMSFollower(conf, sentryStore, leaderMonitor, hiveConnectionFactory);
    long initDelay = conf.getLong(ServerConfig.SENTRY_HMSFOLLOWER_INIT_DELAY_MILLS,
            ServerConfig.SENTRY_HMSFOLLOWER_INIT_DELAY_MILLS_DEFAULT);
    long period = conf.getLong(ServerConfig.SENTRY_HMSFOLLOWER_INTERVAL_MILLS,
            ServerConfig.SENTRY_HMSFOLLOWER_INTERVAL_MILLS_DEFAULT);
    try {
        ThreadFactory hmsFollowerThreadFactory = new ThreadFactoryBuilder()
                .setNameFormat(HMSFOLLOWER_THREAD_NAME).build();
        hmsFollowerExecutor = Executors.newScheduledThreadPool(1, hmsFollowerThreadFactory);
        hmsFollowerExecutor.scheduleAtFixedRate(hmsFollower, initDelay, period, TimeUnit.MILLISECONDS);
    } catch (IllegalArgumentException e) {
        LOGGER.error(
                String.format("Could not start HMSFollower due to illegal argument. period is %s ms", period),
                e);
        throw e;
    }
}
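
The init delay and period read via getLong feed directly into scheduleAtFixedRate, which throws IllegalArgumentException for a non-positive period; that is what the surrounding try block guards against. A condensed sketch of the same pattern, with illustrative keys and defaults:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class ScheduleFromConf {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        long initDelayMs = conf.getLong("example.follower.init.delay.ms", 10000L);
        long periodMs = conf.getLong("example.follower.interval.ms", 500L);

        ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
        // Throws IllegalArgumentException if periodMs <= 0, e.g. from a bad config value.
        executor.scheduleAtFixedRate(() -> System.out.println("poll HMS"), initDelayMs,
                periodMs, TimeUnit.MILLISECONDS);
    }
}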

From source file: org.apache.sentry.service.thrift.SentryService.java

License: Apache License

private void stopHMSFollower(Configuration conf) {
    if ((hmsFollowerExecutor == null) || (hmsFollower == null)) {
        Preconditions.checkState(hmsFollower == null);
        Preconditions.checkState(hmsFollowerExecutor == null);

        LOGGER.debug(
                "Skip shutting down hmsFollowerExecutor and closing hmsFollower because they are not created");
        return;
    }

    Preconditions.checkNotNull(hmsFollowerExecutor);
    Preconditions.checkNotNull(hmsFollower);
    Preconditions.checkNotNull(hiveConnectionFactory);

    // Use the follower's scheduling interval as the timeout for shutting down its
    // executor, since that interval should be an upper bound on how long the task
    // normally takes to finish.
    long timeoutValue = conf.getLong(ServerConfig.SENTRY_HMSFOLLOWER_INTERVAL_MILLS,
            ServerConfig.SENTRY_HMSFOLLOWER_INTERVAL_MILLS_DEFAULT);
    try {
        SentryServiceUtil.shutdownAndAwaitTermination(hmsFollowerExecutor, "hmsFollowerExecutor", timeoutValue,
                TimeUnit.MILLISECONDS, LOGGER);
    } finally {
        try {
            hiveConnectionFactory.close();
        } catch (Exception e) {
            LOGGER.error("Can't close HiveConnectionFactory", e);
        }
        hmsFollowerExecutor = null;
        hiveConnectionFactory = null;
        try {
            // close connections
            hmsFollower.close();
        } catch (Exception ex) {
            LOGGER.error("HMSFollower.close() failed", ex);
        } finally {
            hmsFollower = null;
        }
    }
}

From source file: org.apache.sentry.service.thrift.SentryService.java

License: Apache License

private void startSentryStoreCleaner(Configuration conf) {
    Preconditions.checkState(sentryStoreCleanService == null);

    // If SENTRY_STORE_CLEAN_PERIOD_SECONDS is set to positive, the background SentryStore cleaning
    // thread is enabled. Currently, it only purges the delta changes {@link MSentryChange} in
    // the sentry store.
    long storeCleanPeriodSecs = conf.getLong(ServerConfig.SENTRY_STORE_CLEAN_PERIOD_SECONDS,
            ServerConfig.SENTRY_STORE_CLEAN_PERIOD_SECONDS_DEFAULT);
    if (storeCleanPeriodSecs <= 0) {
        return;
    }

    try {
        Runnable storeCleaner = new Runnable() {
            @Override
            public void run() {
                if (leaderMonitor.isLeader()) {
                    sentryStore.purgeDeltaChangeTables();
                    sentryStore.purgeNotificationIdTable();
                }
            }
        };

        ThreadFactory sentryStoreCleanerThreadFactory = new ThreadFactoryBuilder()
                .setNameFormat(STORE_CLEANER_THREAD_NAME).build();
        sentryStoreCleanService = Executors.newSingleThreadScheduledExecutor(sentryStoreCleanerThreadFactory);
        sentryStoreCleanService.scheduleWithFixedDelay(storeCleaner, 0, storeCleanPeriodSecs, TimeUnit.SECONDS);

        LOGGER.info("sentry store cleaner is scheduled with interval {} seconds", storeCleanPeriodSecs);
    } catch (IllegalArgumentException e) {
        LOGGER.error("Could not start SentryStoreCleaner due to illegal argument", e);
        sentryStoreCleanService = null;
    }
}
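
Two details differ from the HMS follower above: a non-positive period disables the cleaner before any executor is created, and scheduling uses scheduleWithFixedDelay, which spaces runs from the end of the previous run rather than from its start. A minimal sketch of that guard, with an illustrative key and default:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class OptionalCleaner {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Convention from the snippet above: <= 0 means the feature is off.
        long periodSecs = conf.getLong("example.store.clean.period.seconds", -1L);
        if (periodSecs <= 0) {
            System.out.println("store cleaner disabled");
            return;
        }
        ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor();
        service.scheduleWithFixedDelay(() -> System.out.println("purge"), 0, periodSecs,
                TimeUnit.SECONDS);
    }
}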

From source file: org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegrationTogglingConf.java

License: Apache License

private static long getSleepTimeAfterFollowerRestart(Configuration conf) {
    long followerInitDelay = conf.getLong(ServiceConstants.ServerConfig.SENTRY_HMSFOLLOWER_INIT_DELAY_MILLS,
            ServiceConstants.ServerConfig.SENTRY_HMSFOLLOWER_INIT_DELAY_MILLS_DEFAULT);
    long followerInterval = conf.getLong(ServiceConstants.ServerConfig.SENTRY_HMSFOLLOWER_INTERVAL_MILLS,
            ServiceConstants.ServerConfig.SENTRY_HMSFOLLOWER_INTERVAL_MILLS_DEFAULT);
    long refreshIntervalMillisec = conf.getInt(SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_KEY,
            SentryAuthorizationConstants.CACHE_REFRESH_INTERVAL_DEFAULT);

    return (followerInitDelay + followerInterval + refreshIntervalMillisec) * 2;
}

From source file: org.apache.slider.server.services.security.FsDelegationTokenManager.java

License: Apache License

public void acquireDelegationToken(Configuration configuration) throws IOException, InterruptedException {
    if (remoteUser == null) {
        createRemoteUser(configuration);
    }
    if (SliderUtils.isHadoopClusterSecure(configuration) && renewingAction == null) {
        renewInterval = configuration.getLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
                DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
        // The constructor of the action retrieves the initial token. One may already
        // be associated with the user, but its lifecycle/management is not clear, so
        // create and manage a token explicitly.
        renewAction = new RenewAction("HDFS renew", configuration);
        // Set the retrieved token as the user's delegation token and start a
        // renewing action to keep it renewed.
        Token<?> token = renewAction.getToken();
        currentUser.addToken(token.getService(), token);
        log.info("HDFS delegation token {} acquired and set as credential for current user", token);
        renewingAction = new RenewingAction<RenewAction>(renewAction, (int) renewInterval, (int) renewInterval,
                TimeUnit.MILLISECONDS, getRenewingLimit());
        log.info("queuing HDFS delegation token renewal interval of {} milliseconds", renewInterval);
        queue(renewingAction);
    }
}
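
Note the (int) renewInterval casts: getLong returns a long, and a configured interval larger than Integer.MAX_VALUE milliseconds would silently truncate. A defensive sketch using Math.toIntExact, an assumption for illustration rather than what Slider actually does:

import org.apache.hadoop.conf.Configuration;

public class SafeNarrowing {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Key and default are illustrative.
        long renewIntervalMs = conf.getLong("example.token.renew.interval.ms", 86400000L);
        // Throws ArithmeticException on overflow instead of silently truncating.
        int intervalMs = Math.toIntExact(renewIntervalMs);
        System.out.println(intervalMs); // prints 86400000
    }
}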

From source file: org.apache.sqoop.connector.hdfs.HdfsPartitioner.java

License: Apache License

@Override
public List<Partition> getPartitions(PartitionerContext context, LinkConfiguration linkConfiguration,
        FromJobConfiguration fromJobConfig) {

    Configuration conf = HdfsUtils.configureURI(((PrefixContext) context.getContext()).getConfiguration(),
            linkConfiguration);

    try {
        long numInputBytes = getInputSize(conf, fromJobConfig.fromJobConfig.inputDirectory);
        maxSplitSize = numInputBytes / context.getMaxPartitions();

        if (numInputBytes % context.getMaxPartitions() != 0) {
            maxSplitSize += 1;
        }

        long minSizeNode = 0;
        long minSizeRack = 0;
        long maxSize = 0;

        // the values specified by setXxxSplitSize() take precedence over the
        // values that might have been specified in the config
        if (minSplitSizeNode != 0) {
            minSizeNode = minSplitSizeNode;
        } else {
            minSizeNode = conf.getLong(SPLIT_MINSIZE_PERNODE, 0);
        }
        if (minSplitSizeRack != 0) {
            minSizeRack = minSplitSizeRack;
        } else {
            minSizeRack = conf.getLong(SPLIT_MINSIZE_PERRACK, 0);
        }
        if (maxSplitSize != 0) {
            maxSize = maxSplitSize;
        } else {
            maxSize = conf.getLong("mapreduce.input.fileinputformat.split.maxsize", 0);
        }
        if (minSizeNode != 0 && maxSize != 0 && minSizeNode > maxSize) {
            throw new IOException("Minimum split size pernode " + minSizeNode
                    + " cannot be larger than maximum split size " + maxSize);
        }
        if (minSizeRack != 0 && maxSize != 0 && minSizeRack > maxSize) {
            throw new IOException("Minimum split size per rack" + minSizeRack
                    + " cannot be larger than maximum split size " + maxSize);
        }
        if (minSizeRack != 0 && minSizeNode > minSizeRack) {
            throw new IOException("Minimum split size per node" + minSizeNode
                    + " cannot be smaller than minimum split " + "size per rack " + minSizeRack);
        }

        // all the files in input set
        String indir = fromJobConfig.fromJobConfig.inputDirectory;
        FileSystem fs = FileSystem.get(conf);

        List<Path> paths = new LinkedList<Path>();
        for (FileStatus status : fs.listStatus(new Path(indir))) {
            if (!status.isDir()) {
                paths.add(status.getPath());
            }
        }

        List<Partition> partitions = new ArrayList<Partition>();
        if (paths.size() == 0) {
            return partitions;
        }

        // create splits for all files that are not in any pool.
        getMoreSplits(conf, paths, maxSize, minSizeNode, minSizeRack, partitions);

        // free up rackToNodes map
        rackToNodes.clear();

        return partitions;

    } catch (IOException e) {
        throw new SqoopException(HdfsConnectorError.GENERIC_HDFS_CONNECTOR_0000, e);
    }
}
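
For each split-size bound, a value set programmatically takes precedence, and getLong with a default of 0 serves as the fallback, 0 meaning "unset". A condensed sketch of that precedence rule; the configuration key is Hadoop's per-node minimum split size key, the rest is illustrative:

import org.apache.hadoop.conf.Configuration;

public class SplitSizePrecedence {
    // 0 means "not set programmatically", matching the snippet's convention.
    private static long minSplitSizeNode = 0;

    static long effectiveMinSizeNode(Configuration conf) {
        // A programmatic value wins; otherwise fall back to the config key.
        return minSplitSizeNode != 0 ? minSplitSizeNode
                : conf.getLong("mapreduce.input.fileinputformat.split.minsize.per.node", 0);
    }

    public static void main(String[] args) {
        System.out.println(effectiveMinSizeNode(new Configuration())); // prints 0 when unset
    }
}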

From source file: org.apache.sqoop.job.mr.SqoopInputFormat.java

License: Apache License

@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();

    String partitionerName = conf.get(MRJobConstants.JOB_ETL_PARTITIONER);
    Partitioner partitioner = (Partitioner) ClassUtils.instantiate(partitionerName);

    PrefixContext connectorContext = new PrefixContext(conf, MRJobConstants.PREFIX_CONNECTOR_FROM_CONTEXT);
    Object connectorConnection = MRConfigurationUtils.getConnectorLinkConfig(Direction.FROM, conf);
    Object connectorJob = MRConfigurationUtils.getConnectorJobConfig(Direction.FROM, conf);
    Schema schema = MRConfigurationUtils.getConnectorSchema(Direction.FROM, conf);

    long maxPartitions = conf.getLong(MRJobConstants.JOB_ETL_EXTRACTOR_NUM, 10);
    PartitionerContext partitionerContext = new PartitionerContext(connectorContext, maxPartitions, schema);

    List<Partition> partitions = partitioner.getPartitions(partitionerContext, connectorConnection,
            connectorJob);
    List<InputSplit> splits = new LinkedList<InputSplit>();
    for (Partition partition : partitions) {
        LOG.debug("Partition: " + partition);
        SqoopSplit split = new SqoopSplit();
        split.setPartition(partition);
        splits.add(split);
    }

    if (splits.size() > maxPartitions) {
        throw new SqoopException(MRExecutionError.MAPRED_EXEC_0025,
                String.format("Got %d, max was %d", splits.size(), maxPartitions));
    }

    return splits;
}