Example usage for java.util Properties stringPropertyNames

List of usage examples for java.util Properties stringPropertyNames

Introduction

On this page you can find example usages of java.util Properties stringPropertyNames.

Prototype

public Set<String> stringPropertyNames() 

Document

Returns an unmodifiable set of keys from this property list where the key and its corresponding value are strings, including distinct keys in the default property list if a key of the same name has not already been found from the main properties list.
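
Below is a minimal, self-contained sketch of the behavior described above (not taken from the collected examples): keys from the defaults chain appear unless shadowed, and entries whose value is not a String are skipped.

import java.util.Properties;
import java.util.Set;

public class StringPropertyNamesDemo {
    public static void main(String[] args) {
        Properties defaults = new Properties();
        defaults.setProperty("host", "localhost"); // present only in the defaults
        defaults.setProperty("port", "8080");

        Properties props = new Properties(defaults);
        props.setProperty("port", "9090"); // shadows the default value
        props.put("timeout", 30);          // non-String value: excluded from the set

        Set<String> names = props.stringPropertyNames();
        System.out.println(names); // [host, port] in some order; "timeout" is skipped

        // names.add("other"); // the set is unmodifiable and would throw
    }
}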

Usage

From source file:ren.hankai.cordwood.core.util.RuntimeVariables.java

/**
 * Loads the runtime variables, reloading them from the variables file when the
 * cached copy has expired.
 *
 * @return the runtime variables
 * @author hankai
 * @since Nov 22, 2018 3:46:56 PM
 */
private static Map<String, String> getVariables() {
    // invalidate the cached variables once the cache TTL has elapsed
    if (cacheSeconds > 0) {
        final long timestamp = System.currentTimeMillis() / 1000;
        if ((timestamp - lastScanTimestamp) > cacheSeconds) {
            variables = null;
        }
    }
    if (variables == null) {
        variables = new HashMap<>();
        try {
            final Properties props = new Properties();
            final File file = getVariablesFile();
            if (file.exists()) {
                // use try-with-resources so the reader is closed instead of leaked
                try (final FileReader reader = new FileReader(file)) {
                    props.load(reader);
                }
                final Set<String> keyset = props.stringPropertyNames();
                for (final String key : keyset) {
                    variables.put(key, props.getProperty(key));
                }
            }
            lastScanTimestamp = System.currentTimeMillis() / 1000;
        } catch (final FileNotFoundException ex) {
            logger.error(String.format("Runtime variables file \"%s\" not found!", savePath), ex);
        } catch (final IOException ex) {
            logger.error(String.format("Failed to load runtime variables from file \"%s\"!", savePath), ex);
        }
    }
    return variables;
}

From source file:com.jkoolcloud.tnt4j.streams.custom.kafka.interceptors.reporters.trace.MsgTraceReporter.java

protected static Properties extractKafkaProperties(Properties kafkaProperties) {
    Properties props = new Properties();
    for (String key : kafkaProperties.stringPropertyNames()) {
        if (key.startsWith(TRACER_PROPERTY_PREFIX)) {
            props.put(key.substring(TRACER_PROPERTY_PREFIX.length()), kafkaProperties.getProperty(key));
        }
    }
    return props;
}
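
A small usage sketch of the prefix filter above; the TRACER_PROPERTY_PREFIX value is a hypothetical assumption, not the library's actual constant:

Properties kafkaProps = new Properties();
kafkaProps.setProperty("messages.tracer.kafka.bootstrap.servers", "localhost:9092");
kafkaProps.setProperty("bootstrap.servers", "localhost:9092"); // no prefix: dropped

Properties tracerProps = extractKafkaProperties(kafkaProps);
// Assuming TRACER_PROPERTY_PREFIX is "messages.tracer." (hypothetical), tracerProps
// now maps "kafka.bootstrap.servers" -> "localhost:9092": the prefix is stripped
// and keys without it are ignored.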

From source file:eu.stratosphere.pact.test.util.TestBase.java

/**
 * Helper method that eases constructing valid JUnit test parameter lists.
 *
 * @param parent
 *            the test class the configurations belong to
 * @param testConfigs
 *            list of PACT test configurations
 * @return list of JUnit test configurations
 * @throws IOException
 * @throws FileNotFoundException
 */
protected static Collection<Object[]> toParameterList(Class<? extends TestBase> parent,
        List<Configuration> testConfigs) throws FileNotFoundException, IOException {
    String testClassName = parent.getName();

    File configDir = new File(Constants.TEST_CONFIGS);

    List<String> clusterConfigs = new ArrayList<String>();

    if (configDir.isDirectory()) {
        for (File configFile : configDir.listFiles()) {
            Properties p = new Properties();
            // use try-with-resources so the stream is closed instead of leaked
            try (FileInputStream in = new FileInputStream(configFile)) {
                p.load(in);
            }

            for (String key : p.stringPropertyNames()) {
                if (key.endsWith(testClassName)) {
                    for (String config : p.getProperty(key).split(",")) {
                        clusterConfigs.add(config);
                    }
                }
            }
        }
    }

    if (clusterConfigs.isEmpty()) {
        LOG.warn("No test config defined for test-class '" + testClassName + "'. Using default config: '"
                + Constants.DEFAULT_TEST_CONFIG + "'.");
        clusterConfigs.add(Constants.DEFAULT_TEST_CONFIG);
    }

    LinkedList<Object[]> configs = new LinkedList<Object[]>();
    for (String clusterConfig : clusterConfigs) {
        for (Configuration testConfig : testConfigs) {
            Object[] c = { clusterConfig, testConfig };
            configs.add(c);
        }
    }

    return configs;
}
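
The loop above expects property files whose keys end with the fully qualified test class name and whose values list cluster configs separated by commas. A minimal sketch with hypothetical keys and values:

Properties p = new Properties();
p.setProperty("configs.eu.stratosphere.pact.test.util.TestBase", "local,cluster");

List<String> clusterConfigs = new ArrayList<String>();
for (String key : p.stringPropertyNames()) {
    if (key.endsWith("TestBase")) {
        for (String config : p.getProperty(key).split(",")) {
            clusterConfigs.add(config);
        }
    }
}
// clusterConfigs: [local, cluster]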

From source file:org.marketcetera.photon.strategy.engine.AbstractStrategyEngineConnection.java

/**
 * Return string-based properties as a map.
 *
 * @param properties
 *            the string-encoded properties
 * @return a map of properties
 */
protected static Map<String, String> getPropertiesMap(String properties) {
    Map<String, String> map = Maps.newHashMap();
    Properties props = Util.propertiesFromString(properties);
    if (props != null) {
        for (String key : props.stringPropertyNames()) {
            map.put(key, props.getProperty(key));
        }
    }
    return map;
}

From source file:org.apache.geode.management.internal.cli.commands.ConnectCommand.java

private static boolean containsSSLConfig(Properties properties) {
    return properties.stringPropertyNames().stream().anyMatch(key -> key.startsWith("ssl-"));
}

From source file:org.apache.geode.management.internal.cli.commands.ConnectCommand.java

static boolean containsLegacySSLConfig(Properties properties) {
    return properties.stringPropertyNames().stream().anyMatch(key -> key.startsWith(CLUSTER_SSL_PREFIX)
            || key.startsWith(JMX_MANAGER_SSL_PREFIX) || key.startsWith(HTTP_SERVICE_SSL_PREFIX));
}

From source file:com.opengamma.examples.simulated.DBTestUtils.java

public static Properties loadProperties(String configResourceLocation) throws IOException {
    Resource resource = ResourceUtils.createResource(configResourceLocation);
    Properties props = new Properties();
    props.load(resource.getInputStream());

    String nextConfiguration = props.getProperty("MANAGER.NEXT.FILE");
    if (nextConfiguration != null) {
        resource = ResourceUtils.createResource(nextConfiguration);
        Properties parentProps = new Properties();
        parentProps.load(resource.getInputStream());
        for (String key : props.stringPropertyNames()) {
            parentProps.put(key, props.getProperty(key));
        }
        props = parentProps;
    }

    for (String key : props.stringPropertyNames()) {
        s_logger.debug("\t{}={}", key, props.getProperty(key));
    }

    return props;
}

From source file:org.apache.ambari.client.ClusterCreate.java

public static List<RoleToNodes> getRoleToNodesList(Properties roleToNodeExpressions) {
    if (roleToNodeExpressions == null) {
        return null;
    }

    List<RoleToNodes> roleToNodesMap = new ArrayList<RoleToNodes>();
    for (String roleName : roleToNodeExpressions.stringPropertyNames()) {
        RoleToNodes e = new RoleToNodes();
        e.setRoleName(roleName);
        e.setNodes(roleToNodeExpressions.getProperty(roleName));
        roleToNodesMap.add(e);
    }
    return roleToNodesMap;
}
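
A brief sketch of the input shape the method above consumes: each property key is a role name and its value a node expression (values here are hypothetical).

Properties roleToNode = new Properties();
roleToNode.setProperty("namenode", "host[1]");
roleToNode.setProperty("datanode", "host[2-10]");

List<RoleToNodes> roles = getRoleToNodesList(roleToNode);
// roles contains one RoleToNodes per property key, e.g. "namenode" -> "host[1]"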

From source file:com.marklogic.client.example.util.Bootstrapper.java

public static List<String> listInvalidKeys(Properties properties) {
    Map<String, String> propNames = getPropNames();

    List<String> invalid = null;
    for (String key : properties.stringPropertyNames()) {
        if (propNames.containsKey(key))
            continue;

        if (invalid == null)
            invalid = new ArrayList<String>();

        invalid.add(key);
    }

    return invalid;
}

From source file:org.apache.hadoop.hive.ql.exec.spark.HiveSparkClientFactory.java

public static Map<String, String> initiateSparkConf(HiveConf hiveConf) {
    Map<String, String> sparkConf = new HashMap<String, String>();
    HBaseConfiguration.addHbaseResources(hiveConf);

    // set default spark configurations.
    sparkConf.put("spark.master", SPARK_DEFAULT_MASTER);
    final String appNameKey = "spark.app.name";
    String appName = hiveConf.get(appNameKey);
    if (appName == null) {
        appName = SPARK_DEFAULT_APP_NAME;
    }
    sparkConf.put(appNameKey, appName);
    sparkConf.put("spark.serializer", SPARK_DEFAULT_SERIALIZER);
    sparkConf.put("spark.kryo.referenceTracking", SPARK_DEFAULT_REFERENCE_TRACKING);

    // load properties from spark-defaults.conf.
    InputStream inputStream = null;
    try {
        inputStream = HiveSparkClientFactory.class.getClassLoader()
                .getResourceAsStream(SPARK_DEFAULT_CONF_FILE);
        if (inputStream != null) {
            LOG.info("loading spark properties from:" + SPARK_DEFAULT_CONF_FILE);
            Properties properties = new Properties();
            properties.load(new InputStreamReader(inputStream, CharsetNames.UTF_8));
            for (String propertyName : properties.stringPropertyNames()) {
                if (propertyName.startsWith("spark")) {
                    String value = properties.getProperty(propertyName);
                    sparkConf.put(propertyName, value);
                    LOG.info(String.format("load spark property from %s (%s -> %s).", SPARK_DEFAULT_CONF_FILE,
                            propertyName, LogUtils.maskIfPassword(propertyName, value)));
                }
            }
        }
    } catch (IOException e) {
        LOG.info("Failed to open spark configuration file:" + SPARK_DEFAULT_CONF_FILE, e);
    } finally {
        if (inputStream != null) {
            try {
                inputStream.close();
            } catch (IOException e) {
                LOG.debug("Failed to close inputstream.", e);
            }
        }
    }

    // load properties from hive configurations, including both spark.* properties,
    // properties for remote driver RPC, and yarn properties for Spark on YARN mode.
    String sparkMaster = hiveConf.get("spark.master");
    if (sparkMaster == null) {
        sparkMaster = sparkConf.get("spark.master");
        hiveConf.set("spark.master", sparkMaster);
    }
    String deployMode = null;
    if (!SparkClientUtilities.isLocalMaster(sparkMaster)) {
        deployMode = hiveConf.get(SPARK_DEPLOY_MODE);
        if (deployMode == null) {
            deployMode = sparkConf.get(SPARK_DEPLOY_MODE);
            if (deployMode == null) {
                deployMode = SparkClientUtilities.getDeployModeFromMaster(sparkMaster);
            }
            if (deployMode == null) {
                deployMode = SPARK_DEFAULT_DEPLOY_MODE;
            }
            hiveConf.set(SPARK_DEPLOY_MODE, deployMode);
        }
    }
    if (SessionState.get() != null && SessionState.get().getConf() != null) {
        SessionState.get().getConf().set("spark.master", sparkMaster);
        if (deployMode != null) {
            SessionState.get().getConf().set(SPARK_DEPLOY_MODE, deployMode);
        }
    }
    if (SparkClientUtilities.isYarnClusterMode(sparkMaster, deployMode)) {
        sparkConf.put("spark.yarn.maxAppAttempts", "1");
    }
    for (Map.Entry<String, String> entry : hiveConf) {
        String propertyName = entry.getKey();
        if (propertyName.startsWith("spark")) {
            String value = hiveConf.get(propertyName);
            sparkConf.put(propertyName, value);
            LOG.info(String.format("load spark property from hive configuration (%s -> %s).", propertyName,
                    LogUtils.maskIfPassword(propertyName, value)));
        } else if (propertyName.startsWith("yarn") && SparkClientUtilities.isYarnMaster(sparkMaster)) {
            String value = hiveConf.get(propertyName);
            // Add the spark.hadoop prefix to yarn properties, since SparkConf only accepts
            // properties that start with the spark prefix; Spark later strips the spark.hadoop
            // prefix and adds the property to its Hadoop configuration.
            sparkConf.put("spark.hadoop." + propertyName, value);
            LOG.info(String.format("load yarn property from hive configuration in %s mode (%s -> %s).",
                    sparkMaster, propertyName, LogUtils.maskIfPassword(propertyName, value)));
        } else if (propertyName.equals(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)) {
            String value = hiveConf.get(propertyName);
            if (value != null && !value.isEmpty()) {
                sparkConf.put("spark.hadoop." + propertyName, value);
            }
        } else if (propertyName.startsWith("hbase") || propertyName.startsWith("zookeeper.znode")) {
            // Add HBase-related configuration to Spark because, in security mode, Spark needs
            // it to generate an HBase delegation token. This is a temporary workaround for a
            // Spark problem.
            String value = hiveConf.get(propertyName);
            sparkConf.put("spark.hadoop." + propertyName, value);
            LOG.info(String.format("load HBase configuration (%s -> %s).", propertyName,
                    LogUtils.maskIfPassword(propertyName, value)));
        } else if (propertyName.startsWith("oozie")) {
            String value = hiveConf.get(propertyName);
            sparkConf.put("spark." + propertyName, value);
            LOG.info(String.format("Pass Oozie configuration (%s -> %s).", propertyName,
                    LogUtils.maskIfPassword(propertyName, value)));
        }

        if (RpcConfiguration.HIVE_SPARK_RSC_CONFIGS.contains(propertyName)) {
            String value = RpcConfiguration.getValue(hiveConf, propertyName);
            sparkConf.put(propertyName, value);
            LOG.info(String.format("load RPC property from hive configuration (%s -> %s).", propertyName,
                    LogUtils.maskIfPassword(propertyName, value)));
        }
    }

    Set<String> classes = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings()
            .split(Strings.nullToEmpty(sparkConf.get("spark.kryo.classesToRegister"))));
    classes.add(Writable.class.getName());
    classes.add(VectorizedRowBatch.class.getName());
    classes.add(BytesWritable.class.getName());
    classes.add(HiveKey.class.getName());
    sparkConf.put("spark.kryo.classesToRegister", Joiner.on(",").join(classes));

    // set yarn queue name
    final String sparkQueueNameKey = "spark.yarn.queue";
    if (SparkClientUtilities.isYarnMaster(sparkMaster) && hiveConf.get(sparkQueueNameKey) == null) {
        String queueName = hiveConf.get("mapreduce.job.queuename");
        if (queueName != null) {
            sparkConf.put(sparkQueueNameKey, queueName);
        }
    }

    // Disable waiting for app completion to avoid verbose app state reports in yarn-cluster mode
    if (SparkClientUtilities.isYarnClusterMode(sparkMaster, deployMode)
            && sparkConf.get(SPARK_WAIT_APP_COMPLETE) == null) {
        sparkConf.put(SPARK_WAIT_APP_COMPLETE, "false");
    }

    // Set the credential provider password if one is found. If there is a job-specific
    // password, the credential provider location is set directly in the execute method of
    // LocalSparkClient and the submit method of RemoteHiveSparkClient when the job config
    // is created.
    String password = HiveConfUtil.getJobCredentialProviderPassword(hiveConf);
    if (password != null) {
        addCredentialProviderPassword(sparkConf, password);
    }
    return sparkConf;
}