Example usage for java.util Properties putAll

List of usage examples for java.util Properties putAll

Introduction

On this page you can find usage examples for java.util.Properties putAll, which copies all of the mappings from the specified map into the Properties object.

Prototype

@Override
public synchronized void putAll(Map<?, ?> t)
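
The usage sections below repeatedly follow the same pattern: copy a shared base Properties object into a fresh one with putAll, then override individual entries with setProperty. The following minimal, self-contained sketch illustrates that pattern; the property keys and values are illustrative only, not taken from any of the projects below. Note that the standard library documentation recommends setProperty over put/putAll for adding entries, because putAll (inherited from Hashtable) accepts non-String keys and values, which getProperty will not return and which cause store to fail.

import java.util.Properties;

public class PropertiesPutAllExample {
    public static void main(String[] args) {
        // base settings shared across several jobs (keys are illustrative)
        Properties standardProps = new Properties();
        standardProps.setProperty("bootstrap.servers", "localhost:9092");
        standardProps.setProperty("group.id", "example-group");
        standardProps.setProperty("auto.offset.reset", "earliest");

        // copy every mapping from the base object into a fresh one ...
        Properties readProps = new Properties();
        readProps.putAll(standardProps);

        // ... then override individual entries for this particular job
        readProps.setProperty("auto.offset.reset", "latest");

        System.out.println(readProps.getProperty("bootstrap.servers")); // localhost:9092
        System.out.println(readProps.getProperty("auto.offset.reset")); // latest
    }
}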

Usage

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * This test ensures that when explicitly set to start from earliest record, the consumer
 * ignores the "auto.offset.reset" behaviour as well as any committed group offsets in Kafka.
 */
public void runStartFromEarliestOffsets() throws Exception {
    // 3 partitions with 50 records each (0-49, so the expected commit offset of each partition should be 50)
    final int parallelism = 3;
    final int recordsInEachPartition = 50;

    final String topicName = writeSequence("testStartFromEarliestOffsetsTopic", recordsInEachPartition,
            parallelism, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
            flinkPort);
    env.getConfig().disableSysoutLogging();
    env.setParallelism(parallelism);

    Properties readProps = new Properties();
    readProps.putAll(standardProps);
    readProps.setProperty("auto.offset.reset", "latest"); // this should be ignored

    // the committed offsets should be ignored
    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();
    kafkaOffsetHandler.setCommittedOffset(topicName, 0, 23);
    kafkaOffsetHandler.setCommittedOffset(topicName, 1, 31);
    kafkaOffsetHandler.setCommittedOffset(topicName, 2, 43);

    readSequence(env, StartupMode.EARLIEST, null, readProps, parallelism, topicName, recordsInEachPartition, 0);

    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * This test ensures that the consumer correctly uses group offsets in Kafka, and defaults to "auto.offset.reset"
 * behaviour when necessary, when explicitly configured to start from group offsets.
 *
 * The partitions and their committed group offsets are set up as:
 *    partition 0 --> committed offset 23
 *    partition 1 --> no commit offset
 *    partition 2 --> committed offset 43
 *
 * When configured to start from group offsets, each partition should read:
 *    partition 0 --> start from offset 23, read to offset 49 (27 records)
 *    partition 1 --> default to "auto.offset.reset" (set to earliest), so start from offset 0, read to offset 49 (50 records)
 *    partition 2 --> start from offset 43, read to offset 49 (7 records)
 */
public void runStartFromGroupOffsets() throws Exception {
    // 3 partitions with 50 records each (offsets 0-49)
    final int parallelism = 3;
    final int recordsInEachPartition = 50;

    final String topicName = writeSequence("testStartFromGroupOffsetsTopic", recordsInEachPartition,
            parallelism, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
            flinkPort);
    env.getConfig().disableSysoutLogging();
    env.setParallelism(parallelism);

    Properties readProps = new Properties();
    readProps.putAll(standardProps);
    readProps.setProperty("auto.offset.reset", "earliest");

    // the committed group offsets should be used as starting points
    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();

    // only partitions 0 and 2 have group offsets committed
    kafkaOffsetHandler.setCommittedOffset(topicName, 0, 23);
    kafkaOffsetHandler.setCommittedOffset(topicName, 2, 43);

    Map<Integer, Tuple2<Integer, Integer>> partitionsToValueCountAndStartOffsets = new HashMap<>();
    partitionsToValueCountAndStartOffsets.put(0, new Tuple2<>(27, 23)); // partition 0 should read offset 23-49
    partitionsToValueCountAndStartOffsets.put(1, new Tuple2<>(50, 0)); // partition 1 should read offset 0-49
    partitionsToValueCountAndStartOffsets.put(2, new Tuple2<>(7, 43)); // partition 2 should read offset 43-49

    readSequence(env, StartupMode.GROUP_OFFSETS, null, readProps, topicName,
            partitionsToValueCountAndStartOffsets);

    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Tests proper consumption when there is a 1:1 correspondence between Kafka partitions and
 * Flink sources.
 */
public void runOneToOneExactlyOnceTest() throws Exception {

    final String topic = "oneToOneTopic";
    final int parallelism = 5;
    final int numElementsPerPartition = 1000;
    final int totalElements = parallelism * numElementsPerPartition;
    final int failAfterElements = numElementsPerPartition / 3;

    createTestTopic(topic, parallelism, 1);

    DataGenerators.generateRandomizedIntegerSequence(
            StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort), kafkaServer, topic,
            parallelism, numElementsPerPartition, true);

    // run the topology that fails and recovers

    DeserializationSchema<Integer> schema = new TypeInformationSerializationSchema<>(
            BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
    env.getConfig().disableSysoutLogging();

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);

    FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

    env.addSource(kafkaSource).map(new PartitionValidatingMapper(parallelism, 1))
            .map(new FailingIdentityMapper<Integer>(failAfterElements))
            .addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

    FailingIdentityMapper.failedBefore = false;
    tryExecute(env, "One-to-one exactly once test");

    deleteTestTopic(topic);
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Tests proper consumption when there are fewer Flink sources than Kafka partitions, so
 * one Flink source will read multiple Kafka partitions.
 */
public void runOneSourceMultiplePartitionsExactlyOnceTest() throws Exception {
    final String topic = "oneToManyTopic";
    final int numPartitions = 5;
    final int numElementsPerPartition = 1000;
    final int totalElements = numPartitions * numElementsPerPartition;
    final int failAfterElements = numElementsPerPartition / 3;

    final int parallelism = 2;

    createTestTopic(topic, numPartitions, 1);

    DataGenerators.generateRandomizedIntegerSequence(
            StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort), kafkaServer, topic,
            numPartitions, numElementsPerPartition, false);

    // run the topology that fails and recovers

    DeserializationSchema<Integer> schema = new TypeInformationSerializationSchema<>(
            BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
    env.getConfig().disableSysoutLogging();

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

    env.addSource(kafkaSource).map(new PartitionValidatingMapper(numPartitions, 3))
            .map(new FailingIdentityMapper<Integer>(failAfterElements))
            .addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

    FailingIdentityMapper.failedBefore = false;
    tryExecute(env, "One-source-multi-partitions exactly once test");

    deleteTestTopic(topic);
}

From source file:org.apache.sentry.provider.db.service.persistent.SentryStore.java

public SentryStore(Configuration conf) throws SentryNoSuchObjectException, SentryAccessDeniedException,
        SentryConfigurationException, IOException {
    commitSequenceId = 0;
    this.conf = conf;
    Properties prop = new Properties();
    prop.putAll(ServerConfig.SENTRY_STORE_DEFAULTS);
    String jdbcUrl = conf.get(ServerConfig.SENTRY_STORE_JDBC_URL, "").trim();
    Preconditions.checkArgument(!jdbcUrl.isEmpty(),
            "Required parameter " + ServerConfig.SENTRY_STORE_JDBC_URL + " missing");
    String user = conf.get(ServerConfig.SENTRY_STORE_JDBC_USER, ServerConfig.SENTRY_STORE_JDBC_USER_DEFAULT)
            .trim();
    // Password will be read from the credential provider specified by the property
    // CREDENTIAL_PROVIDER_PATH ("hadoop.security.credential.provider.path") in sentry-site.xml;
    // if no provider is configured, it falls back to reading directly from sentry-site.xml
    char[] passTmp = conf.getPassword(ServerConfig.SENTRY_STORE_JDBC_PASS);
    String pass = null;
    if (passTmp != null) {
        pass = new String(passTmp);
    } else {
        throw new SentryConfigurationException("Error reading " + ServerConfig.SENTRY_STORE_JDBC_PASS);
    }

    String driverName = conf.get(ServerConfig.SENTRY_STORE_JDBC_DRIVER,
            ServerConfig.SENTRY_STORE_JDBC_DRIVER_DEFAULT);
    prop.setProperty(ServerConfig.JAVAX_JDO_URL, jdbcUrl);
    prop.setProperty(ServerConfig.JAVAX_JDO_USER, user);
    prop.setProperty(ServerConfig.JAVAX_JDO_PASS, pass);
    prop.setProperty(ServerConfig.JAVAX_JDO_DRIVER_NAME, driverName);
    for (Map.Entry<String, String> entry : conf) {
        String key = entry.getKey();
        if (key.startsWith(ServerConfig.SENTRY_JAVAX_JDO_PROPERTY_PREFIX)
                || key.startsWith(ServerConfig.SENTRY_DATANUCLEUS_PROPERTY_PREFIX)) {
            key = StringUtils.removeStart(key, ServerConfig.SENTRY_DB_PROPERTY_PREFIX);
            prop.setProperty(key, entry.getValue());
        }
    }

    boolean checkSchemaVersion = conf
            .get(ServerConfig.SENTRY_VERIFY_SCHEM_VERSION, ServerConfig.SENTRY_VERIFY_SCHEM_VERSION_DEFAULT)
            .equalsIgnoreCase("true");
    if (!checkSchemaVersion) {
        prop.setProperty("datanucleus.autoCreateSchema", "true");
        prop.setProperty("datanucleus.fixedDatastore", "false");
    }

    // Disallow operations outside of transactions
    prop.setProperty("datanucleus.NontransactionalRead", "false");
    prop.setProperty("datanucleus.NontransactionalWrite", "false");

    pmf = JDOHelper.getPersistenceManagerFactory(prop);
    verifySentryStoreSchema(conf, checkSchemaVersion);

    // Kick off the thread that cleans orphaned privileges (unless told not to)
    privCleaner = this.new PrivCleaner();
    if (conf.get(ServerConfig.SENTRY_STORE_ORPHANED_PRIVILEGE_REMOVAL,
            ServerConfig.SENTRY_STORE_ORPHANED_PRIVILEGE_REMOVAL_DEFAULT).equalsIgnoreCase("true")) {
        privCleanerThread = new Thread(privCleaner);
        privCleanerThread.start();
    }
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Tests proper consumption when there are more Flink sources than Kafka partitions, which means
 * that some Flink sources will read no partitions.
 */
public void runMultipleSourcesOnePartitionExactlyOnceTest() throws Exception {
    final String topic = "manyToOneTopic";
    final int numPartitions = 5;
    final int numElementsPerPartition = 1000;
    final int totalElements = numPartitions * numElementsPerPartition;
    final int failAfterElements = numElementsPerPartition / 3;

    final int parallelism = 8;

    createTestTopic(topic, numPartitions, 1);

    DataGenerators.generateRandomizedIntegerSequence(
            StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort), kafkaServer, topic,
            numPartitions, numElementsPerPartition, true);

    // run the topology that fails and recovers

    DeserializationSchema<Integer> schema = new TypeInformationSerializationSchema<>(
            BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    // set the number of restarts to one. The failing mapper will fail once, then it's only success exceptions.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
    env.getConfig().disableSysoutLogging();
    env.setBufferTimeout(0);

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

    env.addSource(kafkaSource).map(new PartitionValidatingMapper(numPartitions, 1))
            .map(new FailingIdentityMapper<Integer>(failAfterElements))
            .addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

    FailingIdentityMapper.failedBefore = false;
    tryExecute(env, "multi-source-one-partitions exactly once test");

    deleteTestTopic(topic);
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * Tests that the job fails at deployment when the requested parallelism exceeds the number of
 * available task slots, and that the failure surfaces as a NoResourceAvailableException.
 */
public void runFailOnDeployTest() throws Exception {
    final String topic = "failOnDeployTopic";

    createTestTopic(topic, 2, 1);

    DeserializationSchema<Integer> schema = new TypeInformationSerializationSchema<>(
            BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.setParallelism(12); // needs to be more than the mini cluster has slots
    env.getConfig().disableSysoutLogging();

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

    env.addSource(kafkaSource).addSink(new DiscardingSink<Integer>());

    try {
        env.execute("test fail on deploy");
        fail("this test should fail with an exception");
    } catch (ProgramInvocationException e) {

        // validate that we failed due to a NoResourceAvailableException
        Throwable cause = e.getCause();
        int depth = 0;
        boolean foundResourceException = false;

        while (cause != null && depth++ < 20) {
            if (cause instanceof NoResourceAvailableException) {
                foundResourceException = true;
                break;
            }
            cause = cause.getCause();
        }

        assertTrue("Wrong exception", foundResourceException);
    }

    deleteTestTopic(topic);
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * This test ensures that the consumer correctly uses user-supplied specific offsets when explicitly configured to
 * start from specific offsets. For partitions for which no specific offset is supplied, the starting position
 * should fall back to the group offsets behaviour.
 *
 * 4 partitions will have 50 records with offsets 0 to 49. The supplied specific offsets map is:
 *    partition 0 --> start from offset 19
 *    partition 1 --> not set
 *    partition 2 --> start from offset 22
 *    partition 3 --> not set
 *    partition 4 --> start from offset 26 (this should be ignored because the partition does not exist)
 *
 * The partitions and their committed group offsets are set up as:
 *    partition 0 --> committed offset 23
 *    partition 1 --> committed offset 31
 *    partition 2 --> committed offset 43
 *    partition 3 --> no commit offset
 *
 * When configured to start from these specific offsets, each partition should read:
 *    partition 0 --> start from offset 19, read to offset 49 (31 records)
 *    partition 1 --> fallback to group offsets, so start from offset 31, read to offset 49 (19 records)
 *    partition 2 --> start from offset 22, read to offset 49 (28 records)
 *    partition 3 --> fallback to group offsets, but since there is no group offset for this partition,
 *                    will default to "auto.offset.reset" (set to "earliest"),
 *                    so start from offset 0, read to offset 49 (50 records)
 */
public void runStartFromSpecificOffsets() throws Exception {
    // 4 partitions with 50 records each (offsets 0-49)
    final int parallelism = 4;
    final int recordsInEachPartition = 50;

    final String topicName = writeSequence("testStartFromSpecificOffsetsTopic", recordsInEachPartition,
            parallelism, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
            flinkPort);
    env.getConfig().disableSysoutLogging();
    env.setParallelism(parallelism);

    Properties readProps = new Properties();
    readProps.putAll(standardProps);
    readProps.setProperty("auto.offset.reset", "earliest"); // partition 3 should default back to this behaviour

    Map<KafkaTopicPartition, Long> specificStartupOffsets = new HashMap<>();
    specificStartupOffsets.put(new KafkaTopicPartition(topicName, 0), 19L);
    specificStartupOffsets.put(new KafkaTopicPartition(topicName, 2), 22L);
    specificStartupOffsets.put(new KafkaTopicPartition(topicName, 4), 26L); // non-existing partition, should be ignored

    // only the committed offset for partition 1 should be used, because partition 1 has no entry in the specific offsets map
    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();
    kafkaOffsetHandler.setCommittedOffset(topicName, 0, 23);
    kafkaOffsetHandler.setCommittedOffset(topicName, 1, 31);
    kafkaOffsetHandler.setCommittedOffset(topicName, 2, 43);

    Map<Integer, Tuple2<Integer, Integer>> partitionsToValueCountAndStartOffsets = new HashMap<>();
    partitionsToValueCountAndStartOffsets.put(0, new Tuple2<>(31, 19)); // partition 0 should read offset 19-49
    partitionsToValueCountAndStartOffsets.put(1, new Tuple2<>(19, 31)); // partition 1 should read offset 31-49
    partitionsToValueCountAndStartOffsets.put(2, new Tuple2<>(28, 22)); // partition 2 should read offset 22-49
    partitionsToValueCountAndStartOffsets.put(3, new Tuple2<>(50, 0)); // partition 3 should read offset 0-49

    readSequence(env, StartupMode.SPECIFIC_OFFSETS, specificStartupOffsets, readProps, topicName,
            partitionsToValueCountAndStartOffsets);

    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);
}

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

public void runBrokerFailureTest() throws Exception {
    final String topic = "brokerFailureTestTopic";

    final int parallelism = 2;
    final int numElementsPerPartition = 1000;
    final int totalElements = parallelism * numElementsPerPartition;
    final int failAfterElements = numElementsPerPartition / 3;

    createTestTopic(topic, parallelism, 2);

    DataGenerators.generateRandomizedIntegerSequence(
            StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort), kafkaServer, topic,
            parallelism, numElementsPerPartition, true);

    // find leader to shut down
    int leaderId = kafkaServer.getLeaderToShutDown(topic);

    LOG.info("Leader to shutdown {}", leaderId);

    // run the topology (the consumers must handle the failures)

    DeserializationSchema<Integer> schema = new TypeInformationSerializationSchema<>(
            BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.setParallelism(parallelism);
    env.enableCheckpointing(500);
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.getConfig().disableSysoutLogging();

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

    env.addSource(kafkaSource).map(new PartitionValidatingMapper(parallelism, 1))
            .map(new BrokerKillingMapper<Integer>(leaderId, failAfterElements))
            .addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

    BrokerKillingMapper.killedLeaderBefore = false;
    tryExecute(env, "Broker failure once test");

    // start a new broker:
    kafkaServer.restartBroker(leaderId);
}

From source file:com.aol.advertising.qiao.emitter.KafkaEmitter.java

private Properties buildProperties() {
    Properties props = new Properties();
    props.put(PROP_BROKER_LIST, brokerList);
    props.put(PROP_REQUEST_REQUIRED_ACKS, ackPolicy.value);
    props.put(PROP_MSG_MAX_RETRIES, String.valueOf(msgSendMaxRetries));
    props.put(PROP_CLIENT_ID, clientId);
    props.put(PROP_REQUEST_REQUIRED_ACKS, ackPolicy.value);

    if (serializerClassname != null)
        props.put(PROP_SERIALIZER_CLASS, serializerClassname);
    if (keySerializerClassname != null)
        props.put(PROP_KEY_SERIALIZER_CLASS, keySerializerClassname);

    if (partitionerClassname != null)
        props.put(PROP_PARTITIONER_CLASS, partitionerClassname);
    else {
        String val = (String) props.get(PROP_PARTITIONER_CLASS);
        if (val == null || val.length() == 0) {
            props.put(PROP_PARTITIONER_CLASS, DEFAULT_PARTITIONER_CLASSNAME);
            logger.info("Kafka Producer uses non-sticky round-robin partitioner.");
        }
    }
    if (producerType != null)
        props.put(PROP_PRODUCER_TYPE, producerType);
    if (messagesPerBatch > 0)
        props.put(PROP_BATCH_NUM_MSGS, String.valueOf(messagesPerBatch));
    if (messagesBatchTimeoutMS > 0)
        props.put(PROP_BATCH_BUFFERING_MAX_MS, String.valueOf(messagesBatchTimeoutMS));
    if (maxMessagesBuffering > 0)
        props.put(PROP_QUEUE_BUFFERING_MAX_MESSAGES, String.valueOf(maxMessagesBuffering));
    if (sendBufferBytes > 0)
        props.put(PROP_SEND_BUFFER_BYTES, String.valueOf(sendBufferBytes));

    String prop_path = CommonUtils.getQiaoConfigDir() + File.separator + KAFKA_PROPERTY_FILE;
    File prop_file = new File(prop_path);
    Properties props2 = loadProperties(prop_file);
    if (props2 != null)
        props.putAll(props2);

    return props;
}