Example usage for java.util Properties putAll

Introduction

On this page you can find example usage for java.util.Properties putAll, collected from open-source projects.

Prototype

@Override
public synchronized void putAll(Map<?, ?> t)
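
Before the project examples, here is a minimal standalone sketch of the method's behavior (not taken from any project below). Properties extends Hashtable<Object, Object>, so putAll copies every entry of the given map into the table, but only entries whose key and value are both Strings are visible through getProperty and stringPropertyNames.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class PutAllDemo {
    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("host", "localhost");
        source.put("port", 8080); // Integer value, not a String

        Properties props = new Properties();
        props.putAll(source);

        System.out.println(props.getProperty("host")); // localhost
        System.out.println(props.getProperty("port")); // null: the value is not a String
        System.out.println(props.get("port"));         // 8080: raw map access still sees it
    }
}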

Usage

From source file:edu.buffalo.cse.pigout.Main.java

static int run(String args[], PigProgressNotificationListener listener) {
    int rc = 1;
    boolean verbose = false;
    boolean pigoutCalled = false;
    String logFileName = null;

    try {
        Configuration conf = new Configuration(false);
        GenericOptionsParser parser = new GenericOptionsParser(conf, args);
        conf = parser.getConfiguration();

        // create and update properties from configurations
        Properties properties = new Properties();
        PropertiesUtil.loadDefaultProperties(properties);
        PropertiesUtil.loadPropertiesFromFile(properties, "./conf/pigout.properties");
        properties.putAll(ConfigurationUtil.toProperties(conf));

        for (String key : properties.stringPropertyNames()) {
            log.debug(key + " = " + properties.getProperty(key));
        }

        if (listener == null) {
            listener = makeListener(properties);
        }
        String[] pigArgs = parser.getRemainingArgs();

        boolean userSpecifiedLog = false;
        boolean checkScriptOnly = false;

        BufferedReader pin = null;
        boolean debug = false;
        boolean dryrun = false;
        boolean embedded = false;
        List<String> params = new ArrayList<String>();
        List<String> paramFiles = new ArrayList<String>();
        HashSet<String> disabledOptimizerRules = new HashSet<String>();

        CmdLineParser opts = new CmdLineParser(pigArgs);
        opts.registerOpt('4', "log4jconf", CmdLineParser.ValueExpected.REQUIRED);
        opts.registerOpt('b', "brief", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('c', "check", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('d', "debug", CmdLineParser.ValueExpected.REQUIRED);
        opts.registerOpt('e', "execute", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('f', "file", CmdLineParser.ValueExpected.REQUIRED);
        opts.registerOpt('g', "embedded", CmdLineParser.ValueExpected.REQUIRED);
        opts.registerOpt('h', "help", CmdLineParser.ValueExpected.OPTIONAL);
        opts.registerOpt('i', "version", CmdLineParser.ValueExpected.OPTIONAL);
        opts.registerOpt('l', "logfile", CmdLineParser.ValueExpected.REQUIRED);
        opts.registerOpt('m', "param_file", CmdLineParser.ValueExpected.OPTIONAL);
        opts.registerOpt('p', "param", CmdLineParser.ValueExpected.OPTIONAL);
        opts.registerOpt('r', "dryrun", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('t', "optimizer_off", CmdLineParser.ValueExpected.REQUIRED);
        opts.registerOpt('v', "verbose", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('w', "warning", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('x', "exectype", CmdLineParser.ValueExpected.REQUIRED);
        opts.registerOpt('F', "stop_on_failure", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('M', "no_multiquery", CmdLineParser.ValueExpected.NOT_ACCEPTED);
        opts.registerOpt('P', "propertyFile", CmdLineParser.ValueExpected.REQUIRED);

        ExecMode mode = ExecMode.UNKNOWN;
        String file = null;
        String engine = null;
        ExecType execType = ExecType.LOCAL;

        // set up client side system properties in UDF context
        UDFContext.getUDFContext().setClientSystemProps(properties);

        char opt;
        //properties.setProperty("opt.multiquery",""+true);

        while ((opt = opts.getNextOpt()) != CmdLineParser.EndOfOpts) {
            switch (opt) {
            case '4':
                String log4jconf = opts.getValStr();
                if (log4jconf != null) {
                    properties.setProperty(LOG4J_CONF, log4jconf);
                }
                break;

            case 'b':
                properties.setProperty(BRIEF, "true");
                break;

            case 'c':
                checkScriptOnly = true;
                break;

            case 'd':
                String logLevel = opts.getValStr();
                if (logLevel != null) {
                    properties.setProperty(DEBUG, logLevel);
                }
                debug = true;
                break;

            case 'e':
                mode = ExecMode.STRING;
                break;

            case 'f':
                mode = ExecMode.FILE;
                file = opts.getValStr();
                break;

            case 'g':
                embedded = true;
                engine = opts.getValStr();
                break;

            case 'F':
                properties.setProperty("stop.on.failure", "" + true);
                break;

            case 'h':
                String topic = opts.getValStr();
                if (topic != null) {
                    System.out.println("Topic based help is not provided yet.");
                    usage();
                } else
                    usage();
                return ReturnCode.SUCCESS;

            case 'i':
                System.out.println(getVersionString());
                return ReturnCode.SUCCESS;

            case 'l':
                //call to method that validates the path to the log file
                //and sets up the file to store the client side log file
                String logFileParameter = opts.getValStr();
                if (logFileParameter != null && logFileParameter.length() > 0) {
                    logFileName = validateLogFile(logFileParameter, null);
                } else {
                    logFileName = validateLogFile(logFileName, null);
                }
                userSpecifiedLog = true;
                properties.setProperty("pig.logfile", (logFileName == null ? "" : logFileName));
                break;

            case 'm':
                //adds a parameter file
                paramFiles.add(opts.getValStr());
                break;

            case 'M':
                // turns off multiquery optimization
                properties.setProperty("opt.multiquery", "" + false);
                break;

            case 'p':
                //adds a parameter
                params.add(opts.getValStr());
                break;

            case 'r':
                // currently only used for parameter substitution
                // will be extended in the future
                dryrun = true;
                break;

            case 't':
                // disables an optimizer rule
                disabledOptimizerRules.add(opts.getValStr());
                break;

            case 'v':
                properties.setProperty(VERBOSE, "" + true);
                verbose = true;
                break;

            case 'w':
                properties.setProperty("aggregate.warning", "" + false);
                break;

            case 'x':
                //sets execution type:
                try {
                    execType = ExecType.fromString(opts.getValStr());
                } catch (IOException e) {
                    throw new RuntimeException("ERROR: Unrecognized exectype.", e);
                }
                break;

            case 'P': {
                InputStream inputStream = null;
                try {
                    FileLocalizer.FetchFileRet localFileRet = FileLocalizer.fetchFile(properties,
                            opts.getValStr());
                    inputStream = new BufferedInputStream(new FileInputStream(localFileRet.file));
                    properties.load(inputStream);
                } catch (IOException e) {
                    throw new RuntimeException("Unable to parse properties file '" + opts.getValStr() + "'");
                } finally {
                    if (inputStream != null) {
                        try {
                            inputStream.close();
                        } catch (IOException e) {
                        }
                    }
                }
            }
                break;

            default: {
                Character cc = Character.valueOf(opt);
                throw new AssertionError("Unhandled option " + cc.toString());
            }
            }
        }

        // create the context with the parameter
        PigContext pigContext = new PigContext(execType, properties);

        // create the static script state object
        String commandLine = LoadFunc.join((AbstractList<String>) Arrays.asList(args), " ");
        ScriptState scriptState = ScriptState.start(commandLine, pigContext);
        if (listener != null) {
            scriptState.registerListener(listener);
        }

        pigContext.getProperties().setProperty("pig.cmd.args", commandLine);

        if (logFileName == null && !userSpecifiedLog) {
            logFileName = validateLogFile(properties.getProperty("pig.logfile"), null);
        }

        pigContext.getProperties().setProperty("pig.logfile", (logFileName == null ? "" : logFileName));

        // configure logging
        configureLog4J(properties, pigContext);

        log.info(getVersionString().replace("\n", ""));

        if (logFileName != null) {
            log.info("Logging error messages to: " + logFileName);
        }

        if (!Boolean.valueOf(properties.getProperty(PROP_FILT_SIMPL_OPT, "false"))) {
            //turn off if the user has not explicitly turned on this optimization
            disabledOptimizerRules.add("FilterLogicExpressionSimplifier");
        }
        pigContext.getProperties().setProperty("pig.optimizer.rules",
                ObjectSerializer.serialize(disabledOptimizerRules));

        PigContext.setClassLoader(pigContext.createCl(null));

        // construct the parameter substitution preprocessor
        PigOutSh pigOutSh = null;
        BufferedReader in;
        String substFile = null;

        paramFiles = fetchRemoteParamFiles(paramFiles, properties);
        pigContext.setParams(params);
        pigContext.setParamFiles(paramFiles);

        switch (mode) {
        case FILE: {
            String remainders[] = opts.getRemainingArgs();

            if (remainders != null) {
                pigContext.getProperties().setProperty(PigContext.PIG_CMD_ARGS_REMAINDERS,
                        ObjectSerializer.serialize(remainders));
            }

            FileLocalizer.FetchFileRet localFileRet = FileLocalizer.fetchFile(properties, file);
            if (localFileRet.didFetch) {
                properties.setProperty("pig.jars.relative.to.dfs", "true");
            }

            scriptState.setFileName(file);

            if (embedded) {
                return runEmbeddedScript(pigContext, localFileRet.file.getPath(), engine);
            } else {
                SupportedScriptLang type = determineScriptType(localFileRet.file.getPath());

                log.debug("File: " + localFileRet.file.getPath() + " Script type: " + type);

                if (type != null) {
                    return runEmbeddedScript(pigContext, localFileRet.file.getPath(),
                            type.name().toLowerCase());
                }
            }
            //Reader is created by first loading "pigout.load.default.statements" or .pigoutbootup file if available
            in = new BufferedReader(new InputStreamReader(
                    Utils.getCompositeStream(new FileInputStream(localFileRet.file), properties)));

            //run parameter substitution preprocessor first
            substFile = file + ".substituted";

            pin = runParamPreprocessor(pigContext, in, substFile, debug || dryrun || checkScriptOnly);

            if (dryrun) {
                if (dryrun(substFile, pigContext)) {
                    log.info("Dry run completed. Substituted pig script is at " + substFile
                            + ". Expanded pig script is at " + file + ".expanded");
                } else {
                    log.info("Dry run completed. Substituted pig script is at " + substFile);
                }
                return ReturnCode.SUCCESS;
            }

            logFileName = validateLogFile(logFileName, file);
            pigContext.getProperties().setProperty("pig.logfile", (logFileName == null ? "" : logFileName));

            // Set job name based on name of the script
            pigContext.getProperties().setProperty(PigContext.JOB_NAME, "PigOut_" + new File(file).getName());

            if (!debug) {
                new File(substFile).deleteOnExit();
            }

            scriptState.setScript(new File(file));

            // From now on, PigOut starts...
            // Create a shell interface to PigOutServer
            pigOutSh = new PigOutSh(pin, pigContext);
            pigoutCalled = true;

            if (checkScriptOnly) { // -c option
                //Check syntax
                pigOutSh.checkScript(substFile);
                System.err.println(file + " syntax OK");
                return ReturnCode.SUCCESS;
            }

            // parseAndBuild() will parse, and then generate a script
            log.info("PigOut is parsing and analyzing the script...");
            pigOutSh.parseAndBuild();

            log.debug("PigOut is partitioning the plan...");
            pigOutSh.partition();

            return ReturnCode.SUCCESS;
        }
        case STRING: {
            log.error("Please use FILE mode.");
            return -1;
        }
        default:
            break;
        }

    } catch (ParseException e) {
        usage();
        rc = ReturnCode.PARSE_EXCEPTION;
        PigStatsUtil.setErrorMessage(e.getMessage());
        PigStatsUtil.setErrorThrowable(e);
    } catch (org.apache.pig.tools.parameters.ParseException e) {
        // usage();
        rc = ReturnCode.PARSE_EXCEPTION;
        PigStatsUtil.setErrorMessage(e.getMessage());
        PigStatsUtil.setErrorThrowable(e);
    } catch (IOException e) {
        if (e instanceof PigException) {
            PigException pe = (PigException) e;
            rc = (pe.retriable()) ? ReturnCode.RETRIABLE_EXCEPTION : ReturnCode.PIG_EXCEPTION;
            PigStatsUtil.setErrorMessage(pe.getMessage());
            PigStatsUtil.setErrorCode(pe.getErrorCode());
        } else {
            rc = ReturnCode.IO_EXCEPTION;
            PigStatsUtil.setErrorMessage(e.getMessage());
        }
        PigStatsUtil.setErrorThrowable(e);

        if (!pigoutCalled) {
            LogUtils.writeLog(e, logFileName, log, verbose, "Error before Pig is launched");
        }
    } catch (Throwable e) {
        rc = ReturnCode.THROWABLE_EXCEPTION;
        PigStatsUtil.setErrorMessage(e.getMessage());
        PigStatsUtil.setErrorThrowable(e);
        e.printStackTrace();
        if (!pigoutCalled) {
            LogUtils.writeLog(e, logFileName, log, verbose, "Error before Pig is launched");
        }
    } finally {
        // clear temp files
        FileLocalizer.deleteTempFiles();
        PerformanceTimerFactory.getPerfTimerFactory().dumpTimers();
    }

    return rc;

}
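
Note the merge order at the top of run(): the built-in defaults and ./conf/pigout.properties are loaded first, and putAll then copies the parsed Hadoop Configuration over them, so settings supplied on the command line (for example via -D options handled by GenericOptionsParser) replace file-based values with the same key.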

From source file:com.streamsets.pipeline.stage.origin.multikafka.MultiKafkaSource.java

private Properties getKafkaProperties(Stage.Context context) {
    Properties props = new Properties();
    props.putAll(conf.kafkaOptions);

    props.setProperty("bootstrap.servers", conf.brokerURI);
    props.setProperty("group.id", conf.consumerGroup);
    props.setProperty("max.poll.records", String.valueOf(batchSize));
    props.setProperty("enable.auto.commit", "true");
    props.setProperty("auto.commit.interval.ms", "1000");
    props.setProperty(KafkaConstants.KEY_DESERIALIZER_CLASS_CONFIG, conf.keyDeserializer.getKeyClass());
    props.setProperty(KafkaConstants.VALUE_DESERIALIZER_CLASS_CONFIG, conf.valueDeserializer.getValueClass());
    props.setProperty(KafkaConstants.CONFLUENT_SCHEMA_REGISTRY_URL_CONFIG,
            StringUtils.join(conf.dataFormatConfig.schemaRegistryUrls, ","));

    if (context.isPreview()) {
        props.setProperty(KafkaConstants.AUTO_OFFSET_RESET_CONFIG,
                KafkaConstants.AUTO_OFFSET_RESET_PREVIEW_VALUE);
    }

    return props;
}
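
The call order above matters: the user-supplied conf.kafkaOptions are copied in by putAll first, and the settings the stage depends on are pinned with setProperty afterwards, so user options cannot override them. A minimal sketch of that merge order (the key and values here are illustrative):

import java.util.Properties;

public class MergeOrderDemo {
    public static void main(String[] args) {
        Properties userOptions = new Properties();
        userOptions.setProperty("enable.auto.commit", "false"); // user tries to disable

        Properties props = new Properties();
        props.putAll(userOptions);                       // user options copied first
        props.setProperty("enable.auto.commit", "true"); // fixed setting pinned afterwards

        System.out.println(props.getProperty("enable.auto.commit")); // prints "true"
    }
}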

From source file:gobblin.service.SimpleKafkaSpecExecutorInstanceConsumer.java

@Override
public Future<? extends List<Pair<Verb, Spec>>> changedSpecs() {
    List<Pair<Verb, Spec>> changesSpecs = new ArrayList<>();
    initializeWatermarks();
    this.currentPartitionIdx = -1;
    while (!allPartitionsFinished()) {
        if (currentPartitionFinished()) {
            moveToNextPartition();
            continue;
        }
        if (this.messageIterator == null || !this.messageIterator.hasNext()) {
            try {
                this.messageIterator = fetchNextMessageBuffer();
            } catch (Exception e) {
                _log.error(String.format(
                        "Failed to fetch next message buffer for partition %s. Will skip this partition.",
                        getCurrentPartition()), e);
                moveToNextPartition();
                continue;
            }
            if (this.messageIterator == null || !this.messageIterator.hasNext()) {
                moveToNextPartition();
                continue;
            }
        }
        while (!currentPartitionFinished()) {
            if (!this.messageIterator.hasNext()) {
                break;
            }

            KafkaConsumerRecord nextValidMessage = this.messageIterator.next();

            // Even though we ask Kafka to give us a message buffer starting from offset x, it may
            // return a buffer that starts from offset smaller than x, so we need to skip messages
            // until we get to x.
            if (nextValidMessage.getOffset() < _nextWatermark.get(this.currentPartitionIdx)) {
                continue;
            }

            _nextWatermark.set(this.currentPartitionIdx, nextValidMessage.getNextOffset());
            try {
                final AvroJobSpec record;

                if (nextValidMessage instanceof ByteArrayBasedKafkaRecord) {
                    record = decodeRecord((ByteArrayBasedKafkaRecord) nextValidMessage);
                } else if (nextValidMessage instanceof DecodeableKafkaRecord) {
                    record = ((DecodeableKafkaRecord<?, AvroJobSpec>) nextValidMessage).getValue();
                } else {
                    throw new IllegalStateException(
                            "Unsupported KafkaConsumerRecord type. The returned record can either be ByteArrayBasedKafkaRecord"
                                    + " or DecodeableKafkaRecord");
                }

                JobSpec.Builder jobSpecBuilder = JobSpec.builder(record.getUri());

                Properties props = new Properties();
                props.putAll(record.getProperties());
                jobSpecBuilder.withJobCatalogURI(record.getUri()).withVersion(record.getVersion())
                        .withDescription(record.getDescription()).withConfigAsProperties(props);

                if (!record.getTemplateUri().isEmpty()) {
                    jobSpecBuilder.withTemplate(new URI(record.getTemplateUri()));
                }

                String verbName = record.getMetadata().get(VERB_KEY);
                Verb verb = Verb.valueOf(verbName);

                changesSpecs.add(new ImmutablePair<Verb, Spec>(verb, jobSpecBuilder.build()));
            } catch (Throwable t) {
                _log.error("Could not decode record at partition " + this.currentPartitionIdx + " offset "
                        + nextValidMessage.getOffset());
            }
        }
    }

    return new CompletedFuture(changesSpecs, null);
}

From source file:jetbrick.template.JetEngineFactoryBean.java

@Override
public void afterPropertiesSet() throws Exception {
    if (configFile == null && configProperties == null) {
        singleton = JetEngine.create();
        return;
    }

    Properties effectProps = new Properties();
    if (configFile != null) {
        check(configFile.getFile());
        effectProps.load(configFile.getInputStream());
    }
    if (configProperties != null) {
        effectProps.putAll(configProperties);
    }

    singleton = JetEngine.create(effectProps);
}

From source file:com.ironiacorp.persistence.hibernate.GenericHibernateDataSource.java

/**
 * Get the DDL script to update the database.
 */
public String getUpdateDDLScript() {
    Dialect dialect = Dialect.getDialect(hibernateConfig.getProperties());
    Properties props = new Properties();
    //        ConnectionProvider connectionProvider = null;
    DatabaseMetadata dm = null;

    props.putAll(dialect.getDefaultProperties());
    props.putAll(hibernateConfig.getProperties());
    /*       connectionProvider = ConnectionProviderFactory.newConnectionProvider(props);
                   
           try {
              dm = new DatabaseMetadata(connectionProvider.getConnection(), dialect);
           } catch ( SQLException e ) {
              log.debug("Could not get database DDL script", e);
           }
      */
    String[] script = hibernateConfig.generateSchemaUpdateScript(dialect, dm);
    return ArrayUtil.toString(script);
}
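
The order of the two putAll calls above layers the dialect's default settings under the Hibernate configuration: the second call replaces any keys already copied in by the first, so the user's configuration wins over the dialect defaults wherever both define a key.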

From source file:org.apache.falcon.oozie.process.ProcessExecutionWorkflowBuilder.java

@Override
public Properties build(Cluster cluster, Path buildPath) throws FalconException {
    WORKFLOWAPP wfApp = new WORKFLOWAPP();
    String wfName = EntityUtil.getWorkflowName(Tag.DEFAULT, entity).toString();

    String startAction = USER_ACTION_NAME;
    final boolean isTableStorageType = EntityUtil.isTableStorageType(cluster, entity);

    //Add pre-processing action
    if (shouldPreProcess()) {
        ACTION preProcessAction = getPreProcessingAction(isTableStorageType, Tag.DEFAULT);
        addTransition(preProcessAction, USER_ACTION_NAME, FAIL_POSTPROCESS_ACTION_NAME);
        wfApp.getDecisionOrForkOrJoin().add(preProcessAction);
        startAction = PREPROCESS_ACTION_NAME;
    }

    //Add user action
    ACTION userAction = getUserAction(cluster, buildPath);
    addTransition(userAction, SUCCESS_POSTPROCESS_ACTION_NAME, FAIL_POSTPROCESS_ACTION_NAME);
    wfApp.getDecisionOrForkOrJoin().add(userAction);

    //Add post-processing
    ACTION success = getSuccessPostProcessAction();
    addTransition(success, OK_ACTION_NAME, FAIL_ACTION_NAME);
    wfApp.getDecisionOrForkOrJoin().add(success);

    ACTION fail = getFailPostProcessAction();
    addTransition(fail, FAIL_ACTION_NAME, FAIL_ACTION_NAME);
    wfApp.getDecisionOrForkOrJoin().add(fail);

    decorateWorkflow(wfApp, wfName, startAction);

    addLibExtensionsToWorkflow(cluster, wfApp, null);

    if (isTableStorageType) {
        setupHiveCredentials(cluster, buildPath, wfApp);
    }

    marshal(cluster, wfApp, buildPath);
    Properties props = createDefaultConfiguration(cluster);
    props.putAll(getProperties(buildPath, wfName));
    props.putAll(getWorkflowProperties());
    props.setProperty(OozieClient.APP_PATH, buildPath.toString());

    //Add libpath
    Path libPath = new Path(buildPath, "lib");
    copySharedLibs(cluster, libPath);
    props.put(OozieClient.LIBPATH, libPath.toString());

    Workflow processWorkflow = ((Process) (entity)).getWorkflow();
    propagateUserWorkflowProperties(processWorkflow, props);

    // Write out the config to config-default.xml
    marshal(cluster, wfApp, getConfig(props), buildPath);

    return props;
}

From source file:net.ontopia.persistence.proxy.DBCPConnectionFactory.java

protected void initPool() {
    // Set up connection pool
    pool = new GenericObjectPool(null);

    // Read/Write by default
    boolean readonly = defaultReadOnly;
    // Auto-commit disabled by default
    boolean autocommit = readonly;
    log.debug("Creating new DBCP connection factory, readonly=" + readonly + ", autocommit=" + autocommit);

    // Set minimum pool size (default: 20)
    String _minsize = PropertyUtils.getProperty(properties,
            "net.ontopia.topicmaps.impl.rdbms.ConnectionPool.MinimumSize", false);
    int minsize = (_minsize == null ? 20 : Integer.parseInt(_minsize));
    log.debug("Setting ConnectionPool.MinimumSize '" + minsize + "'");
    pool.setMaxIdle(minsize); // 0 = no limit

    // Set maximum pool size (default: Integer.MAX_VALUE)
    String _maxsize = PropertyUtils.getProperty(properties,
            "net.ontopia.topicmaps.impl.rdbms.ConnectionPool.MaximumSize", false);
    int maxsize = (_maxsize == null ? 0 : Integer.parseInt(_maxsize));
    log.debug("Setting ConnectionPool.MaximumSize '" + maxsize + "'");
    pool.setMaxActive(maxsize); // 0 = no limit

    // Set user timeout (default: never)
    String _utimeout = PropertyUtils.getProperty(properties,
            "net.ontopia.topicmaps.impl.rdbms.ConnectionPool.UserTimeout", false);
    int utimeout = (_utimeout == null ? -1 : Integer.parseInt(_utimeout));
    pool.setMaxWait(utimeout); // -1 = never

    // Set soft maximum - emergency objects (default: true)
    boolean softmax = PropertyUtils.isTrue(properties,
            "net.ontopia.topicmaps.impl.rdbms.ConnectionPool.SoftMaximum", true);
    log.debug("Setting ConnectionPool.SoftMaximum '" + softmax + "'");
    if (softmax)
        pool.setWhenExhaustedAction(GenericObjectPool.WHEN_EXHAUSTED_GROW);
    else
        pool.setWhenExhaustedAction(GenericObjectPool.WHEN_EXHAUSTED_BLOCK);

    // allow the user to override the when-exhausted action
    // warning: when set to fail, make sure MaximumSize and MinimumSize are set correctly
    // warning: when set to block, make sure a proper user timeout is set, or the pool
    //          will block forever
    String _whenExhaustedAction = PropertyUtils.getProperty(properties,
            "net.ontopia.topicmaps.impl.rdbms.ConnectionPool.WhenExhaustedAction", false);
    if (EXHAUSED_BLOCK.equals(_whenExhaustedAction))
        pool.setWhenExhaustedAction(GenericKeyedObjectPool.WHEN_EXHAUSTED_BLOCK);
    if (EXHAUSED_GROW.equals(_whenExhaustedAction))
        pool.setWhenExhaustedAction(GenericKeyedObjectPool.WHEN_EXHAUSTED_GROW);
    if (EXHAUSED_FAIL.equals(_whenExhaustedAction))
        pool.setWhenExhaustedAction(GenericKeyedObjectPool.WHEN_EXHAUSTED_FAIL);

    if (pool.getWhenExhaustedAction() == GenericKeyedObjectPool.WHEN_EXHAUSTED_BLOCK)
        log.debug("Pool is set to block when exhausted");
    if (pool.getWhenExhaustedAction() == GenericKeyedObjectPool.WHEN_EXHAUSTED_GROW)
        log.debug("Pool is set to grow when exhausted");
    if (pool.getWhenExhaustedAction() == GenericKeyedObjectPool.WHEN_EXHAUSTED_FAIL)
        log.debug("Pool is set to fail when exhausted");

    // Statement pool
    GenericKeyedObjectPoolFactory stmpool = null;
    if (PropertyUtils.isTrue(properties, "net.ontopia.topicmaps.impl.rdbms.ConnectionPool.PoolStatements",
            true)) {
        log.debug("Using prepared statement pool: Yes");
        stmpool = new GenericKeyedObjectPoolFactory(null, -1, // unlimited maxActive (per key)
                GenericKeyedObjectPool.WHEN_EXHAUSTED_FAIL, 0, // maxWait
                1, // maxIdle (per key) 
                GenericKeyedObjectPool.DEFAULT_MAX_TOTAL);
    } else {
        log.debug("Using prepared statement pool: No");
    }

    // Test on borrow
    pool.setTestOnBorrow(true);

    // Get validation query
    String vquery = PropertyUtils.getProperty(properties,
            "net.ontopia.topicmaps.impl.rdbms.ConnectionPool.ValidationQuery", false);
    if (vquery == null)
        vquery = "select seq_count from TM_ADMIN_SEQUENCE where seq_name = '<GLOBAL>'";

    try {
        // Make sure driver is registered
        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
        Class.forName(getDriver(), true, classLoader);
        // Create connection factory
        ConnectionFactory cfactory;
        if (getUserName() == null || getPassword() == null) {
            Properties props = new Properties();
            props.putAll(properties);
            cfactory = new DriverManagerConnectionFactory(getConnectionString(), props);
        } else {
            cfactory = new DriverManagerConnectionFactory(getConnectionString(), getUserName(), getPassword());
        }

        // Create data source
        this.pcfactory = new TraceablePoolableConnectionFactory(cfactory, pool, stmpool, vquery, readonly,
                autocommit);

        // Set default transaction isolation level
        pcfactory.setDefaultTransactionIsolation(defaultTransactionIsolation);

        this.datasource = new PoolingDataSource(pool);
    } catch (Exception e) {
        throw new OntopiaRuntimeException("Problems occurred when setting up DBCP connection pool.", e);
    }
}
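
Note that the shared properties are copied into a fresh Properties object via putAll before being handed to DriverManagerConnectionFactory; presumably this keeps the connection factory from seeing, or interfering with, later changes to the factory's own configuration map.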

From source file:com.anrisoftware.propertiesutils.ContextPropertiesFactory.java

/**
 * Returning the context properties without loading any other resources.
 * <p>
 * Useful if we already loaded default properties from shared resource.
 * 
 * @return the {@link ContextProperties}.
 * 
 * @since 1.5
 */
public ContextProperties fromDefaults() {
    Properties resourceP = new Properties(defaultProperties);
    Properties parentP = new Properties(resourceP);
    parentP.putAll(parentProperties);
    return new ContextProperties(context, parentP);
}
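
This example leans on a distinction that is easy to miss: defaults passed to the Properties constructor are consulted only by getProperty, while putAll copies entries into the table itself, where they also show up in size() and keySet(). A minimal sketch of the difference (the property names are made up):

import java.util.Properties;

public class DefaultsVsPutAllDemo {
    public static void main(String[] args) {
        Properties defaults = new Properties();
        defaults.setProperty("color", "blue");

        Properties layered = new Properties(defaults); // defaults are a getProperty fallback
        Properties overrides = new Properties();
        overrides.setProperty("size", "large");
        layered.putAll(overrides);                     // copied into the table itself

        System.out.println(layered.getProperty("color")); // blue, found via the defaults
        System.out.println(layered.size());               // 1: only "size" is stored directly
    }
}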

From source file:com.anrisoftware.propertiesutils.ContextPropertiesFactory.java

private Properties loadProperties(InputStream resource, Charset charset)
        throws FileNotFoundException, IOException {
    Properties resourceP = new Properties(defaultProperties);
    Reader reader = new InputStreamReader(resource, charset);
    resourceP.load(reader);
    Properties parentP = new Properties(resourceP);
    parentP.putAll(parentProperties);
    return parentP;
}

From source file:org.apache.kylin.source.kafka.config.KafkaConsumerProperties.java

private Properties loadKafkaConsumerProperties() {
    File propFile = getKafkaConsumerFile();
    if (propFile == null || !propFile.exists()) {
        logger.warn("fail to locate " + KAFKA_CONSUMER_FILE + ", use empty kafka consumer properties");
        return new Properties();
    }
    Properties properties = new Properties();
    try {
        FileInputStream is = new FileInputStream(propFile);
        Configuration conf = new Configuration();
        conf.addResource(is);
        properties.putAll(extractKafkaConfigToProperties(conf));
        IOUtils.closeQuietly(is);

        File propOverrideFile = new File(propFile.getParentFile(), propFile.getName() + ".override");
        if (propOverrideFile.exists()) {
            FileInputStream ois = new FileInputStream(propOverrideFile);
            Configuration oconf = new Configuration();
            oconf.addResource(ois);
            properties.putAll(extractKafkaConfigToProperties(oconf));
            IOUtils.closeQuietly(ois);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    return properties;
}
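
The base file and its optional ".override" sibling are layered with two putAll calls: the second call replaces any keys already present, while keys unique to either file are kept. A minimal sketch of that layering (the keys and values here are illustrative):

import java.util.Properties;

public class OverrideLayeringDemo {
    public static void main(String[] args) {
        Properties base = new Properties();
        base.setProperty("fetch.size", "1048576");
        base.setProperty("timeout.ms", "30000");

        Properties override = new Properties();
        override.setProperty("timeout.ms", "60000"); // replaces the base value

        Properties merged = new Properties();
        merged.putAll(base);
        merged.putAll(override);

        System.out.println(merged.getProperty("timeout.ms")); // 60000
        System.out.println(merged.getProperty("fetch.size")); // 1048576, kept from the base
    }
}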