List of usage examples for java.util Properties putAll
@Override public synchronized void putAll(Map<?, ?> t)
From source file:de.micromata.genome.util.runtime.config.MailSessionLocalSettingsConfigModel.java
public Session createMailSession(Properties addProperties) { Properties msprops = new Properties(); msprops.put("mail.debug", Boolean.toString(smptDebug)); msprops.put("mail.smtp.host", this.emailHost); msprops.put("mail.smtp.port", this.emailPort); // msprops.put("mail.smtp.ssl.enable", "true"); //msprops.put("mail.smtp.starttls.enable", "true"); Encryption encr = Encryption.fromString(encryption); if (encr == Encryption.StartTLS) { msprops.put("mail.smtp.starttls.enable", "true"); } else if (encr == Encryption.SSL) { msprops.put("mail.smtp.ssl.enable", "true"); // msprops.put("mail.smtp.socketFactory.port", emailPort); // msprops.put("mail.smtp.socketFactory.class", "javax.net.ssl.SSLSocketFactory"); }// w w w .ja v a 2 s . co m javax.mail.Session mailSession; msprops.put("mail.smtp.auth", Boolean.toString(isEmailAuthEnabled())); if (addProperties != null) { msprops.putAll(addProperties); } if (isEmailAuthEnabled() == true) { mailSession = Session.getInstance(msprops, new Authenticator() { @Override protected PasswordAuthentication getPasswordAuthentication() { return new PasswordAuthentication(emailAuthUser, emailAuthPass); } }); } else { mailSession = Session.getInstance(msprops); } return mailSession; }
From source file:org.apache.ctakes.ytex.kernel.ImputedFeatureEvaluatorImpl.java
@Override public boolean evaluateCorpus(String propFile) throws IOException { Properties props = new Properties(); // put org.apache.ctakes.ytex properties in props props.putAll(this.getYtexProperties()); // override org.apache.ctakes.ytex properties with propfile props.putAll(FileUtil.loadProperties(propFile, true)); return this.evaluateCorpus(new Parameters(props)); }
From source file:org.apache.falcon.oozie.OozieOrchestrationWorkflowBuilder.java
public Properties createDefaultConfiguration(Cluster cluster) throws FalconException { Properties props = new Properties(); props.put(WorkflowExecutionArgs.ENTITY_NAME.getName(), entity.getName()); props.put(WorkflowExecutionArgs.ENTITY_TYPE.getName(), entity.getEntityType().name()); props.put(WorkflowExecutionArgs.CLUSTER_NAME.getName(), cluster.getName()); props.put("falconDataOperation", getOperation().name()); props.put(WorkflowExecutionArgs.LOG_DIR.getName(), getStoragePath(EntityUtil.getLogPath(cluster, entity))); props.put(WorkflowExecutionArgs.WF_ENGINE_URL.getName(), ClusterHelper.getOozieUrl(cluster)); addLateDataProperties(props);/*from w w w. j a v a 2s. c o m*/ addBrokerProperties(cluster, props); props.put(MR_QUEUE_NAME, "default"); props.put(MR_JOB_PRIORITY, "NORMAL"); //props in entity override the set props. props.putAll(getEntityProperties(entity)); props.putAll(createAppProperties(cluster, entity.getName())); return props; }
From source file:org.cloudifysource.esc.driver.provisioning.jclouds.DefaultProvisioningDriver.java
/**
 * Validates every compute template configured for the cloud: endpoint presence (when required),
 * cloud credentials (by constructing a JCloudsDeployer), the image/hardware/location
 * combination, and — for known APIs — security groups and key pairs. Emits progress events on
 * the given validation context and throws on the first failing template.
 *
 * @param endpointRequired  whether a "jclouds.endpoint" override must be present per template
 * @param apiId             cloud API identifier used to decide API-specific checks
 * @param validationContext sink for validation progress/result events
 * @throws CloudProvisioningException on the first template that fails any check
 */
private void validateComputeTemplates(final boolean endpointRequired, final String apiId,
        final ValidationContext validationContext) throws CloudProvisioningException {
    JCloudsDeployer deployer = null;
    String templateName = "";
    String imageId = "";
    String hardwareId = "";
    String locationId = "";
    try {
        validationContext.validationEvent(ValidationMessageType.TOP_LEVEL_VALIDATION_MESSAGE,
                getFormattedMessage("validating_all_templates"));
        for (Entry<String, ComputeTemplate> entry : cloud.getCloudCompute().getTemplates().entrySet()) {
            templateName = entry.getKey();
            validationContext.validationEvent(ValidationMessageType.GROUP_VALIDATION_MESSAGE,
                    getFormattedMessage("validating_template", templateName));
            ComputeTemplate template = entry.getValue();
            String endpoint = getEndpoint(template);
            if (endpointRequired && StringUtils.isBlank(endpoint)) {
                throw new CloudProvisioningException("Endpoint not defined. Please add a \"jclouds.endpoint\""
                        + " entry in the template's overrides section");
            }
            try {
                validationContext.validationOngoingEvent(ValidationMessageType.ENTRY_VALIDATION_MESSAGE,
                        getFormattedMessage("validating_cloud_credentials"));
                // Template overrides become the deployer's Properties; constructing the
                // deployer is what actually exercises the credentials.
                final Properties templateProps = new Properties();
                Map<String, Object> templateOverrides = template.getOverrides();
                templateProps.putAll(templateOverrides);
                logger.fine("Creating a new cloud deployer");
                deployer = new JCloudsDeployer(cloud.getProvider().getProvider(), cloud.getUser().getUser(),
                        cloud.getUser().getApiKey(), templateProps);
                validationContext.validationEventEnd(ValidationResultType.OK);
            } catch (IOException e) {
                closeDeployer(deployer);
                validationContext.validationEventEnd(ValidationResultType.ERROR);
                // NOTE(review): the caught IOException 'e' is not attached as the cause of the
                // thrown exception, so the underlying failure detail is lost — consider passing
                // 'e' through if CloudProvisioningException supports a cause.
                throw new CloudProvisioningException(
                        getFormattedMessage("error_cloud_credentials_validation", groovyFile, propertiesFile));
            }
            imageId = template.getImageId();
            hardwareId = template.getHardwareId();
            locationId = template.getLocationId();
            deployer.setImageId(imageId);
            deployer.setHardwareId(hardwareId);
            deployer.setExtraOptions(template.getOptions());
            // TODO: check this memory validation
            // deployer.setMinRamMegabytes(template.getMachineMemoryMB());
            try {
                validationContext.validationOngoingEvent(ValidationMessageType.ENTRY_VALIDATION_MESSAGE,
                        getFormattedMessage("validating_image_hardware_location_combination",
                                imageId == null ? "" : imageId, hardwareId == null ? "" : hardwareId,
                                locationId == null ? "" : locationId));
                // calling JCloudsDeployer.getTemplate effectively tests the above configuration through jclouds
                deployer.getTemplate(locationId);
                validationContext.validationEventEnd(ValidationResultType.OK);
            } catch (Exception ex) {
                validationContext.validationEventEnd(ValidationResultType.ERROR);
                throw new CloudProvisioningException(
                        getFormattedMessage("error_image_hardware_location_combination_validation",
                                imageId == null ? "" : imageId, hardwareId == null ? "" : hardwareId,
                                locationId == null ? "" : locationId, groovyFile, propertiesFile),
                        ex);
            }
            if (isKnownAPI(apiId)) {
                validateSecurityGroupsForTemplate(template, apiId, deployer.getContext(), validationContext);
                validateKeyPairForTemplate(template, apiId, deployer.getContext(), validationContext);
            }
            validationContext.validationOngoingEvent(ValidationMessageType.GROUP_VALIDATION_MESSAGE,
                    getFormattedMessage("template_validated", templateName));
            validationContext.validationEventEnd(ValidationResultType.OK);
            // Closed both here (per iteration) and in the finally block below;
            // presumably closeDeployer tolerates null/already-closed — TODO confirm.
            closeDeployer(deployer);
        }
    } finally {
        closeDeployer(deployer);
    }
}
From source file:org.apache.flink.streaming.connectors.kafka.KafkaTestEnvironmentImpl.java
/** * Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed) *///from w w w.j a v a2 s . c o m protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception { Properties kafkaProperties = new Properties(); // properties have to be Strings kafkaProperties.put("advertised.host.name", KAFKA_HOST); kafkaProperties.put("broker.id", Integer.toString(brokerId)); kafkaProperties.put("log.dir", tmpFolder.toString()); kafkaProperties.put("zookeeper.connect", zookeeperConnectionString); kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024)); kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024)); // for CI stability, increase zookeeper session timeout kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout); kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout); if (additionalServerProperties != null) { kafkaProperties.putAll(additionalServerProperties); } final int numTries = 5; for (int i = 1; i <= numTries; i++) { int kafkaPort = NetUtils.getAvailablePort(); kafkaProperties.put("port", Integer.toString(kafkaPort)); //to support secure kafka cluster if (secureMode) { LOG.info("Adding Kafka secure configurations"); kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort); kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort); kafkaProperties.putAll(getSecureProperties()); } KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties); try { scala.Option<String> stringNone = scala.Option.apply(null); KafkaServer server = new KafkaServer(kafkaConfig, SystemTime$.MODULE$, stringNone); server.startup(); return server; } catch (KafkaException e) { if (e.getCause() instanceof BindException) { // port conflict, retry... LOG.info("Port conflict when starting Kafka Broker. 
Retrying..."); } else { throw e; } } } throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts."); }
From source file:com.baidu.cc.ConfigLoader.java
/** * Read property from local resource file * /* ww w. j a v a2 s . co m*/ * @param props to merge from local * @param localFile local resource file * @throws IOException throw all file operation exception */ public void readLocal(Properties props, File localFile) throws IOException { Assert.notNull(localFile, "Property 'localFile' is null."); if (!localFile.exists()) { throw new IOException("File not exist. " + localFile.getPath()); } byte[] byteArray = FileUtils.readFileToByteArray(localFile); Hex encoder = new Hex(); try { byteArray = encoder.decode(byteArray); } catch (DecoderException e) { throw new IOException(e.getMessage()); } String json = new String(byteArray, FILE_ENCODE); Map<String, String> map = gson.fromJson(json, new TypeToken<Map<String, String>>() { }.getType()); setVersionTag(map); props.putAll(map); }
From source file:org.parancoe.web.util.ReloadableResourceBundleMessageSource.java
/** * Get a PropertiesHolder that contains the actually visible properties for a Locale, after * merging all specified resource bundles. Either fetches the holder from the cache or freshly * loads it. <p>Only used when caching resource bundle contents forever, i.e. with cacheSeconds * < 0. Therefore, merged properties are always cached forever. *///from w w w. j a v a 2 s .c o m protected ReloadableResourceBundleMessageSource.PropertiesHolder getMergedProperties(Locale locale) { synchronized (this.cachedMergedProperties) { ReloadableResourceBundleMessageSource.PropertiesHolder mergedHolder = this.cachedMergedProperties .get(locale); if (mergedHolder != null) { return mergedHolder; } Properties mergedProps = new Properties(); mergedHolder = new ReloadableResourceBundleMessageSource.PropertiesHolder(mergedProps, -1); for (int i = this.basenames.length - 1; i >= 0; i--) { List filenames = calculateAllFilenames(this.basenames[i], locale); for (int j = filenames.size() - 1; j >= 0; j--) { String filename = (String) filenames.get(j); ReloadableResourceBundleMessageSource.PropertiesHolder propHolder = getProperties(filename); if (propHolder.getProperties() != null) { mergedProps.putAll(propHolder.getProperties()); } } } this.cachedMergedProperties.put(locale, mergedHolder); return mergedHolder; } }
From source file:org.bigmouth.nvwa.zookeeper.config.ZkPropertyPlaceholderConfigurer.java
/**
 * Merges remote configuration stored in ZooKeeper into the given properties.
 *
 * <p>Reads the server list and node paths from {@code props} itself, lazily initializes the
 * ZooKeeper client holder (a field of this class), then loads each configured node's data and
 * merges it via putAll. Invalid paths are logged and skipped rather than failing the whole run.
 *
 * @param props properties providing the connection settings and receiving the remote entries
 */
private void fillRemoteProperties(Properties props) {
    String cfg = props.getProperty(KEY_ZOOKEEPER_SERVERS);
    String servers = ReaderFactory.matching(cfg).read();
    if (StringUtils.isNotBlank(servers)) {
        // Side effect: (re)creates and initializes the class-level client holder.
        zkClientHolder = new ZkClientHolder(servers);
        zkClientHolder.init();
    }
    // No holder (no servers configured and none pre-existing): nothing to load.
    if (null == zkClientHolder) {
        return;
    }
    CuratorFramework zk = zkClientHolder.get();
    if (null == zk) {
        return;
    }
    String paths = props.getProperty(KEY_ZOOKEEPER_NODE_PATHS);
    if (StringUtils.isNotBlank(paths)) {
        String[] nodePaths = paths.split(PATH_SPLIT);
        for (String path : nodePaths) {
            // Strip a leading "!" marker; presumably a flag prefix on the configured
            // path entry — TODO confirm its meaning against the config format docs.
            path = StringUtils.removeStart(path, "!");
            try {
                PathUtils.validatePath(path);
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info("Loading properties file from ZooKeeper [{}]", path);
                }
                byte[] data = getData(zk, path);
                if (!ArrayUtils.isEmpty(data)) {
                    Properties properties = convert(data);
                    // Remote entries override any same-named local entries.
                    props.putAll(properties);
                }
            } catch (IllegalArgumentException e) {
                // Invalid ZK path: skip this entry, keep processing the rest.
                LOGGER.warn("Illegal path: {}, Has been ignored!", path);
            }
        }
    }
}