Example usage for java.util.Properties.clone()

Introduction

This page collects example usages of java.util.Properties.clone() from open-source projects.

Prototype

@Override
public synchronized Object clone()
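
A note on semantics: clone() returns a shallow copy of the property table. The entries are duplicated, so changes to the copy do not show up in the original (and vice versa), but keys and values (normally Strings) and the defaults object, if any, are shared rather than copied. A minimal, self-contained sketch of the defensive-copy idiom that the examples below rely on:

import java.util.Properties;

public class PropertiesCloneDemo {
    public static void main(String[] args) {
        Properties original = new Properties();
        original.setProperty("host", "localhost");
        original.setProperty("port", "8080");

        // clone() copies the entry set, so the two objects evolve independently.
        Properties copy = (Properties) original.clone();
        copy.setProperty("port", "9090");

        System.out.println(original.getProperty("port")); // prints 8080
        System.out.println(copy.getProperty("port"));     // prints 9090
    }
}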

Usage

From source file:er.extensions.foundation.ERXProperties.java

/**
 * Apply the current configuration to the supplied properties.
 * @param source the base properties to copy (may be null)
 * @param commandLine command line overrides applied last (may be null)
 * @return the applied properties
 */
public static Properties applyConfiguration(Properties source, Properties commandLine) {

    Properties dest = source != null ? (Properties) source.clone() : new Properties();
    NSArray additionalConfigurationFiles = ERXProperties.pathsForUserAndBundleProperties(false);

    if (additionalConfigurationFiles.count() > 0) {
        for (Enumeration configEnumerator = additionalConfigurationFiles.objectEnumerator(); configEnumerator
                .hasMoreElements();) {
            String configFile = (String) configEnumerator.nextElement();
            File file = new File(configFile);
            if (file.exists() && file.isFile() && file.canRead()) {
                try {
                    Properties props = ERXProperties.propertiesFromFile(file);
                    if (log.isDebugEnabled()) {
                        log.debug("Loaded: {}\n{}", file, ERXProperties.logString(props));
                    }
                    ERXProperties.transferPropertiesFromSourceToDest(props, dest);
                } catch (java.io.IOException ex) {
                    log.error("Unable to load optional configuration file: {}", configFile, ex);
                }
            } else {
                configLog.error("The optional configuration file '{}' either does not exist or cannot be read.",
                        file);
            }
        }
    }

    if (commandLine != null) {
        ERXProperties.transferPropertiesFromSourceToDest(commandLine, dest);
    }
    return dest;

}
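
The pattern above clones the supplied source so that neither the caller's Properties object nor the command-line properties are mutated while optional configuration files and, finally, the command-line overrides are layered on top. A hypothetical call site (sketch only; the property key is illustrative, not an ERX constant):

import java.util.Properties;

import er.extensions.foundation.ERXProperties;

public class ApplyConfigurationExample {
    public static void main(String[] args) {
        // Command-line style overrides; the key name is made up for this sketch.
        Properties commandLine = new Properties();
        commandLine.setProperty("my.app.mode", "development");

        // applyConfiguration works on a clone, so System.getProperties() is left untouched.
        Properties effective = ERXProperties.applyConfiguration(System.getProperties(), commandLine);
        System.out.println(effective.getProperty("my.app.mode")); // development
    }
}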

From source file:net.sf.joost.trax.TransformerImpl.java

/**
 * Setter for {@link Processor#outputProperties}
 *
 * @param oformat A <code>Properties</code> object, that replaces the
 *        current set of output properties.
 * @throws IllegalArgumentException if an unsupported output property or output method is supplied
 */
public void setOutputProperties(Properties oformat) throws IllegalArgumentException {
    if (oformat == null) {
        processor.initOutputProperties(); // re-initialize
    } else {
        IllegalArgumentException iE;
        // check properties in oformat
        for (Enumeration e = oformat.keys(); e.hasMoreElements();) {
            Object propKey = e.nextElement();
            if (ignoredProperties.contains(propKey)) {
                if (log != null)
                    log.warn("Output property '" + propKey + "' is not supported and will be ignored");
                continue;
            }
            if (!supportedProperties.contains(propKey)) {
                iE = new IllegalArgumentException("Invalid output property '" + propKey + "'");
                if (log != null)
                    log.error(iE);
                throw iE;
            }
            String propVal = oformat.getProperty((String) propKey);
            if (OutputKeys.METHOD.equals(propKey) && !isValidOutputMethod(propVal)) {
                iE = new IllegalArgumentException(
                        "Unsupported output method " + oformat.getProperty((String) propKey));
                if (log != null)
                    log.error(iE);
                throw iE;
            }
        }
        processor.outputProperties = (Properties) oformat.clone();
    }
}
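
Because the implementation stores a clone rather than the caller's object, later changes to the supplied Properties do not leak into the processor. A minimal sketch against the standard javax.xml.transform API (the isolation shown depends on the implementation copying the properties, as Joost's TransformerImpl above does):

import java.util.Properties;

import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
import javax.xml.transform.TransformerFactory;

public class OutputPropertiesDemo {
    public static void main(String[] args) throws TransformerConfigurationException {
        Transformer transformer = TransformerFactory.newInstance().newTransformer();

        Properties format = new Properties();
        format.setProperty(OutputKeys.METHOD, "xml");
        format.setProperty(OutputKeys.INDENT, "yes");
        transformer.setOutputProperties(format);

        // With a copying implementation, this later change does not alter
        // the transformer's stored output properties.
        format.setProperty(OutputKeys.INDENT, "no");

        System.out.println(transformer.getOutputProperty(OutputKeys.INDENT)); // "yes"
    }
}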

From source file:pl.project13.maven.git.GitCommitIdMojo.java

void maybeGeneratePropertiesFile(@NotNull Properties localProperties, File base, String propertiesFilename)
        throws GitCommitIdExecutionException {
    try {
        final File gitPropsFile = craftPropertiesOutputFile(base, propertiesFilename);
        final boolean isJsonFormat = "json".equalsIgnoreCase(format);

        boolean shouldGenerate = true;

        if (gitPropsFile.exists()) {
            final Properties persistedProperties;

            try {
                if (isJsonFormat) {
                    log.info("Reading existing json file [{}] (for module {})...",
                            gitPropsFile.getAbsolutePath(), project.getName());

                    persistedProperties = readJsonProperties(gitPropsFile);
                } else {
                    log.info("Reading existing properties file [{}] (for module {})...",
                            gitPropsFile.getAbsolutePath(), project.getName());

                    persistedProperties = readProperties(gitPropsFile);
                }

                final Properties propertiesCopy = (Properties) localProperties.clone();

                final String buildTimeProperty = prefixDot + BUILD_TIME;

                propertiesCopy.remove(buildTimeProperty);
                persistedProperties.remove(buildTimeProperty);

                shouldGenerate = !propertiesCopy.equals(persistedProperties);
            } catch (CannotReadFileException ex) {
                // Read has failed, regenerate file
                log.info("Cannot read properties file [{}] (for module {})...", gitPropsFile.getAbsolutePath(),
                        project.getName());
                shouldGenerate = true;
            }
        }

        if (shouldGenerate) {
            Files.createParentDirs(gitPropsFile);
            Writer outputWriter = null;
            boolean threw = true;

            try {
                outputWriter = new OutputStreamWriter(new FileOutputStream(gitPropsFile), sourceCharset);
                if (isJsonFormat) {
                    log.info("Writing json file to [{}] (for module {})...", gitPropsFile.getAbsolutePath(),
                            project.getName());
                    ObjectMapper mapper = new ObjectMapper();
                    mapper.writerWithDefaultPrettyPrinter().writeValue(outputWriter, localProperties);
                } else {
                    log.info("Writing properties file to [{}] (for module {})...",
                            gitPropsFile.getAbsolutePath(), project.getName());
                    localProperties.store(outputWriter, "Generated by Git-Commit-Id-Plugin");
                }
                threw = false;
            } catch (final IOException ex) {
                throw new RuntimeException("Cannot create custom git properties file: " + gitPropsFile, ex);
            } finally {
                Closeables.close(outputWriter, threw);
            }
        } else {
            log.info("Properties file [{}] is up-to-date (for module {})...", gitPropsFile.getAbsolutePath(),
                    project.getName());
        }
    } catch (IOException e) {
        throw new GitCommitIdExecutionException(e);
    }
}
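
The noteworthy use of clone() here is the up-to-date check: the in-memory properties are cloned, the volatile build-time entry is dropped from both sides, and the file is only rewritten when the remaining entries differ. A stripped-down sketch of that comparison (the key name below is an assumption used for illustration):

import java.util.Properties;

public class UpToDateCheck {
    // Returns true when the two property sets differ, ignoring the volatile key.
    // "git.build.time" is an illustrative key name, not taken from the plugin.
    static boolean needsRegeneration(Properties current, Properties persisted) {
        Properties currentCopy = (Properties) current.clone();
        Properties persistedCopy = (Properties) persisted.clone();
        currentCopy.remove("git.build.time");
        persistedCopy.remove("git.build.time");
        return !currentCopy.equals(persistedCopy);
    }
}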

From source file:org.hyperledger.fabric_ca.sdk.HFCAClient.java

/**
 * HFCAClient constructor
 *
 * @param url        Http URL for the Fabric's certificate authority services endpoint
 * @param properties PEM used for SSL .. not implemented.
 *                   <p>
 *                   Supported properties
 *                   <ul>
 *                   <li>pemFile - File location for x509 pem certificate for SSL.</li>
 *                   <li>allowAllHostNames - boolean(true/false) override certificate CN host matching -- for development only.</li>
 *                   </ul>
 * @throws MalformedURLException
 */
HFCAClient(String caName, String url, Properties properties) throws MalformedURLException {
    logger.debug(format("new HFCAClient %s", url));
    this.url = url;

    this.caName = caName; //name may be null

    URL purl = new URL(url);
    final String proto = purl.getProtocol();
    if (!"http".equals(proto) && !"https".equals(proto)) {
        throw new IllegalArgumentException("HFCAClient only supports http or https not " + proto);
    }
    final String host = purl.getHost();

    if (Utils.isNullOrEmpty(host)) {
        throw new IllegalArgumentException("HFCAClient url needs host");
    }

    final String path = purl.getPath();

    if (!Utils.isNullOrEmpty(path)) {

        throw new IllegalArgumentException(
                "HFCAClient url does not support path portion in url remove path: '" + path + "'.");
    }

    final String query = purl.getQuery();

    if (!Utils.isNullOrEmpty(query)) {

        throw new IllegalArgumentException(
                "HFCAClient url does not support query portion in url remove query: '" + query + "'.");
    }

    isSSL = "https".equals(proto);

    if (properties != null) {
        this.properties = (Properties) properties.clone(); //keep our own copy.
    } else {
        this.properties = null;
    }

}

From source file:net.sf.farrago.namespace.jdbc.MedJdbcDataServer.java

public void initialize() throws SQLException {
    Properties props = getProperties();
    connectProps = null;

    jndiName = props.getProperty(PROP_JNDI_NAME);

    if (jndiName == null) {
        requireProperty(props, PROP_URL);
    }

    url = props.getProperty(PROP_URL);
    userName = props.getProperty(PROP_USER_NAME);
    password = props.getProperty(PROP_PASSWORD);

    disableConnectionPool = getBooleanProperty(props, PROP_DISABLE_CONNECTION_POOL,
            DEFAULT_DISABLE_CONNECTION_POOL);

    if (jndiName != null) {
        if (url != null) {
            throw FarragoResource.instance().PluginPropsConflict.ex(PROP_JNDI_NAME, PROP_URL);
        }
        if (userName != null) {
            throw FarragoResource.instance().PluginPropsConflict.ex(PROP_JNDI_NAME, PROP_USER_NAME);
        }
        if (password != null) {
            throw FarragoResource.instance().PluginPropsConflict.ex(PROP_JNDI_NAME, PROP_PASSWORD);
        }
        if (disableConnectionPool) {
            throw FarragoResource.instance().PluginPropsConflict.ex(PROP_JNDI_NAME,
                    PROP_DISABLE_CONNECTION_POOL);
        }
    }

    schemaName = props.getProperty(PROP_SCHEMA_NAME);
    catalogName = props.getProperty(PROP_CATALOG_NAME);
    if (jndiName == null) {
        loginTimeout = props.getProperty(PROP_LOGIN_TIMEOUT);
        validationQuery = props.getProperty(PROP_VALIDATION_QUERY);

        if (!disableConnectionPool) {
            String validationTimingProp = props.getProperty(PROP_VALIDATION_TIMING, DEFAULT_VALIDATION_TIMING);
            for (String validationTiming : validationTimingProp.split(",")) {
                validationTiming = validationTiming.trim().toUpperCase();
                if (validationTiming.equals(PROP_VALIDATION_TIMING_ON_BORROW)) {
                    validateOnBorrow = true;
                } else if (validationTiming.equals(PROP_VALIDATION_TIMING_ON_RETURN)) {
                    validateOnReturn = true;
                } else if (validationTiming.equals(PROP_VALIDATION_TIMING_WHILE_IDLE)) {
                    validateWhileIdle = true;
                } else {
                    throw FarragoResource.instance().PluginInvalidStringProp.ex(validationTiming,
                            PROP_VALIDATION_TIMING);
                }
            }
        }
    }

    schemaMaps = new HashMap<String, Map<String, String>>();
    tableMaps = new HashMap<String, Map<String, Source>>();
    tablePrefixMaps = new HashMap<String, List<WildcardMapping>>();

    if (getBooleanProperty(props, PROP_EXT_OPTIONS, false)) {
        if (jndiName != null) {
            throw FarragoResource.instance().PluginPropsConflict.ex(PROP_JNDI_NAME, PROP_EXT_OPTIONS);
        }

        connectProps = (Properties) props.clone();
        removeNonDriverProps(connectProps);
    }

    useSchemaNameAsForeignQualifier = getBooleanProperty(props, PROP_USE_SCHEMA_NAME_AS_FOREIGN_QUALIFIER,
            DEFAULT_USE_SCHEMA_NAME_AS_FOREIGN_QUALIFIER);

    lenient = getBooleanProperty(props, PROP_LENIENT, DEFAULT_LENIENT);
    skipTypeCheck = getBooleanProperty(props, PROP_SKIP_TYPE_CHECK, DEFAULT_SKIP_TYPE_CHECK);

    assumePushdownValid = getBooleanProperty(props, PROP_ASSUME_PUSHDOWN_VALID, DEFAULT_ASSUME_PUSHDOWN_VALID);

    disabledPushdownPattern = Pattern.compile(
            props.getProperty(PROP_DISABLED_PUSHDOWN_REL_PATTERN, DEFAULT_DISABLED_PUSHDOWN_REL_PATTERN));

    String tableTypeString = props.getProperty(PROP_TABLE_TYPES);
    if (tableTypeString == null) {
        tableTypes = null;
    } else {
        tableTypes = tableTypeString.split(",");
    }

    // Ignore login timeout if JNDI lookup will be used.
    if ((loginTimeout != null) && (jndiName == null)) {
        try {
            // REVIEW: SWZ: 2008-09-03: This is a global setting. If
            // multiple MedJdbcDataServers are configured with different
            // values they'll step on each other.  Not to mention other
            // plugins which may make their own calls! (See FRG-343)
            DriverManager.setLoginTimeout(Integer.parseInt(loginTimeout));
        } catch (NumberFormatException ne) {
            // ignore the timeout
        }
    }

    fetchSize = getIntProperty(props, PROP_FETCH_SIZE, DEFAULT_FETCH_SIZE);
    autocommit = getBooleanProperty(props, PROP_AUTOCOMMIT, DEFAULT_AUTOCOMMIT);

    if (!disableConnectionPool) {
        maxIdleConnections = getIntProperty(props, PROP_MAX_IDLE_CONNECTIONS, DEFAULT_MAX_IDLE_CONNECTIONS);
        evictionTimerPeriodMillis = getLongProperty(props, PROP_EVICTION_TIMER_PERIOD_MILLIS,
                DEFAULT_EVICTION_TIMER_PERIOD);
        minEvictionIdleMillis = getLongProperty(props, PROP_MIN_EVICTION_IDLE_MILLIS,
                DEFAULT_MIN_EVICTION_IDLE_MILLIS);

        initializeDataSource();
    }

    DatabaseMetaData databaseMetaData = getDatabaseMetaData();

    String schemaMapping = props.getProperty(PROP_SCHEMA_MAPPING);
    String tableMapping = props.getProperty(PROP_TABLE_MAPPING);

    String tablePrefixMapping = props.getProperty(PROP_TABLE_PREFIX_MAPPING);

    try {
        if (((schemaMapping != null) && (tableMapping != null))
                || ((schemaMapping != null) && (tablePrefixMapping != null))
                || ((tableMapping != null) && (tablePrefixMapping != null))) {
            throw FarragoResource.instance().MedJdbc_InvalidTableSchemaMapping.ex();
        }

        if (schemaMapping != null) {
            parseMapping(databaseMetaData, schemaMapping, false, false);
        } else if (tableMapping != null) {
            parseMapping(databaseMetaData, tableMapping, true, false);
        } else if (tablePrefixMapping != null) {
            parseMapping(databaseMetaData, tablePrefixMapping, true, true);
        }
    } catch (SQLException e) {
        logger.log(Level.SEVERE, "Error initializing MedJdbc mappings", e);
        closeAllocation();
        throw e;
    } catch (RuntimeException e) {
        logger.log(Level.SEVERE, "Error initializing MedJdbc mappings", e);
        closeAllocation();
        throw e;
    }
}
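
The clone() call in this initializer keeps the original server properties intact while a trimmed copy, stripped of wrapper-specific options, is handed to the JDBC driver. A simplified sketch of the same idea using plain JDBC (the removed key names are illustrative, not the Farrago constants):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

public class DriverPropsSketch {
    static Connection connect(String url, Properties serverProps) throws SQLException {
        // Work on a copy so the caller's configuration object is not modified.
        Properties driverProps = (Properties) serverProps.clone();
        driverProps.remove("schemaName");  // wrapper-only option (illustrative)
        driverProps.remove("tableTypes");  // wrapper-only option (illustrative)
        return DriverManager.getConnection(url, driverProps);
    }
}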

From source file:org.apache.hadoop.hive.metastore.MetaStoreUtils.java

/**
 * Get partition level schema from table level schema.
 * This function will use the same column names, column types and partition keys for
 * each partition Properties. Their values are copied from the table Properties. This
 * is mainly to save CPU and memory. CPU is saved because the first time the
 * StorageDescriptor column names are accessed, JDO needs to execute a SQL query to
 * retrieve the data. If we know the data will be the same as the table level schema
 * and they are immutable, we should just reuse the table level schema objects.
 *
 * @param sd The Partition level Storage Descriptor.
 * @param tblsd The Table level Storage Descriptor.
 * @param parameters partition level parameters
 * @param databaseName DB name
 * @param tableName table name
 * @param partitionKeys partition columns
 * @param tblSchema The table level schema from which this partition should be copied.
 * @return the properties
 */
public static Properties getPartSchemaFromTableSchema(org.apache.hadoop.hive.metastore.api.StorageDescriptor sd,
        org.apache.hadoop.hive.metastore.api.StorageDescriptor tblsd, Map<String, String> parameters,
        String databaseName, String tableName, List<FieldSchema> partitionKeys, Properties tblSchema) {

    // Inherit most properties from the table level schema and overwrite some properties
    // in the following code.
    // This is mainly for saving CPU and memory to reuse the column names, types and
    // partition columns in the table level schema.
    Properties schema = (Properties) tblSchema.clone();

    // InputFormat
    String inputFormat = sd.getInputFormat();
    if (inputFormat == null || inputFormat.length() == 0) {
        String tblInput = schema
                .getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT);
        if (tblInput == null) {
            inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName();
        } else {
            inputFormat = tblInput;
        }
    }
    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT,
            inputFormat);

    // OutputFormat
    String outputFormat = sd.getOutputFormat();
    if (outputFormat == null || outputFormat.length() == 0) {
        String tblOutput = schema
                .getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT);
        if (tblOutput == null) {
            outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName();
        } else {
            outputFormat = tblOutput;
        }
    }
    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT,
            outputFormat);

    // Location
    if (sd.getLocation() != null) {
        schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION,
                sd.getLocation());
    }

    // Bucket count
    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT,
            Integer.toString(sd.getNumBuckets()));

    if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) {
        schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME,
                sd.getBucketCols().get(0));
    }

    // SerdeInfo
    if (sd.getSerdeInfo() != null) {

        // We should not update the following 3 values if SerDeInfo contains these.
        // This is to keep backward compatible with getSchema(), where these 3 keys
        // are updated after SerDeInfo properties got copied.
        String cols = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS;
        String colTypes = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES;
        String parts = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS;

        for (Map.Entry<String, String> param : sd.getSerdeInfo().getParameters().entrySet()) {
            String key = param.getKey();
            if (schema.get(key) != null && (key.equals(cols) || key.equals(colTypes) || key.equals(parts))) {
                continue;
            }
            schema.put(key, (param.getValue() != null) ? param.getValue() : StringUtils.EMPTY);
        }

        if (sd.getSerdeInfo().getSerializationLib() != null) {
            schema.setProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_LIB,
                    sd.getSerdeInfo().getSerializationLib());
        }
    }

    // skipping columns since partition level field schemas are the same as table level's
    // skipping partition keys since it is the same as table level partition keys

    if (parameters != null) {
        for (Entry<String, String> e : parameters.entrySet()) {
            schema.setProperty(e.getKey(), e.getValue());
        }
    }

    return schema;
}

From source file:org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.java

/**
 * Get partition level schema from table level schema.
 * This function will use the same column names, column types and partition keys for
 * each partition Properties. Their values are copied from the table Properties. This
 * is mainly to save CPU and memory. CPU is saved because the first time the
 * StorageDescriptor column names are accessed, JDO needs to execute a SQL query to
 * retrieve the data. If we know the data will be the same as the table level schema
 * and they are immutable, we should just reuse the table level schema objects.
 *
 * @param sd The Partition level Storage Descriptor.
 * @param parameters partition level parameters
 * @param tblSchema The table level schema from which this partition should be copied.
 * @return the properties
 */
public static Properties getPartSchemaFromTableSchema(StorageDescriptor sd, Map<String, String> parameters,
        Properties tblSchema) {

    // Inherit most properties from the table level schema and overwrite some properties
    // in the following code.
    // This is mainly for saving CPU and memory to reuse the column names, types and
    // partition columns in the table level schema.
    Properties schema = (Properties) tblSchema.clone();

    // InputFormat
    String inputFormat = sd.getInputFormat();
    if (inputFormat == null || inputFormat.length() == 0) {
        String tblInput = schema
                .getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT);
        if (tblInput == null) {
            inputFormat = org.apache.hadoop.mapred.SequenceFileInputFormat.class.getName();
        } else {
            inputFormat = tblInput;
        }
    }
    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT,
            inputFormat);

    // OutputFormat
    String outputFormat = sd.getOutputFormat();
    if (outputFormat == null || outputFormat.length() == 0) {
        String tblOutput = schema
                .getProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT);
        if (tblOutput == null) {
            outputFormat = org.apache.hadoop.mapred.SequenceFileOutputFormat.class.getName();
        } else {
            outputFormat = tblOutput;
        }
    }
    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_OUTPUT_FORMAT,
            outputFormat);

    // Location
    if (sd.getLocation() != null) {
        schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION,
                sd.getLocation());
    }

    // Bucket count
    schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_COUNT,
            Integer.toString(sd.getNumBuckets()));

    if (sd.getBucketCols() != null && sd.getBucketCols().size() > 0) {
        schema.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.BUCKET_FIELD_NAME,
                Joiner.on(",").join(sd.getBucketCols()));
    }

    // SerdeInfo
    if (sd.getSerdeInfo() != null) {

        // We should not update the following 3 values if SerDeInfo contains these.
        // This is to keep backward compatible with getSchema(), where these 3 keys
        // are updated after SerDeInfo properties got copied.
        String cols = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMNS;
        String colTypes = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_COLUMN_TYPES;
        String parts = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS;

        for (Map.Entry<String, String> param : sd.getSerdeInfo().getParameters().entrySet()) {
            String key = param.getKey();
            if (schema.get(key) != null && ((key.equals(cols) || key.equals(colTypes) || key.equals(parts) ||
            // Skip Druid and JDBC properties which are used in respective SerDes,
            // since they are also updated after SerDeInfo properties are copied.
                    key.startsWith(
                            org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DRUID_CONFIG_PREFIX)
                    || key.startsWith(
                            org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.JDBC_CONFIG_PREFIX)))) {
                continue;
            }
            schema.put(key, (param.getValue() != null) ? param.getValue() : StringUtils.EMPTY);
        }

        if (sd.getSerdeInfo().getSerializationLib() != null) {
            schema.setProperty(ColumnType.SERIALIZATION_LIB, sd.getSerdeInfo().getSerializationLib());
        }
    }

    // skipping columns since partition level field schemas are the same as table level's
    // skipping partition keys since it is the same as table level partition keys

    if (parameters != null) {
        for (Map.Entry<String, String> e : parameters.entrySet()) {
            schema.setProperty(e.getKey(), e.getValue());
        }
    }

    return schema;
}

From source file:org.apache.jackrabbit.core.config.BeanConfig.java

/**
 * Creates a bean configuration. Note that a copy of the given
 * bean properties is stored as a part of the created configuration
 * object. Thus the caller is free to modify the given properties
 * once the configuration object has been created.
 *
 * @param className class name of the bean
 * @param properties initial properties of the bean
 */
public BeanConfig(String className, Properties properties) {
    this.className = className;
    this.properties = (Properties) properties.clone();
}
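
A hypothetical caller, showing why the copy matters: the Properties instance can be reused or changed after the BeanConfig is built without affecting it (the bean class name and keys below are placeholders):

import java.util.Properties;

import org.apache.jackrabbit.core.config.BeanConfig;

public class BeanConfigExample {
    public static void main(String[] args) {
        Properties beanProps = new Properties();
        beanProps.setProperty("path", "${rep.home}/repository"); // placeholder value

        BeanConfig config = new BeanConfig("org.example.FileSystemBean", beanProps); // placeholder class

        // Safe: BeanConfig stored its own clone, so this change never reaches it.
        beanProps.setProperty("path", "/tmp/somewhere-else");
    }
}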

From source file:org.apache.jetspeed.modules.actions.portlets.designer.HeaderAction.java

public void doDefault(RunData rundata, Context context) throws Exception {
    try {
        String logo = IMAGES_DIRECTORY + "jetspeed-logo.gif";
        String bgImage = "";
        String fontSize = "10";
        String title = "Jakarta Jetspeed";

        String DEFAULT_ROOT = File.separator + "WEB-INF" + File.separator + "conf" + File.separator;
        String root = rundata.getServletConfig().getServletContext().getRealPath(DEFAULT_ROOT) + File.separator;

        Properties prop = new Properties();
        prop.load(new FileInputStream(root + "JetspeedResources.properties"));
        prop.clone(); // note: the cloned copy is discarded here; 'prop' itself is updated below

        TurbineResources trProp = (TurbineResources) TurbineResources
                .getResources(root + "JetspeedResources.properties");

        TurbineResources.setProperty("portal.title", title);
        prop.setProperty("portal.title", title);
        TurbineResources.setProperty("topnav.logo.file", logo);
        prop.setProperty("topnav.logo.file", logo);
        TurbineResources.setProperty("topnav.bg.image", "");
        prop.setProperty("topnav.bg.image", "");
        TurbineResources.setProperty("topnav.bg.color", "");
        prop.setProperty("topnav.bg.color", "");

        TurbineResources.setProperty("topnav.font.size", "");
        prop.setProperty("topnav.font.size", "");

        TurbineResources.setProperty("topnav.font.color", "");
        prop.setProperty("topnav.font.color", "");

        FileOutputStream stream = new FileOutputStream(root + "JetspeedResources.properties");

        // Note: Properties.save() is deprecated and writes the complete
        // property list on every call, so a single store() would suffice.
        prop.save(stream, "topnav.logo.file");
        prop.save(stream, "portal.title");
        prop.save(stream, "topnav.bg.image");
        prop.save(stream, "topnav.bg.color");
        prop.save(stream, "topnav.font.size");
        stream.close();

        context.put("settingStatus", "Successfully changed to default settings.");
    } catch (Exception e) {
        context.put("settingStatus", "Error occurred while changing to default settings. ");
        log.error(e);
    }
}

From source file:org.apache.zeppelin.jdbc.JDBCUserConfigurations.java

public void setPropertyMap(String key, Properties properties) {
    Properties p = (Properties) properties.clone();
    propertiesMap.put(key, p);
}