Example usage for java.util LinkedHashMap putAll

List of usage examples for java.util LinkedHashMap putAll

Introduction

This page collects example usages of java.util.LinkedHashMap.putAll, drawn from open-source projects.

Prototype

void putAll(Map<? extends K, ? extends V> m);

Document

Copies all of the mappings from the specified map to this map (optional operation).
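
Before the project examples, here is a minimal, self-contained sketch (not taken from the projects below) of what this means for a LinkedHashMap: every mapping from the source map is copied into the target, keys that already exist keep their insertion position and only receive the new value, and previously unseen keys are appended at the end of the iteration order. Class and variable names are illustrative only.

import java.util.LinkedHashMap;
import java.util.Map;

public class PutAllDemo {
    public static void main(String[] args) {
        Map<String, String> dst = new LinkedHashMap<>();
        dst.put("host", "localhost");
        dst.put("port", "8080");

        Map<String, String> src = new LinkedHashMap<>();
        src.put("port", "9090");   // existing key: value replaced, position kept
        src.put("user", "admin");  // new key: appended at the end of the iteration order

        dst.putAll(src);

        // Prints {host=localhost, port=9090, user=admin}
        System.out.println(dst);
    }
}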

Usage

From source file:com.google.gwt.emultest.java.util.LinkedHashMapTest.java

public void testIsEmpty() {
    LinkedHashMap<String, String> srcMap = new LinkedHashMap<String, String>();
    checkEmptyLinkedHashMapAssumptions(srcMap);

    LinkedHashMap<String, String> dstMap = new LinkedHashMap<String, String>();
    checkEmptyLinkedHashMapAssumptions(dstMap);

    dstMap.putAll(srcMap);
    assertTrue(dstMap.isEmpty());

    dstMap.put(KEY_KEY, VALUE_VAL);
    assertFalse(dstMap.isEmpty());

    dstMap.remove(KEY_KEY);
    assertTrue(dstMap.isEmpty());
    assertEquals(dstMap.size(), 0);
}

From source file:de.helmholtz_muenchen.ibis.utils.abstractNodes.BinaryWrapperNode.BinaryWrapperNodeModel.java

/**
 * Returns the parameters which were set by the parameter file, the additional parameter field
 * and the GUI.
 * Override levels: GUI parameter -> additional parameter -> default parameter file
 * @param inData Input data tables
 * @return list of command strings built from the merged parameters
 */
protected ArrayList<String> getSetParameters(final BufferedDataTable[] inData) {
    LinkedHashMap<String, String> pars = new LinkedHashMap<String, String>(); // merged parameter set
    //      LinkedHashMap<String, String> parsFile = getParametersFromParameterFile();         // get parameter from file
    //      LinkedHashMap<String, String> parsAdditional = getAdditionalParameter();         // get parameter from input field
    LinkedHashMap<String, String> parsGUI = getGUIParameters(inData); // get parameter from GUI

    // merge them all together
    //      pars.putAll(parsFile);
    //      pars.putAll(parsAdditional);
    pars.putAll(parsGUI);

    // build the command list
    ArrayList<String> commands = new ArrayList<String>();
    for (Iterator<String> it = pars.keySet().iterator(); it.hasNext();) {
        // add parameter name
        String key = it.next();

        if (key.length() > 0) {
            // add value, if some is set
            String value = pars.get(key);
            if (value.length() != 0)
                commands.add(key + " " + value);
            else
                commands.add(key);
        }
    }

    // return the commands
    return commands;
}
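
The override order described in the javadoc comes straight from the order of the putAll calls: each later call overwrites earlier values for any shared key (here the file and additional-parameter calls are commented out, so only the GUI parameters are merged). A minimal sketch of that layering, with hypothetical parameter maps and values:

import java.util.LinkedHashMap;
import java.util.Map;

public class ParameterMergeSketch {
    public static void main(String[] args) {
        Map<String, String> fileDefaults = new LinkedHashMap<>();
        fileDefaults.put("--threads", "1");
        fileDefaults.put("--out", "/tmp/result");

        Map<String, String> guiParameters = new LinkedHashMap<>();
        guiParameters.put("--threads", "8"); // set in the GUI, should win

        // Later putAll calls overwrite earlier ones, giving the override order
        // default parameter file -> additional parameters -> GUI parameters.
        Map<String, String> merged = new LinkedHashMap<>();
        merged.putAll(fileDefaults);
        merged.putAll(guiParameters);

        // Prints {--threads=8, --out=/tmp/result}
        System.out.println(merged);
    }
}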

From source file:com.google.gwt.emultest.java.util.LinkedHashMapTest.java

/**
 * Test method for 'java.util.LinkedHashMap.putAll(Map)'.
 */
public void testPutAll() {
    LinkedHashMap<String, String> srcMap = new LinkedHashMap<String, String>();
    checkEmptyLinkedHashMapAssumptions(srcMap);

    srcMap.put(KEY_1, VALUE_1);
    srcMap.put(KEY_2, VALUE_2);
    srcMap.put(KEY_3, VALUE_3);

    // Make sure that the data is copied correctly
    LinkedHashMap<String, String> dstMap = new LinkedHashMap<String, String>();
    checkEmptyLinkedHashMapAssumptions(dstMap);

    dstMap.putAll(srcMap);
    assertEquals(srcMap.size(), dstMap.size());
    assertTrue(dstMap.containsKey(KEY_1));
    assertTrue(dstMap.containsValue(VALUE_1));
    assertFalse(dstMap.containsKey(KEY_1.toUpperCase(Locale.ROOT)));
    assertFalse(dstMap.containsValue(VALUE_1.toUpperCase(Locale.ROOT)));

    assertTrue(dstMap.containsKey(KEY_2));
    assertTrue(dstMap.containsValue(VALUE_2));
    assertFalse(dstMap.containsKey(KEY_2.toUpperCase(Locale.ROOT)));
    assertFalse(dstMap.containsValue(VALUE_2.toUpperCase(Locale.ROOT)));

    assertTrue(dstMap.containsKey(KEY_3));
    assertTrue(dstMap.containsValue(VALUE_3));
    assertFalse(dstMap.containsKey(KEY_3.toUpperCase(Locale.ROOT)));
    assertFalse(dstMap.containsValue(VALUE_3.toUpperCase(Locale.ROOT)));

    // Check that an empty map does not blow away the contents of the
    // destination map
    LinkedHashMap<String, String> emptyMap = new LinkedHashMap<String, String>();
    checkEmptyLinkedHashMapAssumptions(emptyMap);
    dstMap.putAll(emptyMap);
    assertTrue(dstMap.size() == srcMap.size());

    // Check that putAll overwrites any existing mapping in the destination map
    srcMap.put(KEY_1, VALUE_2);
    srcMap.put(KEY_2, VALUE_3);
    srcMap.put(KEY_3, VALUE_1);

    dstMap.putAll(srcMap);
    assertEquals(dstMap.size(), srcMap.size());
    assertEquals(dstMap.get(KEY_1), VALUE_2);
    assertEquals(dstMap.get(KEY_2), VALUE_3);
    assertEquals(dstMap.get(KEY_3), VALUE_1);

    // Check that putAll adds new data but does not remove existing entries

    srcMap.put(KEY_4, VALUE_4);
    dstMap.putAll(srcMap);
    assertEquals(dstMap.size(), srcMap.size());
    assertTrue(dstMap.containsKey(KEY_4));
    assertTrue(dstMap.containsValue(VALUE_4));
    assertEquals(dstMap.get(KEY_1), VALUE_2);
    assertEquals(dstMap.get(KEY_2), VALUE_3);
    assertEquals(dstMap.get(KEY_3), VALUE_1);
    assertEquals(dstMap.get(KEY_4), VALUE_4);

    dstMap.putAll(dstMap);
}

From source file:org.apache.flink.api.java.typeutils.runtime.kryo.KryoSerializer.java

@SuppressWarnings("unchecked")
@Override
public CompatibilityResult<T> ensureCompatibility(TypeSerializerConfigSnapshot<?> configSnapshot) {
    if (configSnapshot instanceof KryoSerializerConfigSnapshot) {
        final KryoSerializerConfigSnapshot<T> config = (KryoSerializerConfigSnapshot<T>) configSnapshot;

        if (type.equals(config.getTypeClass())) {
            LinkedHashMap<String, KryoRegistration> reconfiguredRegistrations = config.getKryoRegistrations();

            // reconfigure by ensuring that classes which were previously registered are registered
            // again in the exact same order; new class registrations will be appended.
            // This also overwrites any dummy placeholders that the restored old configuration has.
            reconfiguredRegistrations.putAll(kryoRegistrations);

            // check if there are still any dummy placeholders even after reconfiguration;
            // if so, then this new Kryo serializer cannot read old data and is therefore incompatible
            for (Map.Entry<String, KryoRegistration> reconfiguredRegistrationEntry : reconfiguredRegistrations
                    .entrySet()) {
                if (reconfiguredRegistrationEntry.getValue().isDummy()) {
                    LOG.warn("The Kryo registration for a previously registered class {} does not have a "
                            + "proper serializer, because its previous serializer cannot be loaded or is no "
                            + "longer valid but a new serializer is not available",
                            reconfiguredRegistrationEntry.getKey());

                    return CompatibilityResult.requiresMigration();
                }
            }

            // there's actually no way to tell if new Kryo serializers are compatible with
            // the previous ones they overwrite; we can only signal compatibility and hope for the best
            this.kryoRegistrations = reconfiguredRegistrations;
            return CompatibilityResult.compatible();
        }
    }

    return CompatibilityResult.requiresMigration();
}
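
The reconfiguration step relies on putAll updating the values of already-present keys in place, so the restored registration order is kept while genuinely new registrations land at the tail. A rough, self-contained sketch of that idea, using plain strings in place of Flink's KryoRegistration (all names and values are illustrative):

import java.util.LinkedHashMap;

public class ReconfigureSketch {
    public static void main(String[] args) {
        // Registrations restored from a snapshot; "DUMMY" marks a class whose
        // previous serializer could not be loaded.
        LinkedHashMap<String, String> restored = new LinkedHashMap<>();
        restored.put("com.acme.A", "SerializerA");
        restored.put("com.acme.B", "DUMMY");

        // Registrations of the current serializer instance.
        LinkedHashMap<String, String> current = new LinkedHashMap<>();
        current.put("com.acme.B", "SerializerB"); // replaces the dummy placeholder
        current.put("com.acme.C", "SerializerC"); // new class, appended at the end

        restored.putAll(current);

        // Restored order is preserved, the placeholder is overwritten, and the new
        // registration is appended:
        // {com.acme.A=SerializerA, com.acme.B=SerializerB, com.acme.C=SerializerC}
        System.out.println(restored);
    }
}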

From source file:com.streamsets.pipeline.stage.processor.hive.HiveMetadataProcessor.java

@Override
protected void process(Record record, BatchMaker batchMaker) throws StageException {
    ELVars variables = getContext().createELVars();
    RecordEL.setRecordInContext(variables, record);
    TimeEL.setCalendarInContext(variables, Calendar.getInstance());
    TimeNowEL.setTimeNowInContext(variables, new Date());

    // Calculate record time for this particular record and persist it in the variables
    Date timeBasis = elEvals.timeDriverElEval.eval(variables, timeDriver, Date.class);
    Calendar calendar = Calendar.getInstance(timeZone);
    calendar.setTime(timeBasis);
    TimeEL.setCalendarInContext(variables, calendar);

    String dbName = HiveMetastoreUtil.resolveEL(elEvals.dbNameELEval, variables, databaseEL);
    String tableName = HiveMetastoreUtil.resolveEL(elEvals.tableNameELEval, variables, tableEL);
    String targetPath;
    String avroSchema;
    String partitionStr = "";
    LinkedHashMap<String, String> partitionValMap;

    if (dbName.isEmpty()) {
        dbName = DEFAULT_DB;
    }
    try {
        // Validate Database and Table names
        if (!HiveMetastoreUtil.validateObjectName(dbName)) {
            throw new HiveStageCheckedException(Errors.HIVE_METADATA_03, "database name", dbName);
        }
        if (!HiveMetastoreUtil.validateObjectName(tableName)) {
            throw new HiveStageCheckedException(Errors.HIVE_METADATA_03, "table name", tableName);
        }

        partitionValMap = getPartitionValuesFromRecord(variables);

        if (partitioned) {
            partitionStr = externalTable
                    ? HiveMetastoreUtil.resolveEL(elEvals.partitionPathTemplateELEval, variables,
                            partitionPathTemplate)
                    : HiveMetastoreUtil.generatePartitionPath(partitionValMap);
            if (!partitionStr.startsWith("/"))
                partitionStr = "/" + partitionStr;
        }
        // First, find out if this record has all necessary data to process
        validateNames(dbName, tableName);
        String qualifiedName = HiveMetastoreUtil.getQualifiedTableName(dbName, tableName);
        LOG.trace("Generated table {} for record {}", qualifiedName, record.getHeader().getSourceId());

        if (externalTable) {
            // External tables have their location in the resolved EL
            targetPath = HiveMetastoreUtil.resolveEL(elEvals.tablePathTemplateELEval, variables,
                    tablePathTemplate);
        } else {
            // Internal table location is the database location + the table name
            String databaseLocation;
            try {
                databaseLocation = databaseCache.get(dbName);
            } catch (ExecutionException e) {
                throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_23,
                        e.getMessage());
            }
            targetPath = String.format("%s/%s", databaseLocation, tableName);
        }

        if (targetPath.isEmpty()) {
            throw new HiveStageCheckedException(Errors.HIVE_METADATA_02, targetPath);
        }

        // Obtain the record structure from current record
        LinkedHashMap<String, HiveTypeInfo> recordStructure = HiveMetastoreUtil.convertRecordToHMSType(record,
                elEvals.scaleEL, elEvals.precisionEL, elEvals.commentEL, decimalDefaultsConfig.scaleExpression,
                decimalDefaultsConfig.precisionExpression, commentExpression, variables);

        if (recordStructure.isEmpty()) { // If record has no data to process, No-op
            return;
        }

        TBLPropertiesInfoCacheSupport.TBLPropertiesInfo tblPropertiesInfo = HiveMetastoreUtil
                .getCacheInfo(cache, HMSCacheType.TBLPROPERTIES_INFO, qualifiedName, queryExecutor);

        if (tblPropertiesInfo != null) {
            HiveMetastoreUtil.validateTblPropertiesInfo(dataFormat, tblPropertiesInfo, tableName);

            if (tblPropertiesInfo.isExternal() != externalTable) {
                throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_23,
                        "EXTERNAL", externalTable, tblPropertiesInfo.isExternal());
            }
        }

        TypeInfoCacheSupport.TypeInfo tableCache = HiveMetastoreUtil.getCacheInfo(cache, HMSCacheType.TYPE_INFO,
                qualifiedName, queryExecutor);

        if (tableCache != null) {
            //Checks number and name of partitions.
            HiveMetastoreUtil.validatePartitionInformation(tableCache, partitionValMap, qualifiedName);
            //Checks the type of partitions.
            Map<String, HiveTypeInfo> cachedPartitionTypeInfoMap = tableCache.getPartitionTypeInfo();
            for (Map.Entry<String, HiveTypeInfo> cachedPartitionTypeInfo : cachedPartitionTypeInfoMap
                    .entrySet()) {
                String partitionName = cachedPartitionTypeInfo.getKey();
                HiveTypeInfo expectedTypeInfo = cachedPartitionTypeInfo.getValue();
                HiveTypeInfo actualTypeInfo = partitionTypeInfo.get(partitionName);
                if (!expectedTypeInfo.equals(actualTypeInfo)) {
                    throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_28,
                            partitionName, qualifiedName, expectedTypeInfo.toString(),
                            actualTypeInfo.toString());
                }
            }
            // Validate that the columns from the record itself do not clash with partition columns
            for (String columnName : recordStructure.keySet()) {
                if (cachedPartitionTypeInfoMap.containsKey(columnName)) {
                    throw new HiveStageCheckedException(com.streamsets.pipeline.stage.lib.hive.Errors.HIVE_40,
                            columnName);
                }
            }
        }

        AvroSchemaInfoCacheSupport.AvroSchemaInfo schemaCache = HiveMetastoreUtil.getCacheInfo(cache,
                HMSCacheType.AVRO_SCHEMA_INFO, qualifiedName, queryExecutor);

        // True if there was a schema drift (including detection of new table)
        boolean schemaDrift = false;

        // Build the final structure of how the table should look
        LinkedHashMap<String, HiveTypeInfo> finalStructure;
        if (tableCache != null) {
            // Table already exists in Hive, so its columns will be preserved in their original order
            finalStructure = new LinkedHashMap<>();
            finalStructure.putAll(tableCache.getColumnTypeInfo());

            // If there is any diff (any new columns), we will append them at the end of the table
            LinkedHashMap<String, HiveTypeInfo> columnDiff = tableCache.getDiff(recordStructure);
            if (!columnDiff.isEmpty()) {
                LOG.trace("Detected drift for table {} - new columns: {}", qualifiedName,
                        StringUtils.join(columnDiff.keySet(), ","));
                schemaDrift = true;
                finalStructure.putAll(columnDiff);
            }
        } else {
            LOG.trace("{} is a new table", qualifiedName);
            // This table doesn't exist yet, so we'll use the record's own structure as the final table's structure
            schemaDrift = true;
            finalStructure = recordStructure;
        }

        // Generate the schema only if the table does not exist or its schema has changed.
        if (schemaDrift) {
            avroSchema = HiveMetastoreUtil.generateAvroSchema(finalStructure, qualifiedName);
            LOG.trace("Schema Drift. Generated new Avro schema for table {}: {}", qualifiedName, avroSchema);

            // Add custom metadata attributes if they are specified
            Map<String, String> metadataHeaderAttributeMap = new LinkedHashMap<>();
            if (metadataHeadersToAddExist) {
                metadataHeaderAttributeMap = generateResolvedHeaderAttributeMap(metadataHeaderAttributeConfigs,
                        variables);
            }

            handleSchemaChange(dbName, tableName, recordStructure, targetPath, avroSchema, batchMaker,
                    qualifiedName, tableCache, schemaCache, metadataHeaderAttributeMap);
        } else {
            if (schemaCache == null) { // Table exists in Hive, but this is cold start so the cache is null
                avroSchema = HiveMetastoreUtil.generateAvroSchema(finalStructure, qualifiedName);
                LOG.trace("Cold Start. Generated new Avro schema for table {}: {}", qualifiedName, avroSchema);
                updateAvroCache(schemaCache, avroSchema, qualifiedName);
            } else // No schema change, table already exists in Hive, and we have the Avro schema in cache.
                avroSchema = schemaCache.getSchema();
        }

        if (partitioned) {
            PartitionInfoCacheSupport.PartitionInfo pCache = HiveMetastoreUtil.getCacheInfo(cache,
                    HMSCacheType.PARTITION_VALUE_INFO, qualifiedName, queryExecutor);

            PartitionInfoCacheSupport.PartitionValues partitionValues = new PartitionInfoCacheSupport.PartitionValues(
                    partitionValMap);

            // If the partition information exists (thus this is not a cold start)
            if (pCache != null) {
                // If we detected drift, we need to persist that information and "roll" all partitions
                // the next time we see them.
                if (schemaDrift) {
                    pCache.setAllPartitionsToBeRolled();
                }

                // If we performed drift for the table and this is the first time we see this partition, we need to
                // set the roll flag anyway.
                if (pCache.shouldRoll(partitionValues)) {
                    schemaDrift = true;
                }
            }

            // Append the partition path to the target path, as all paths from now on should include the partition info
            targetPath += partitionStr;

            Map<PartitionInfoCacheSupport.PartitionValues, String> diff = detectNewPartition(partitionValues,
                    pCache, targetPath);

            // Send new partition metadata if new partition is detected.
            if (diff != null) {
                // Add custom metadata attributes if they are specified
                Map<String, String> partitionMetadataHeaderAttributeMap = new LinkedHashMap<>();
                if (metadataHeadersToAddExist) {
                    partitionMetadataHeaderAttributeMap = generateResolvedHeaderAttributeMap(
                            metadataHeaderAttributeConfigs, variables);
                }
                handleNewPartition(partitionValMap, pCache, dbName, tableName, targetPath, batchMaker,
                        qualifiedName, diff, partitionMetadataHeaderAttributeMap);
            }
        }

        // Send record to HDFS target.
        if (dataFormat == HMPDataFormat.PARQUET) {
            targetPath = targetPath + TEMP_AVRO_DIR_NAME;
        }

        changeRecordFieldToLowerCase(record);
        updateRecordForHDFS(record, schemaDrift, avroSchema, targetPath);
        batchMaker.addRecord(record, hdfsLane);
    } catch (HiveStageCheckedException error) {
        LOG.error("Error happened when processing record", error);
        LOG.trace("Record that caused the error: {}", record.toString());
        errorRecordHandler.onError(new OnRecordErrorException(record, error.getErrorCode(), error.getParams()));
    }
}
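
The schema-drift handling in the middle of this method reduces to two putAll calls on a LinkedHashMap: the cached table columns form the base, and the columns found only in the record are appended after them, so the existing column order is never disturbed. A stripped-down sketch of that merge, with plain strings standing in for HiveTypeInfo and a manual diff in place of tableCache.getDiff (all names and types are illustrative):

import java.util.LinkedHashMap;

public class SchemaDriftSketch {
    public static void main(String[] args) {
        // Columns the table already has in Hive, in their original order.
        LinkedHashMap<String, String> tableColumns = new LinkedHashMap<>();
        tableColumns.put("id", "INT");
        tableColumns.put("name", "STRING");

        // Columns present in the incoming record.
        LinkedHashMap<String, String> recordColumns = new LinkedHashMap<>();
        recordColumns.put("name", "STRING");
        recordColumns.put("created_at", "TIMESTAMP"); // new column -> drift

        // Diff: record columns the table does not know about yet.
        LinkedHashMap<String, String> columnDiff = new LinkedHashMap<>(recordColumns);
        columnDiff.keySet().removeAll(tableColumns.keySet());
        boolean schemaDrift = !columnDiff.isEmpty();

        // Final structure: existing columns first, drifted columns appended at the end.
        LinkedHashMap<String, String> finalStructure = new LinkedHashMap<>();
        finalStructure.putAll(tableColumns);
        finalStructure.putAll(columnDiff);

        // Prints true {id=INT, name=STRING, created_at=TIMESTAMP}
        System.out.println(schemaDrift + " " + finalStructure);
    }
}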

From source file:org.jahia.utils.osgi.parsers.cnd.ExtendedNodeType.java

public Map<String, ExtendedPropertyDefinition> getPropertyDefinitionsAsMap() {
    if (allProperties == null) {
        synchronized (this) {
            if (allProperties == null) {
                LinkedHashMap<String, ExtendedPropertyDefinition> props = new LinkedHashMap<String, ExtendedPropertyDefinition>();

                props.putAll(properties);

                /*
                ExtendedNodeType[] supertypes = getSupertypes();
                for (int i = supertypes.length-1; i >=0 ; i--) {
                  ExtendedNodeType nodeType = supertypes[i];
                  Map<String, ExtendedPropertyDefinition> c = new HashMap<String, ExtendedPropertyDefinition>(nodeType.getDeclaredPropertyDefinitionsAsMap());
                  Map<String, ExtendedPropertyDefinition> over = new HashMap<String, ExtendedPropertyDefinition>(properties);
                  over.keySet().retainAll(c.keySet());
                  for (ExtendedPropertyDefinition s : over.values()) {
                  s.setOverride(true);
                  }
                  c.keySet().removeAll(over.keySet());
                  props.putAll(c);
                }
                */

                allProperties = Collections.unmodifiableMap(props);
            }
        }
    }

    return allProperties;
}

From source file:org.kuali.rice.kns.util.properties.PropertyTree.java

/**
 * Builds a map containing all of the key/value pairs stored in this PropertyTree.
 *
 * @return map of the collected entries, keyed by their dotted property paths
 */
private Map collectEntries(String prefix, boolean flattenEntries) {
    LinkedHashMap entryMap = new LinkedHashMap();

    for (Iterator i = this.children.entrySet().iterator(); i.hasNext();) {
        Map.Entry e = (Map.Entry) i.next();
        PropertyTree child = (PropertyTree) e.getValue();
        String childKey = (String) e.getKey();

        // handle children with values
        if (child.hasDirectValue()) {
            String entryKey = (prefix == null) ? childKey : prefix + "." + childKey;
            String entryValue = child.getDirectValue();

            entryMap.put(entryKey, entryValue);
        }

        // handle children with children
        if (!flattenEntries && child.hasChildren()) {
            String childPrefix = (prefix == null) ? childKey : prefix + "." + childKey;

            entryMap.putAll(child.collectEntries(childPrefix, flattenEntries));
        }
    }

    return entryMap;
}
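
The recursion above uses putAll to fold each child's flattened entries into the parent's result, with the child key joined onto the dotted prefix. A small stand-alone sketch of the same flattening idea, using a nested Map instead of PropertyTree (structure and names are illustrative):

import java.util.LinkedHashMap;
import java.util.Map;

public class FlattenSketch {
    // Flattens nested maps into dotted keys, e.g. {db={host=localhost}} -> {db.host=localhost}.
    static Map<String, String> flatten(String prefix, Map<String, Object> tree) {
        LinkedHashMap<String, String> entries = new LinkedHashMap<>();
        for (Map.Entry<String, Object> e : tree.entrySet()) {
            String key = (prefix == null) ? e.getKey() : prefix + "." + e.getKey();
            if (e.getValue() instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> child = (Map<String, Object>) e.getValue();
                entries.putAll(flatten(key, child)); // merge the child's flattened entries
            } else {
                entries.put(key, String.valueOf(e.getValue()));
            }
        }
        return entries;
    }

    public static void main(String[] args) {
        Map<String, Object> db = new LinkedHashMap<>();
        db.put("host", "localhost");
        db.put("port", 5432);
        Map<String, Object> root = new LinkedHashMap<>();
        root.put("db", db);

        // Prints {db.host=localhost, db.port=5432}
        System.out.println(flatten(null, root));
    }
}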

From source file:convcao.com.agent.ConvcaoNeptusInteraction.java

public TransferData localState() {
    TransferData data = new TransferData();
    data.timeStep = this.timestep;
    data.SessionID = sessionID;
    data.Bathymeter = new double[auvs];
    data.Location = new double[auvs][3];

    for (int AUV = 0; AUV < auvs; AUV++) {
        String auvName = nameTable.get(AUV);
        double noptDepth = coords.convertWgsDepthToNoptilusDepth(positions.get(auvName).getDepth());
        data.Bathymeter[AUV] = noptDepth - bathymetry.get(auvName);
        double[] nopCoords = coords.convert(positions.get(auvName));
        if (nopCoords == null) {
            GuiUtils.errorMessage(getConsole(), "ConvCAO", auvName + " is outside operating region");
            return null;
        }

        data.Location[AUV][0] = Math.round(nopCoords[0]);
        data.Location[AUV][1] = Math.round(nopCoords[1]);
        data.Location[AUV][2] = (int) noptDepth;
    }

    LinkedHashMap<EstimatedState, ArrayList<Distance>> samples = new LinkedHashMap<>();

    synchronized (dvlMeasurements) {
        samples.putAll(dvlMeasurements);
        dvlMeasurements.clear();
    }

    Vector<double[]> locations = new Vector<>();
    Vector<Double> bathymetry = new Vector<>();

    for (Entry<EstimatedState, ArrayList<Distance>> beams : samples.entrySet()) {
        for (Distance d : beams.getValue()) {
            double measurement[] = getBeamMeasurement(beams.getKey(), d);
            locations.add(new double[] { measurement[0], measurement[1], measurement[2] });
            bathymetry.add(measurement[3]);
        }
    }

    data.MBSamples = new double[bathymetry.size()];
    data.SampleLocations = new double[bathymetry.size()][3];

    for (int i = 0; i < bathymetry.size(); i++) {
        data.MBSamples[i] = bathymetry.get(i);
        data.SampleLocations[i] = locations.get(i);
    }

    return data;
}
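
Here putAll plays a different role: combined with clear() inside the synchronized block, it atomically drains measurements that another thread keeps adding, so the snapshot can then be iterated without holding the lock. A simplified sketch of that drain pattern (field and type names are illustrative, not the Neptus API):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class DrainSketch {
    // Filled by a producer thread; always accessed while holding its monitor.
    private final LinkedHashMap<String, List<Double>> measurements = new LinkedHashMap<>();

    public void addMeasurement(String source, double value) {
        synchronized (measurements) {
            measurements.computeIfAbsent(source, k -> new ArrayList<>()).add(value);
        }
    }

    public Map<String, List<Double>> drain() {
        LinkedHashMap<String, List<Double>> snapshot = new LinkedHashMap<>();
        synchronized (measurements) {
            snapshot.putAll(measurements); // copy everything accumulated so far
            measurements.clear();          // and start collecting afresh
        }
        // The snapshot can now be iterated without holding the lock.
        return snapshot;
    }
}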

From source file:org.jahia.services.content.nodetypes.ExtendedNodeType.java

public Map<String, ExtendedNodeDefinition> getUnstructuredChildNodeDefinitions() {
    if (allUnstructuredNodes == null) {
        LinkedHashMap<String, ExtendedNodeDefinition> allUnstructuredNodesMap = new LinkedHashMap<String, ExtendedNodeDefinition>();
        allUnstructuredNodesMap.putAll(unstructuredNodes);

        ExtendedNodeType[] supertypes = getSupertypes();
        for (int i = supertypes.length - 1; i >= 0; i--) {
            ExtendedNodeType nodeType = supertypes[i];
            Map<String, ExtendedNodeDefinition> c = new HashMap<String, ExtendedNodeDefinition>(
                    nodeType.getDeclaredUnstructuredChildNodeDefinitions());
            Map<String, ExtendedNodeDefinition> over = new HashMap<String, ExtendedNodeDefinition>(
                    unstructuredNodes);
            over.keySet().retainAll(c.keySet());
            for (ExtendedNodeDefinition s : over.values()) {
                s.setOverride(true);
            }
            c.keySet().removeAll(over.keySet());
            allUnstructuredNodesMap.putAll(c);
        }
        this.allUnstructuredNodes = Collections.unmodifiableMap(allUnstructuredNodesMap);
    }
    return allUnstructuredNodes;
}

From source file:org.jahia.services.content.nodetypes.ExtendedNodeType.java

public Map<String, ExtendedPropertyDefinition> getPropertyDefinitionsAsMap() {
    if (allProperties == null) {
        synchronized (this) {
            if (allProperties == null) {
                LinkedHashMap<String, ExtendedPropertyDefinition> props = new LinkedHashMap<String, ExtendedPropertyDefinition>();

                props.putAll(properties);

                ExtendedNodeType[] supertypes = getSupertypes();
                for (int i = supertypes.length - 1; i >= 0; i--) {
                    ExtendedNodeType nodeType = supertypes[i];
                    Map<String, ExtendedPropertyDefinition> c = new HashMap<String, ExtendedPropertyDefinition>(
                            nodeType.getDeclaredPropertyDefinitionsAsMap());
                    Map<String, ExtendedPropertyDefinition> over = new HashMap<String, ExtendedPropertyDefinition>(
                            properties);
                    over.keySet().retainAll(c.keySet());
                    for (ExtendedPropertyDefinition s : over.values()) {
                        s.setOverride(true);
                    }
                    c.keySet().removeAll(over.keySet());
                    props.putAll(c);
                }

                allProperties = Collections.unmodifiableMap(props);
            }
        }
    }

    return allProperties;
}