Example usage for java.util LinkedHashMap values

List of usage examples for java.util LinkedHashMap values

Introduction

On this page you can find example usage for java.util LinkedHashMap values().

Prototype

public Collection<V> values() 

Document

Returns a Collection view of the values contained in this map.
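
For quick reference, here is a minimal, self-contained sketch (the class name LinkedHashMapValuesExample is illustrative, not from any of the sources below) showing that the returned collection iterates in insertion order and is a live view backed by the map:

import java.util.Collection;
import java.util.LinkedHashMap;

public class LinkedHashMapValuesExample {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<String, Integer>();
        map.put("one", 1);
        map.put("two", 2);
        map.put("three", 3);

        // Iteration follows insertion order: [1, 2, 3]
        Collection<Integer> values = map.values();
        System.out.println(values);

        // The collection is a view backed by the map: removing a value
        // removes the corresponding entry.
        values.remove(Integer.valueOf(2));
        System.out.println(map); // {one=1, three=3}
    }
}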

Usage

From source file:ome.formats.OMEROMetadataStoreClient.java

public IObjectContainer getIObjectContainer(Class<? extends IObject> klass,
        LinkedHashMap<Index, Integer> indexes) {
    // Transform an integer collection into an integer array without using
    // wrapper objects.
    Collection<Integer> indexValues = indexes.values();
    int[] indexesArray = new int[indexValues.size()];
    int i = 0;
    for (Integer index : indexValues) {
        indexesArray[i] = index;
        i++;
    }

    // Create a new LSID.
    LSID lsid = new LSID(klass, indexesArray);

    Map<String, Integer> asString = new HashMap<String, Integer>();
    for (Entry<Index, Integer> v : indexes.entrySet()) {
        asString.put(v.getKey().toString(), v.getValue());
    }

    if (!containerCache.containsKey(lsid)) {
        IObjectContainer c = new IObjectContainer();
        c.indexes = asString;
        c.LSID = lsid.toString();
        c.sourceObject = getSourceObjectInstance(klass);
        containerCache.put(lsid, c);
    }

    return containerCache.get(lsid);
}
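
As a side note (not part of the original source), on Java 8+ the manual unboxing loop above can be replaced with a stream over the values() view; a sketch using the same indexes variable:

    // Equivalent conversion of the values() view to an int[] using streams.
    int[] indexesArray = indexes.values().stream()
            .mapToInt(Integer::intValue)
            .toArray();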

From source file:ome.formats.OMEROMetadataStoreClient.java

/**
 * Adds a file annotation and original file reference linked to a given
 * base LSID target.
 * @param target LSID of the target object.
 * @param indexes Indexes of the annotation.
 * @param originalFileIndex Index of the original file.
 */
private void addCompanionFileAnnotationTo(LSID target, LinkedHashMap<Index, Integer> indexes,
        int originalFileIndex) {
    FileAnnotation a = (FileAnnotation) getSourceObject(FileAnnotation.class, indexes);
    a.setNs(rstring(NS_COMPANION));

    Collection<Integer> indexValues = indexes.values();
    Integer[] integerValues = indexValues.toArray(new Integer[indexValues.size()]);
    int[] values = new int[integerValues.length];
    for (int i = 0; i < integerValues.length; i++) {
        values[i] = integerValues[i].intValue();
    }
    LSID annotationKey = new LSID(FileAnnotation.class, values);
    LSID originalFileKey = new LSID(OriginalFile.class, originalFileIndex);
    addReference(target, annotationKey);
    addReference(annotationKey, originalFileKey);
}

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public void onObjectBatch(final Stream<Tuple2<Long, IBatchRecord>> batch, final Optional<Integer> batch_size,
        final Optional<JsonNode> grouping_key) {
    if (_deduplication_is_disabled.get()) {
        // no deduplication, generally shouldn't be here...
        // ...but if we are, do the best we can
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), Optional.empty()));
        return;
    }

    // Create big query

    final Tuple3<QueryComponent<JsonNode>, List<Tuple2<JsonNode, Tuple2<Long, IBatchRecord>>>, Either<String, List<String>>> fieldinfo_dedupquery_keyfields = getDedupQuery(
            batch, _dedup_fields.get(), _db_mapper.get());

    // Get duplicate results

    final Tuple2<List<String>, Boolean> fields_include = getIncludeFields(_policy.get(), _dedup_fields.get(),
            _timestamp_field.get());

    final CompletableFuture<Iterator<JsonNode>> dedup_res = fieldinfo_dedupquery_keyfields._2().isEmpty()
            ? CompletableFuture.completedFuture(Collections.<JsonNode>emptyList().iterator())
            : _dedup_context.get().getObjectsBySpec(fieldinfo_dedupquery_keyfields._1(), fields_include._1(),
                    fields_include._2()).thenApply(cursor -> cursor.iterator());

    // Wait for it to finish

    //(create handy results structure if so)
    final LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = fieldinfo_dedupquery_keyfields
            ._2().stream()
            .collect(Collector.of(
                    () -> new LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>>(),
                    (acc, t2) -> {
                        // (ie only the first element is added, duplicate elements are removed)
                        final Tuple3<Long, IBatchRecord, ObjectNode> t3 = Tuples._3T(t2._2()._1(), t2._2()._2(),
                                _mapper.createObjectNode());
                        acc.compute(t2._1(), (k, v) -> {
                            final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_list = (null == v)
                                    ? new LinkedList<>()
                                    : v;
                            new_list.add(t3);
                            return new_list;
                        });
                    }, (map1, map2) -> {
                        map1.putAll(map2);
                        return map1;
                    }));

    //TODO (ALEPH-20): add timestamps to annotation
    //TODO (ALEPH-20): support different timestamp fields for the different buckets
    //TODO (ALEPH-20): really need to support >1 current enrichment job 
    //                 ^^(Really really longer term you should be able to decide what objects you want and what you don't  <- NOTE: don't remember what i meant here)

    final Iterator<JsonNode> cursor = dedup_res.join();

    // Handle the results

    final Stream<JsonNode> records_to_delete = Lambdas.get(() -> {
        if (isCustom(_doc_schema.get().deduplication_policy())
                || _doc_schema.get().delete_unhandled_duplicates()) {
            return Optionals.streamOf(cursor, true)
                    .collect(Collectors.groupingBy(
                            ret_obj -> getKeyFieldsAgain(ret_obj, fieldinfo_dedupquery_keyfields._3())))
                    .entrySet().stream().<JsonNode>flatMap(kv -> {

                        final Optional<JsonNode> maybe_key = kv.getKey();
                        final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                                .map(key -> mutable_obj_map.get(key));

                        // Stats:
                        _mutable_stats.duplicate_keys++;
                        _mutable_stats.duplicates_existing += kv.getValue().size();
                        _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                        //DEBUG
                        //System.out.println("?? " + kv.getValue().size() + " vs " + maybe_key + " vs " + matching_records.map(x -> Integer.toString(x.size())).orElse("(no match)"));

                        return matching_records
                                .<Stream<JsonNode>>map(records -> handleDuplicateRecord(_doc_schema.get(),
                                        _custom_handler.optional().map(
                                                handler -> Tuples._2T(handler, this._custom_context.get())),
                                        _timestamp_field.get(), records, kv.getValue(), maybe_key.get(),
                                        mutable_obj_map))
                                .orElse(Stream.empty());
                    });
        } else {
            Optionals.streamOf(cursor, true).forEach(ret_obj -> {
                final Optional<JsonNode> maybe_key = getKeyFieldsAgain(ret_obj,
                        fieldinfo_dedupquery_keyfields._3());
                final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                        .map(key -> mutable_obj_map.get(key));

                //DEBUG
                //System.out.println("?? " + ret_obj + " vs " + maybe_key + " vs " + matching_record.map(x -> x._2().getJson().toString()).orElse("(no match)"));

                // Stats:
                _mutable_stats.duplicate_keys++;
                _mutable_stats.duplicates_existing++;
                _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                matching_records.ifPresent(records -> handleDuplicateRecord(_doc_schema.get(),
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        _timestamp_field.get(), records, Arrays.asList(ret_obj), maybe_key.get(),
                        mutable_obj_map));
            });
            return Stream.<JsonNode>empty();
        }
    });

    final List<Object> ids = records_to_delete.map(j -> jsonToObject(j)).filter(j -> null != j)
            .collect(Collectors.toList());

    if (!ids.isEmpty()) { // fire a bulk deletion request
        mutable_uncompleted_deletes.add(
                _dedup_context.get().deleteObjectsBySpec(CrudUtils.allOf().withAny(AnnotationBean._ID, ids)));

        _mutable_stats.deleted += ids.size();

        //(quickly see if we can reduce the number of outstanding requests)
        final Iterator<CompletableFuture<Long>> it = mutable_uncompleted_deletes.iterator();
        while (it.hasNext()) {
            final CompletableFuture<Long> cf = it.next();
            if (cf.isDone()) {
                it.remove();
            } else
                break; // ie stop as soon as we hit one that isn't complete)
        }
    }

    _mutable_stats.nonduplicate_keys += mutable_obj_map.size();

    if (Optional.ofNullable(_doc_schema.get().custom_finalize_all_objects()).orElse(false)) {
        mutable_obj_map.entrySet().stream()
                .forEach(kv -> handleCustomDeduplication(
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        kv.getValue(), Collections.emptyList(), kv.getKey()));
    } else { // Just emit the last element of each grouped object set
        mutable_obj_map.values().stream().map(t -> t.peekLast())
                .forEach(t -> _context.get().emitImmutableObject(t._1(), t._2().getJson(), Optional.of(t._3()),
                        Optional.empty(), Optional.empty()));
    }
}
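
The final branch above relies on values() preserving the order in which the grouping keys were first inserted while emitting the last element of each group; a stripped-down, hypothetical sketch of that pattern (names are illustrative):

import java.util.LinkedHashMap;
import java.util.LinkedList;

public class LastPerGroupSketch {
    public static void main(String[] args) {
        LinkedHashMap<String, LinkedList<String>> groups = new LinkedHashMap<>();
        groups.computeIfAbsent("a", k -> new LinkedList<>()).add("a1");
        groups.computeIfAbsent("a", k -> new LinkedList<>()).add("a2");
        groups.computeIfAbsent("b", k -> new LinkedList<>()).add("b1");

        // values() iterates the grouped lists in the order their keys were
        // first seen, so the last element of each group prints as: a2, b1
        groups.values().stream()
                .map(LinkedList::peekLast)
                .forEach(System.out::println);
    }
}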

From source file:org.apache.hadoop.hive.ql.optimizer.AbstractBucketJoinProc.java

protected boolean checkConvertBucketMapJoin(BucketJoinProcCtx context,
        Map<String, Operator<? extends OperatorDesc>> aliasToOpInfo, Map<Byte, List<ExprNodeDesc>> keysMap,
        String baseBigAlias, List<String> joinAliases) throws SemanticException {

    LinkedHashMap<String, List<Integer>> tblAliasToNumberOfBucketsInEachPartition = new LinkedHashMap<String, List<Integer>>();
    LinkedHashMap<String, List<List<String>>> tblAliasToBucketedFilePathsInEachPartition = new LinkedHashMap<String, List<List<String>>>();

    HashMap<String, Operator<? extends OperatorDesc>> topOps = pGraphContext.getTopOps();

    HashMap<String, String> aliasToNewAliasMap = new HashMap<String, String>();

    // (partition to bucket file names) and (partition to bucket number) for
    // the big table;
    LinkedHashMap<Partition, List<String>> bigTblPartsToBucketFileNames = new LinkedHashMap<Partition, List<String>>();
    LinkedHashMap<Partition, Integer> bigTblPartsToBucketNumber = new LinkedHashMap<Partition, Integer>();

    Integer[] joinKeyOrder = null; // accessing order of join cols to bucket cols, should be same
    boolean bigTablePartitioned = true;
    for (int index = 0; index < joinAliases.size(); index++) {
        String alias = joinAliases.get(index);
        Operator<? extends OperatorDesc> topOp = aliasToOpInfo.get(alias);
        // The alias may not be present in case of a sub-query
        if (topOp == null) {
            return false;
        }
        List<String> keys = toColumns(keysMap.get((byte) index));
        if (keys == null || keys.isEmpty()) {
            return false;
        }
        int oldKeySize = keys.size();
        TableScanOperator tso = TableAccessAnalyzer.genRootTableScan(topOp, keys);
        if (tso == null) {
            // We cannot get to root TableScan operator, likely because there is a join or group-by
            // between topOp and root TableScan operator. We don't handle that case, and simply return
            return false;
        }

        // For nested sub-queries, the alias mapping is not maintained in QB currently.
        if (topOps.containsValue(tso)) {
            for (Map.Entry<String, Operator<? extends OperatorDesc>> topOpEntry : topOps.entrySet()) {
                if (topOpEntry.getValue() == tso) {
                    String newAlias = topOpEntry.getKey();
                    if (!newAlias.equals(alias)) {
                        joinAliases.set(index, newAlias);
                        if (baseBigAlias.equals(alias)) {
                            baseBigAlias = newAlias;
                        }
                        aliasToNewAliasMap.put(alias, newAlias);
                        alias = newAlias;
                    }
                    break;
                }
            }
        } else {
            // Ideally, this should never happen, and this should be an assert.
            return false;
        }

        // The join keys cannot be transformed in the sub-query currently.
        // TableAccessAnalyzer.genRootTableScan will only return the base table scan
        // if the join keys are constants or a column. Even a simple cast of the join keys
        // will result in a null table scan operator. In case of constant join keys, they would
        // be removed, and the size before and after the genRootTableScan will be different.
        if (keys.size() != oldKeySize) {
            return false;
        }

        if (joinKeyOrder == null) {
            joinKeyOrder = new Integer[keys.size()];
        }

        Table tbl = tso.getConf().getTableMetadata();
        if (tbl.isPartitioned()) {
            PrunedPartitionList prunedParts = pGraphContext.getPrunedPartitions(alias, tso);
            List<Partition> partitions = prunedParts.getNotDeniedPartns();
            // construct a mapping of (Partition->bucket file names) and (Partition -> bucket number)
            if (partitions.isEmpty()) {
                if (!alias.equals(baseBigAlias)) {
                    tblAliasToNumberOfBucketsInEachPartition.put(alias, Arrays.<Integer>asList());
                    tblAliasToBucketedFilePathsInEachPartition.put(alias, new ArrayList<List<String>>());
                }
            } else {
                List<Integer> buckets = new ArrayList<Integer>();
                List<List<String>> files = new ArrayList<List<String>>();
                for (Partition p : partitions) {
                    if (!checkBucketColumns(p.getBucketCols(), keys, joinKeyOrder)) {
                        return false;
                    }
                    List<String> fileNames = getBucketFilePathsOfPartition(p.getDataLocation(), pGraphContext);
                    // The number of files for the table should be same as number of buckets.
                    int bucketCount = p.getBucketCount();

                    if (fileNames.size() != 0 && fileNames.size() != bucketCount) {
                        String msg = "The number of buckets for table " + tbl.getTableName() + " partition "
                                + p.getName() + " is " + p.getBucketCount()
                                + ", whereas the number of files is " + fileNames.size();
                        throw new SemanticException(ErrorMsg.BUCKETED_TABLE_METADATA_INCORRECT.getMsg(msg));
                    }

                    if (alias.equals(baseBigAlias)) {
                        bigTblPartsToBucketFileNames.put(p, fileNames);
                        bigTblPartsToBucketNumber.put(p, bucketCount);
                    } else {
                        files.add(fileNames);
                        buckets.add(bucketCount);
                    }
                }
                if (!alias.equals(baseBigAlias)) {
                    tblAliasToNumberOfBucketsInEachPartition.put(alias, buckets);
                    tblAliasToBucketedFilePathsInEachPartition.put(alias, files);
                }
            }
        } else {
            if (!checkBucketColumns(tbl.getBucketCols(), keys, joinKeyOrder)) {
                return false;
            }
            List<String> fileNames = getBucketFilePathsOfPartition(tbl.getDataLocation(), pGraphContext);
            Integer num = new Integer(tbl.getNumBuckets());

            // The number of files for the table should be same as number of buckets.
            if (fileNames.size() != 0 && fileNames.size() != num) {
                String msg = "The number of buckets for table " + tbl.getTableName() + " is "
                        + tbl.getNumBuckets() + ", whereas the number of files is " + fileNames.size();
                throw new SemanticException(ErrorMsg.BUCKETED_TABLE_METADATA_INCORRECT.getMsg(msg));
            }

            if (alias.equals(baseBigAlias)) {
                bigTblPartsToBucketFileNames.put(null, fileNames);
                bigTblPartsToBucketNumber.put(null, tbl.getNumBuckets());
                bigTablePartitioned = false;
            } else {
                tblAliasToNumberOfBucketsInEachPartition.put(alias, Arrays.asList(num));
                tblAliasToBucketedFilePathsInEachPartition.put(alias, Arrays.asList(fileNames));
            }
        }
    }

    // All tables or partitions are bucketed, and their bucket number is
    // stored in 'bucketNumbers', we need to check if the number of buckets in
    // the big table can be divided by no of buckets in small tables.
    for (Integer numBucketsInPartitionOfBigTable : bigTblPartsToBucketNumber.values()) {
        if (!checkNumberOfBucketsAgainstBigTable(tblAliasToNumberOfBucketsInEachPartition,
                numBucketsInPartitionOfBigTable)) {
            return false;
        }
    }

    context.setTblAliasToNumberOfBucketsInEachPartition(tblAliasToNumberOfBucketsInEachPartition);
    context.setTblAliasToBucketedFilePathsInEachPartition(tblAliasToBucketedFilePathsInEachPartition);
    context.setBigTblPartsToBucketFileNames(bigTblPartsToBucketFileNames);
    context.setBigTblPartsToBucketNumber(bigTblPartsToBucketNumber);
    context.setJoinAliases(joinAliases);
    context.setBaseBigAlias(baseBigAlias);
    context.setBigTablePartitioned(bigTablePartitioned);
    if (!aliasToNewAliasMap.isEmpty()) {
        context.setAliasToNewAliasMap(aliasToNewAliasMap);
    }

    return true;
}

From source file:org.exoplatform.forum.service.impl.JCRDataStorage.java

public List<CategoryFilter> filterForumByName(String forumNameFilter, String userName, int maxSize)
        throws Exception {
    SessionProvider sProvider = CommonUtils.createSystemProvider();
    try {
        Node categoryHome = getCategoryHome(sProvider);
        List<String> listOfUser = UserHelper.getAllGroupAndMembershipOfUser(userName);
        // get can create topic
        List<String> categoriesCanCreateTopics = getCategoriesCanCreateTopics(sProvider, listOfUser, true);

        Category cate = getCachedDataStorage().getCategoryIncludedSpace();
        // query forum by input-key

        StringBuffer strQuery = new StringBuffer("SELECT * FROM ");

        strQuery.append(EXO_FORUM).append(" WHERE ").append(JCR_PATH).append(" LIKE '")
                .append(categoryHome.getPath()).append("/%' AND ");
        if (cate != null) {
            strQuery.append(" NOT ").append(JCR_PATH).append(" LIKE '").append(cate.getPath())
                    .append("/%' AND ");
        }
        strQuery.append("( UPPER(").append(EXO_NAME).append(") LIKE '").append(forumNameFilter.toUpperCase())
                .append("%' OR UPPER(").append(EXO_NAME).append(") LIKE '% ")
                .append(forumNameFilter.toUpperCase()).append("%')")
                .append(Utils.getSQLQueryByProperty("AND", EXO_IS_CLOSED, "false"))
                .append(Utils.getSQLQueryByProperty("AND", EXO_IS_LOCK, "false")).append(" AND ")
                .append(getCanCreateTopicQuery(listOfUser, false)).append(" ORDER BY ").append(EXO_NAME);

        QueryManager qm = categoryHome.getSession().getWorkspace().getQueryManager();
        Query query = qm.createQuery(strQuery.toString(), Query.SQL);
        QueryImpl queryImpl = (QueryImpl) query;
        queryImpl.setCaseInsensitiveOrder(true);
        long totalSize, nextOffset = 0, gotItemNumber = 0;
        if (maxSize > 0) {
            totalSize = maxSize;
        } else {
            totalSize = query.execute().getNodes().getSize();
        }
        LinkedHashMap<String, CategoryFilter> categoryFilters = new LinkedHashMap<String, CategoryFilter>();
        QueryResult qr;
        CategoryFilter categoryFilter;
        String categoryId, categoryName, forumId, forumName;
        NodeIterator iter;
        //
        while (gotItemNumber < totalSize && nextOffset < totalSize) {
            queryImpl.setOffset(nextOffset);
            queryImpl.setLimit(totalSize);

            qr = queryImpl.execute();
            iter = qr.getNodes();
            if (iter.getSize() <= 0) {
                return new ArrayList<CategoryFilter>(categoryFilters.values());
            }

            //
            while (iter.hasNext()) {
                Node node = iter.nextNode();
                categoryId = node.getParent().getName();
                forumId = node.getName();

                // can create topic in category/forum
                if (categoriesCanCreateTopics.contains(categoryId)) {

                    if (categoryFilters.containsKey(categoryId)) {
                        categoryFilter = categoryFilters.get(categoryId);
                    } else {
                        categoryName = node.getParent().getProperty(EXO_NAME).getString();
                        categoryFilter = new CategoryFilter(categoryId, categoryName);
                        categoryFilters.put(categoryId, categoryFilter);
                    }
                    forumName = node.getProperty(EXO_NAME).getString();
                    if (categoryFilter.setForumFilter(forumId, forumName)) {
                        gotItemNumber++;
                        if (gotItemNumber == totalSize) {
                            break;
                        }
                    }
                }
            }

            nextOffset += totalSize;
        }

        return new ArrayList<CategoryFilter>(categoryFilters.values());
    } catch (Exception e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("\nCould not filter forum by name: " + forumNameFilter + e.getCause());
        }
    }
    return new ArrayList<CategoryFilter>();
}
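
A note on the pattern used above: wrapping values() in a new ArrayList takes a snapshot of the current values in insertion order, detached from the map. A minimal sketch (names are illustrative):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;

public class ValuesSnapshotSketch {
    public static void main(String[] args) {
        LinkedHashMap<String, String> filters = new LinkedHashMap<>();
        filters.put("cat1", "General");
        filters.put("cat2", "Support");

        // Copy the values() view into an independent list; later changes to
        // the map are not reflected in the copy.
        List<String> snapshot = new ArrayList<>(filters.values());
        filters.put("cat3", "Announcements");
        System.out.println(snapshot); // [General, Support]
    }
}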

From source file:com.disney.opa.dao.impl.ProductDaoImpl.java

private Product[] getChildProducts(int parentID, int languageID, int castAsTemplateID, int onlyRangeChildrens)
        throws PADataAccessException {
    if (log.isDebugEnabled()) {
        log.debug("Method getChildProducts(" + parentID + ", " + languageID + ", " + castAsTemplateID + ", "
                + onlyRangeChildrens + ") ... begin");
    }

    int childTemplateID = 0;
    Product[] array = null;
    String onlyRangeChildrensString = null;
    if (onlyRangeChildrens != JDBCUtil.IGNORED) {
        onlyRangeChildrensString = String.valueOf(onlyRangeChildrens);
    }
    // Step 1: Get all child headers
    /*
    ProductHeaderProcessor headerProcessor = new ProductHeaderProcessor();
    JDBCUtil.query(SP_GET_CHILD_PRODUCTS,
     new Object[] {String.valueOf(parentID), onlyRangeChildrensString},
     headerProcessor);
    List list = headerProcessor.getList();
    */
    String sql = JDBCUtil.query(SP_GET_CHILD_PRODUCTS,
            new Object[] { String.valueOf(parentID), onlyRangeChildrensString });

    MapSqlParameterSource parmSource = new MapSqlParameterSource();
    List<ProductHeader> list = namedJdbcTemplate.query(sql, parmSource, rowMappers.productHeaderRowMapper);

    ProductHeader[] headers = null;
    if (list != null && list.size() > 0) {
        headers = new ProductHeader[list.size()];
        list.toArray(headers);
    }

    if (headers != null && headers.length > 0) {
        LinkedHashMap map = new LinkedHashMap();

        for (int i = 0; i < headers.length; i++) {
            //Step 2: Load Attribute Set for each child template id (updated for FHB 2011 - not all child products have the same template)              
            childTemplateID = headers[i].getProductTemplateID();
            if (castAsTemplateID != JDBCUtil.IGNORED) {
                childTemplateID = castAsTemplateID;
            }
            int childProductStatusID = headers[i].getProductStatusID();
            boolean populateAvailableOptions = false;
            /*
            String sqlWithParameters = (populateAvailableOptions) ? SP_FIRSTSTEP_WITH_OPTION : SP_FIRSTSTEP_WITHOUT_OPTION;
            SetResultSetProcessor processor = new SetResultSetProcessor(populateAvailableOptions);
            JDBCUtil.query(sqlWithParameters,
                new Object[]{
             String.valueOf(childTemplateID),
             String.valueOf(childProductStatusID),
             String.valueOf(languageID)},
                processor);
            AttributeDefinitionSet set = processor.getPAAttributeDefinitionSet();
            */
            String sqlWithParameters = (populateAvailableOptions) ? SP_FIRSTSTEP_WITH_OPTION
                    : SP_FIRSTSTEP_WITHOUT_OPTION;
            String sqlChild = JDBCUtil.query(sqlWithParameters, new Object[] { String.valueOf(childTemplateID),
                    String.valueOf(childProductStatusID), String.valueOf(languageID) });
            MapSqlParameterSource parmSourceChild = new MapSqlParameterSource();
            List<AttributeDefinitionSet> listChildren = namedJdbcTemplate.query(sqlChild, parmSourceChild,
                    rowMappers.processResultSetRowMapper);
            AttributeDefinitionSet set = null;
            if (listChildren != null && listChildren.size() > 0) {
                set = listChildren.get(0);
            }

            //Step 3: Build products with headers, no attribute value yet
            ProductHeader header = headers[i];

            Product product = new Product();
            product.setID(header.getProductID());
            product.setAssociateUserID(header.getAssociateID());
            product.setTechnicalLeadUserID(header.getTechnicalLeadID());
            product.setProductStatusID(header.getProductStatusID());
            product.setCreatorID(header.getCreatorID());
            product.setLicenseeCompanyID(header.getLicenseeCompanyID());
            product.setLicenseeUserID(header.getLicenseeID());
            product.setProductTemplateID(header.getProductTemplateID());
            product.setLastUpdateDate(header.getUpdateDate());
            product.setActive(header.isActive());
            product.setParentID(header.getParentID());

            product.setAttributeDefinitionSet(set);
            AttributeDefinition[] definitions = set.getAllAttributeDefinitions();
            if (definitions != null && definitions.length > 0) {
                for (int j = 0; j < definitions.length; j++) {
                    product.initAttribute(definitions[j]);
                }
            }

            map.put(new Integer(product.getID()), product);
        }

        // Step 4: Populate attribute values for each product
        // TODO setup all the child attributes
        String processedTemplateIds = "";
        for (int i = 0; i < headers.length; i++) {
            String templateId = String.valueOf(headers[i].getProductTemplateID());
            if (processedTemplateIds.indexOf(templateId) < 0) {

                /*
                ChildrenProductAttributeProcessor attributeValuesProcessor =
                   new ChildrenProductAttributeProcessor(map);
                JDBCUtil.query(SP_GET_CHILD_PRODUCTS_ATTRIBUTES,
                    new Object[]{
                String.valueOf(languageID),
                String.valueOf(parentID),
                String.valueOf(headers[i].getProductTemplateID())},
                    attributeValuesProcessor);
                */

                sql = JDBCUtil.query(SP_GET_CHILD_PRODUCTS_ATTRIBUTES,
                        new Object[] { String.valueOf(languageID), String.valueOf(parentID),
                                String.valueOf(headers[i].getProductTemplateID()) });

                parmSource = new MapSqlParameterSource();
                List<Attribute> AttributeList = namedJdbcTemplate.query(sql, parmSource,
                        rowMappers.attributeResultSetExtractor);
                Product currentProduct = (Product) map.get(Integer.valueOf(headers[i].getProductID()));
                for (Attribute a : AttributeList) {
                    for (Attribute productAttribute : currentProduct.getAllAttributes()) {
                        if (a.getAttributeId() == productAttribute.getAttributeId()) {
                            if (null != a.getValues()) {
                                productAttribute.set(a.getValues());
                            } else {
                                productAttribute.add(a.getValue());
                            }
                            productAttribute.setDirty(false);
                        }
                    }
                }
                processedTemplateIds += templateId + ";";
            }
        }

        array = new Product[list.size()];
        map.values().toArray(array);

        // Set dirty flag to false for each attribute
        // so future update will check if an attribute is dirty to determine if an update is necessary.
        for (int i = 0; i < array.length; i++) {
            if (array[i] != null) {
                Attribute[] attributes = array[i].getAllAttributes();
                if (attributes != null && attributes.length > 0) {
                    for (int j = 0; j < attributes.length; j++) {
                        Attribute a = (Attribute) attributes[j];
                        a.setDirty(false);
                    }
                }
            }
        }
    }

    if (log.isDebugEnabled()) {
        log.debug("Method getChildProducts(" + parentID + ", " + languageID + ", " + castAsTemplateID + ", "
                + onlyRangeChildrens + ") ... end");
    }

    return array;
}

From source file:org.openmrs.module.chica.DynamicFormAccess.java

/**
 * Consume the information populated on a form.
 *
 * @param formInstance FormInstance object containing the relevant form information.
 * @param patient The patient the form belongs to.
 * @param locationTagId The location tag identifier.
 * @param encounterId The associated encounter identifier.
 * @param fieldMap Map of field name to Field object.
 * @param formFieldToValue Map of FormField to field value.
 * @param parameterHandler The parameter handler used for rule execution.
 * @param form The form containing the data to consume.
 */
private void consume(FormInstance formInstance, Patient patient, Integer locationTagId, Integer encounterId,
        HashMap<String, Field> fieldMap, LinkedHashMap<FormField, String> formFieldToValue,
        ParameterHandler parameterHandler, Form form) {
    ATDService atdService = Context.getService(ATDService.class);
    Integer locationId = formInstance.getLocationId();
    PatientState patientState = org.openmrs.module.atd.util.Util
            .getProducePatientStateByFormInstanceAction(formInstance);
    Integer sessionId = patientState.getSessionId();
    FieldType prioritizedMergeType = getFieldType("Prioritized Merge Field");

    String mode = "CONSUME";
    LinkedHashMap<String, LinkedHashMap<String, Rule>> rulesToRunByField = new LinkedHashMap<String, LinkedHashMap<String, Rule>>();
    LogicService logicService = Context.getLogicService();
    FormDatasource formDatasource = (FormDatasource) logicService.getLogicDataSource("form");
    try {
        formInstance = formDatasource.setFormFields(fieldMap, formInstance, locationTagId);
    } catch (Exception e) {
        this.log.error("Error setting form fields to be consumed");
        this.log.error(e.getMessage());
        this.log.error(Util.getStackTrace(e));
        return;
    }

    if (formInstance == null) {
        log.error("Form instance came back null");
        return;
    }

    Encounter encounter = Context.getEncounterService().getEncounter(encounterId);
    locationId = encounter.getLocation().getLocationId();
    Location location = Context.getLocationService().getLocation(locationId);
    String locationName = null;
    if (location != null) {
        locationName = location.getName();
    }

    List<Field> fieldsToAdd = new ArrayList<Field>();
    Map<Integer, PatientATD> fieldIdToPatientAtdMap = new HashMap<Integer, PatientATD>();
    for (FormField currField : formFieldToValue.keySet()) {
        org.openmrs.Field field = currField.getField();
        String fieldName = field.getName();
        Concept currConcept = field.getConcept();
        String ruleName = field.getDefaultValue();
        LinkedHashMap<String, Rule> rulesToRun = null;
        Map<String, Object> parameters = new HashMap<String, Object>();

        FormField parentField = currField.getParent();

        //if parent field is not null look at parent
        //field for rule to execute
        Rule rule = null;
        if (parentField != null) {
            FieldType currFieldType = field.getFieldType();

            if (currFieldType.equals(prioritizedMergeType)) {
                ruleName = null;//no rule to execute unless patientATD finds one   
            }

            Integer fieldId = parentField.getField().getFieldId();
            PatientATD patientATD = fieldIdToPatientAtdMap.get(fieldId);
            if (patientATD == null) {
                patientATD = atdService.getPatientATD(formInstance, fieldId);
            }

            if (patientATD != null) {
                rule = patientATD.getRule();
                ruleName = rule.getTokenName();
                fieldIdToPatientAtdMap.put(fieldId, patientATD);
            }
        }

        String lookupFieldName = null;
        Integer formFieldId = null; // DWE CHICA-437 Get the form field id here so that it can be used to determine if obs records should be voided when rules are evaluated
        if (parentField != null) {
            lookupFieldName = parentField.getField().getName();
            formFieldId = parentField.getFormFieldId();
        } else {
            lookupFieldName = fieldName;
            formFieldId = currField.getFormFieldId();
        }

        if (ruleName != null) {
            rulesToRun = rulesToRunByField.get(lookupFieldName);
            if (rulesToRun == null) {
                rulesToRun = new LinkedHashMap<String, Rule>();
                rulesToRunByField.put(lookupFieldName, rulesToRun);
            }

            Rule ruleLookup = rulesToRun.get(ruleName);
            if (ruleLookup == null) {
                if (rule != null) {
                    ruleLookup = rule;
                } else {
                    ruleLookup = new Rule();
                    ruleLookup.setTokenName(ruleName);
                }
                ruleLookup.setParameters(parameters);
                rulesToRun.put(ruleName, ruleLookup);
            } else {
                parameters = ruleLookup.getParameters();
            }
        }

        //------------start set rule parameters
        parameters.put("sessionId", sessionId);
        parameters.put("formInstance", formInstance);
        parameters.put("locationTagId", locationTagId);
        parameters.put("locationId", locationId);
        parameters.put("location", locationName);
        parameters.put("mode", mode);
        parameters.put("encounterId", encounterId);
        if (rule != null) {
            parameters.put("ruleId", rule.getRuleId());
        }

        if (currConcept != null) {
            try {
                String elementString = ((ConceptName) currConcept.getNames().toArray()[0]).getName();
                parameters.put("concept", elementString);
            } catch (Exception e) {
                parameters.put("concept", null);
            }
        } else {
            parameters.put("concept", null);
        }

        if (fieldName != null) {
            parameters.put("fieldName", lookupFieldName);
            String value = formFieldToValue.get(currField);
            parameters.put(lookupFieldName, value);
            Field saveField = new Field();
            saveField.setId(fieldName);
            saveField.setValue(value);
            fieldsToAdd.add(saveField);
        }

        // DWE CHICA-437 
        if (formFieldId != null) {
            parameters.put("formFieldId", formFieldId);
        }

        //----------end set rule parameters
    }

    HashMap<String, Integer> childIndex = new HashMap<String, Integer>();

    for (FormField currField : formFieldToValue.keySet()) {
        LinkedHashMap<String, Rule> rulesToRun = null;
        Map<String, Object> parameters = new HashMap<String, Object>();
        FormField parentField = currField.getParent();

        //look for parentField
        if (parentField != null) {
            FieldType parentFieldType = parentField.getField().getFieldType();

            String parentRuleName = parentField.getField().getDefaultValue();
            String parentFieldName = parentField.getField().getName();

            if (parentFieldType.equals(prioritizedMergeType)) {
                parentRuleName = null;//no rule to execute unless patientATD finds one   
            }

            Integer fieldId = parentField.getField().getFieldId();
            PatientATD patientATD = fieldIdToPatientAtdMap.get(fieldId);
            if (patientATD == null) {
                patientATD = atdService.getPatientATD(formInstance, fieldId);
            }

            if (patientATD != null) {
                Rule rule = patientATD.getRule();
                parentRuleName = rule.getTokenName();
                fieldIdToPatientAtdMap.put(fieldId, patientATD);
            }
            //if there is a parent rule, add a parameter for the child's fieldname
            //add the parent rule if it is not in rules to run
            if (parentRuleName != null) {
                rulesToRun = rulesToRunByField.get(parentFieldName);
                if (rulesToRun == null) {
                    rulesToRun = new LinkedHashMap<String, Rule>();
                    rulesToRunByField.put(parentFieldName, rulesToRun);
                }

                Rule ruleLookup = rulesToRun.get(parentRuleName);

                if (ruleLookup == null) {
                    ruleLookup = new Rule();
                    ruleLookup.setParameters(parameters);
                    ruleLookup.setTokenName(parentRuleName);
                    rulesToRun.put(parentRuleName, ruleLookup);
                } else {
                    parameters = ruleLookup.getParameters();
                }

                String childFieldName = currField.getField().getName();
                Integer index = childIndex.get(parentFieldName);
                if (index == null) {
                    index = 0;
                }
                parameters.put("child" + index, childFieldName);
                parameters.put(childFieldName, formFieldToValue.get(currField));
                childIndex.put(parentFieldName, ++index);
            }
        }
    }

    //run all the consume rules
    Integer formInstanceId = formInstance.getFormInstanceId();
    String formName = form.getName();
    String formType = org.openmrs.module.chirdlutil.util.Util.getFormType(form.getFormId(), locationTagId,
            locationId); // CHICA-1234 Look up the formType
    for (LinkedHashMap<String, Rule> rulesToRun : rulesToRunByField.values()) {
        for (String currRuleName : rulesToRun.keySet()) {
            Rule rule = rulesToRun.get(currRuleName);
            Map<String, Object> parameters = rule.getParameters();
            parameterHandler.addParameters(parameters, fieldMap, formType); // CHICA-1234 Added formType parameter
            atdService.evaluateRule(currRuleName, patient, parameters);
            setScannedTimestamps(formInstanceId, rule.getRuleId(), formName, locationId);
        }
    }

    // DWE CHICA-430 Now that rules have run and obs records have been added/updated/voided
    // create the list of fields to remove from the xml
    List<String> elementsToRemoveList = createElementsToRemoveList(form, formInstanceId, encounter,
            locationTagId, locationId);

    fieldIdToPatientAtdMap.clear();
    serializeFields(formInstance, locationTagId, fieldsToAdd, elementsToRemoveList); // DWE CHICA-430 Add elementsToRemoveList
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

public void addPartition(String dbName, String tblName, AddPartitionDesc addPartitionDesc)
        throws InvalidObjectException, MetaException {
    boolean success = false;

    Connection con = null;
    PreparedStatement ps = null;
    Statement stmt = null;
    dbName = dbName.toLowerCase();
    tblName = tblName.toLowerCase();

    boolean isPathMaked = false;
    ArrayList<Path> pathToMake = new ArrayList<Path>();
    Warehouse wh = new Warehouse(hiveConf);

    long tblID = 0;

    try {
        con = getSegmentConnection(dbName);
    } catch (MetaStoreConnectException e1) {
        LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                + addPartitionDesc.getLevel() + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                + addPartitionDesc.getLevel() + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setAutoCommit(false);
        con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
        stmt = con.createStatement();

        String tblType = null;
        boolean hasPriPart = false;
        boolean hasSubPart = false;
        String priPartKey = null;
        String subPartKey = null;
        String priPartType = null;
        String subPartType = null;

        String priKeyType = null;
        String subKeyType = null;
        ResultSet tblSet = null;
        boolean isTblFind = false;
        boolean isColFind = false;

        String tblFormat = null;
        String tblLocation = null;

        PrimitiveTypeInfo pti = null;
        ObjectInspector StringIO = null;
        ObjectInspector ValueIO = null;
        ObjectInspectorConverters.Converter converter1 = null;
        ObjectInspectorConverters.Converter converter2 = null;

        ArrayList<String> partToAdd = new ArrayList<String>();
        String sql = null;

        HiveConf hconf = (HiveConf) hiveConf;
        boolean externalPartition = hconf.getBoolVar(HiveConf.ConfVars.HIVESUPPORTEXTERNALPARTITION);

        if (addPartitionDesc.getLevel() == 0) {
            sql = "SELECT tbl_id, tbl_type, pri_part_type, pri_part_key, tbl_format, tbl_location"
                    + " from TBLS where db_name='" + dbName + "' and tbl_name='" + tblName + "'";

            tblSet = stmt.executeQuery(sql);
            isTblFind = false;

            while (tblSet.next()) {
                isTblFind = true;
                tblID = tblSet.getLong(1);
                tblType = tblSet.getString(2);
                priPartKey = tblSet.getString(4);
                priPartType = tblSet.getString(3);
                tblFormat = tblSet.getString(5);
                tblLocation = tblSet.getString(6);

                if (priPartType != null && !priPartType.isEmpty()) {
                    hasPriPart = true;
                }
                break;
            }
            tblSet.close();

            if (!isTblFind) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "can not find table " + dbName + ":"
                        + tblName);

                throw new MetaException("can not find table " + dbName + ":" + tblName);
            }

            if (!tblType.equalsIgnoreCase("MANAGED_TABLE")) {
                if (tblType.equalsIgnoreCase("EXTERNAL_TABLE") && tblFormat != null
                        && tblFormat.equalsIgnoreCase("pgdata")) {
                    LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                            + addPartitionDesc.getLevel() + ", msg=" + tblType + ":" + tblFormat
                            + " can not support alter partition");
                    throw new MetaException(tblType + ":" + tblFormat + " can not support alter partition");
                }

                if (externalPartition && tblType.equalsIgnoreCase("EXTERNAL_TABLE")
                        && (tblFormat == null || !tblFormat.equalsIgnoreCase("pgdata"))) {
                } else {
                    LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                            + addPartitionDesc.getLevel() + ", msg=" + tblType
                            + " can not support alter partition");

                    throw new MetaException(tblType + " can not support alter partition");
                }
            }

            if (!hasPriPart) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "table " + dbName + ":" + tblName
                        + " is not pri-partitioned");

                throw new MetaException("table " + dbName + ":" + tblName + " is not pri-partitioned");
            }

            sql = "SELECT type_name from COLUMNS where tbl_id=" + tblID + " and column_name='"
                    + priPartKey.toLowerCase() + "'";
            isColFind = false;
            ResultSet colSet = stmt.executeQuery(sql);
            while (colSet.next()) {
                isColFind = true;
                priKeyType = colSet.getString(1);
                break;
            }
            colSet.close();

            if (!isColFind) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "table "
                        + "can not find partition key information " + priPartKey);

                throw new MetaException("can not find partition key information " + priPartKey);
            }

            pti = new PrimitiveTypeInfo();
            pti.setTypeName(priKeyType);
            StringIO = PrimitiveObjectInspectorFactory
                    .getPrimitiveJavaObjectInspector(PrimitiveCategory.STRING);
            ValueIO = PrimitiveObjectInspectorFactory
                    .getPrimitiveWritableObjectInspector(pti.getPrimitiveCategory());
            converter1 = ObjectInspectorConverters.getConverter(StringIO, ValueIO);
            converter2 = ObjectInspectorConverters.getConverter(StringIO, ValueIO);

            if ((addPartitionDesc.getPartType().equalsIgnoreCase("RANGE_PARTITION")
                    && !priPartType.equalsIgnoreCase("range"))
                    || (addPartitionDesc.getPartType().equalsIgnoreCase("LIST_PARTITION")
                            && !priPartType.equalsIgnoreCase("list"))) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "can not add  a "
                        + addPartitionDesc.getPartType() + " partition, but the pri-partition type is "
                        + priPartType);

                throw new MetaException("can not add  a " + addPartitionDesc.getPartType()
                        + " partition, but the pri-partition type is " + priPartType);
            }

            LinkedHashMap<String, List<String>> partSpaces = new LinkedHashMap<String, List<String>>();
            Set<String> subPartNameSet = new TreeSet<String>();

            sql = "SELECT level, part_name, part_values from PARTITIONS where" + " tbl_id=" + tblID;// + " order by level asc";

            ResultSet partSet = stmt.executeQuery(sql);
            int partLevel = 0;

            while (partSet.next()) {
                partLevel = partSet.getInt(1);

                if (partLevel == 0) {
                    String partName = partSet.getString(2);
                    List<String> valueList = new ArrayList<String>();
                    Array spaceArray = partSet.getArray(3);

                    ResultSet priValueSet = spaceArray.getResultSet();

                    while (priValueSet.next()) {
                        valueList.add(priValueSet.getString(2));
                    }

                    partSpaces.put(partName, valueList);
                } else if (partLevel == 1) {
                    String partName = partSet.getString(2);
                    subPartNameSet.add(partName);
                }
            }
            partSet.close();

            partToAdd = new ArrayList<String>();

            LinkedHashMap<String, List<String>> addPartSpaces = (LinkedHashMap<String, List<String>>) addPartitionDesc
                    .getParSpaces();

            Iterator<String> itr = addPartSpaces.keySet().iterator();

            while (itr.hasNext()) {
                String key = itr.next().toLowerCase();
                if (partSpaces.containsKey(key)) {
                    LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                            + addPartitionDesc.getLevel() + ", msg=" + "table : " + tblName
                            + " have already contain a pri parititon named: " + key);

                    throw new MetaException(
                            "table : " + tblName + " have already contain a pri parititon named: " + key);
                }
                partToAdd.add(key);
            }

            Iterator<List<String>> listItr = addPartSpaces.values().iterator();

            while (listItr.hasNext()) {
                Iterator<String> valueItr = listItr.next().iterator();
                if (valueItr.hasNext()) {
                    String value = valueItr.next();

                    if (converter1.convert(value) == null) {
                        LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                                + addPartitionDesc.getLevel() + ", msg=" + "value : " + value
                                + " should be type of " + priKeyType);

                        throw new MetaException("value : " + value + " should be type of " + priKeyType);
                    }

                    Iterator<List<String>> PartValuesItr = partSpaces.values().iterator();
                    while (PartValuesItr.hasNext()) {
                        if (PartValuesItr.next().contains(value)) {
                            LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                                    + addPartitionDesc.getLevel() + ", msg=" + "table : " + tblName
                                    + " have already contain a pri partition contain value: " + value);

                            throw new MetaException("table : " + tblName
                                    + " have already contain a pri partition contain value: " + value);
                        }
                    }
                }
            }

            ps = con.prepareStatement(
                    "INSERT INTO partitions(level, tbl_id, " + " part_name, part_values) values(?,?,?,?)");

            for (Map.Entry<String, List<String>> entry : addPartSpaces.entrySet()) {
                ps.setInt(1, 0);
                ps.setLong(2, tblID);

                Array spaceArray = con.createArrayOf("varchar", entry.getValue().toArray());
                ps.setArray(4, spaceArray);
                ps.setString(3, entry.getKey());

                ps.addBatch();
            }
            ps.executeBatch();

            if (!tblType.equalsIgnoreCase("EXTERNAL_TABLE")) {
                for (String partName : partToAdd) {
                    if (tblLocation == null || tblLocation.trim().isEmpty()) {
                        pathToMake.addAll(wh.getPriPartitionPaths(dbName, tblName, partName, subPartNameSet));
                    } else {
                        pathToMake.addAll(Warehouse.getPriPartitionPaths(new Path(tblLocation), partName,
                                subPartNameSet));
                    }
                }
            } else {
                for (String partName : partToAdd) {
                    pathToMake.addAll(
                            Warehouse.getPriPartitionPaths(new Path(tblLocation), partName, subPartNameSet));
                }
            }
        } else if (addPartitionDesc.getLevel() == 1) {
            sql = "SELECT tbl_id, tbl_type, sub_part_type, sub_part_key, tbl_format, tbl_location"
                    + " from TBLS where db_name='" + dbName.toLowerCase() + "' and tbl_name='"
                    + tblName.toLowerCase() + "'";

            tblSet = stmt.executeQuery(sql);
            isTblFind = false;

            while (tblSet.next()) {
                isTblFind = true;
                tblID = tblSet.getLong(1);
                tblType = tblSet.getString(2);
                subPartKey = tblSet.getString(4);
                subPartType = tblSet.getString(3);
                tblFormat = tblSet.getString(5);
                tblLocation = tblSet.getString(6);

                if (subPartType != null && !subPartType.isEmpty()) {
                    hasSubPart = true;
                }

                break;
            }

            tblSet.close();
            if (!isTblFind) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "can not find table " + dbName + ":"
                        + tblName);

                throw new MetaException("can not find table " + dbName + ":" + tblName);
            }

            if (!tblType.equalsIgnoreCase("MANAGED_TABLE")) {
                if (tblType.equalsIgnoreCase("EXTERNAL_TABLE") && tblFormat != null
                        && tblFormat.equalsIgnoreCase("pgdata")) {
                    LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                            + addPartitionDesc.getLevel() + ", msg=" + tblType + ":" + tblFormat
                            + " can not support alter partition");
                    throw new MetaException(tblType + ":" + tblFormat + " can not support alter partition");
                }

                if (externalPartition && tblType.equalsIgnoreCase("EXTERNAL_TABLE")
                        && (tblFormat == null || !tblFormat.equalsIgnoreCase("pgdata"))) {
                } else {
                    LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                            + addPartitionDesc.getLevel() + ", msg=" + tblType
                            + " can not support alter partition");

                    throw new MetaException(tblType + " can not support alter partition");
                }
            }

            if (!hasSubPart) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "table " + dbName + ":" + tblName
                        + " is not sun-partitioned");

                throw new MetaException("table " + dbName + ":" + tblName + " is not sun-partitioned");
            }

            sql = "SELECT type_name from COLUMNS where tbl_id=" + tblID + " and column_name='"
                    + subPartKey.toLowerCase() + "'";

            isColFind = false;
            ResultSet colSet = stmt.executeQuery(sql);
            while (colSet.next()) {
                isColFind = true;
                subKeyType = colSet.getString(1);
                break;
            }

            colSet.close();

            if (!isColFind) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "can not find partition key information "
                        + priPartKey);

                throw new MetaException("can not find partition key information " + priPartKey);
            }

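            // Build converters from string literals to the sub-partition key's primitive type (used to validate values below).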
            pti = new PrimitiveTypeInfo();
            pti.setTypeName(subKeyType);
            StringIO = PrimitiveObjectInspectorFactory
                    .getPrimitiveJavaObjectInspector(PrimitiveCategory.STRING);
            ValueIO = PrimitiveObjectInspectorFactory
                    .getPrimitiveWritableObjectInspector(pti.getPrimitiveCategory());
            converter1 = ObjectInspectorConverters.getConverter(StringIO, ValueIO);
            converter2 = ObjectInspectorConverters.getConverter(StringIO, ValueIO);

            if ((addPartitionDesc.getPartType().equalsIgnoreCase("RANGE_PARTITION")
                    && !subPartType.equalsIgnoreCase("range"))
                    || (addPartitionDesc.getPartType().equalsIgnoreCase("LIST_PARTITION")
                            && !subPartType.equalsIgnoreCase("list"))) {
                LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                        + addPartitionDesc.getLevel() + ", msg=" + "you can not add  a "
                        + addPartitionDesc.getPartType() + " partition, but the sub-partition type is "
                        + subPartType);

                throw new MetaException("you can not add  a " + addPartitionDesc.getPartType()
                        + " partition, but the sub-partition type is " + subPartType);
            }

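            // Load the existing layout: sub-partition (level 1) value lists keyed by partition name, and primary (level 0) partition names.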
            LinkedHashMap<String, List<String>> partSpaces = new LinkedHashMap<String, List<String>>();
            Set<String> partNameSet = new TreeSet<String>();

            sql = "SELECT level,  part_name, part_values from PARTITIONS where" + " tbl_id=" + tblID;// + " order by level asc";

            ResultSet partSet = stmt.executeQuery(sql);
            int partLevel = 0;

            while (partSet.next()) {
                partLevel = partSet.getInt(1);

                if (partLevel == 1) {
                    String partName = partSet.getString(2);
                    List<String> valueList = new ArrayList<String>();
                    Array spaceArray = partSet.getArray(3);

                    ResultSet priValueSet = spaceArray.getResultSet();

                    while (priValueSet.next()) {
                        valueList.add(priValueSet.getString(2));
                    }
                    partSpaces.put(partName, valueList);
                } else if (partLevel == 0) {
                    String partName = partSet.getString(2);
                    partNameSet.add(partName);
                }
            }

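            // Validate the requested sub-partition names: reject names that already exist and the reserved "default" name.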
            partToAdd = new ArrayList<String>();

            LinkedHashMap<String, List<String>> addPartSpaces = (LinkedHashMap<String, List<String>>) addPartitionDesc
                    .getParSpaces();

            Iterator<String> itr = addPartSpaces.keySet().iterator();

            while (itr.hasNext()) {
                String key = itr.next().toLowerCase();
                if (partSpaces.containsKey(key)) {
                    LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                            + addPartitionDesc.getLevel() + ", msg=" + "table : " + tblName
                            + " have already contain a sub parititon named: " + key);

                    throw new MetaException(
                            "table : " + tblName + " have already contain a sub parititon named: " + key);
                }

                if (key.equalsIgnoreCase("default")) {
                    LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                            + addPartitionDesc.getLevel() + ", msg="
                            + "use : 'alter table tblname add default subpartition' to add default subpartition!");

                    throw new MetaException(
                            "use : 'alter table tblname add default subpartition' to add default subpartition!");
                }
                partToAdd.add(key);
            }

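            // Check the first declared value of each new sub-partition: it must convert to the key type and must not
            // already appear in any existing value list (walked via LinkedHashMap.values(), in insertion order).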
            Iterator<List<String>> listItr = addPartSpaces.values().iterator();

            while (listItr.hasNext()) {
                Iterator<String> valueItr = listItr.next().iterator();
                if (valueItr.hasNext()) {
                    String value = valueItr.next();

                    if (converter1.convert(value) == null) {
                        LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                                + addPartitionDesc.getLevel() + ", msg=" + "value : " + value
                                + " should be type of " + priKeyType);

                        throw new MetaException("value : " + value + " should be type of " + priKeyType);
                    }

                    Iterator<List<String>> PartValuesItr = partSpaces.values().iterator();
                    while (PartValuesItr.hasNext()) {
                        if (PartValuesItr.next().contains(value)) {
                            LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                                    + addPartitionDesc.getLevel() + ", msg=" + "table : " + tblName
                                    + " have already contain a sub partition contain value: " + value);

                            throw new MetaException("table : " + tblName
                                    + " have already contain a sub partition contain value: " + value);
                        }
                    }
                }
            }

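            // Persist the new sub-partitions (level 1) into PARTITIONS with a single JDBC batch.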
            ps = con.prepareStatement(
                    "INSERT INTO partitions(level, tbl_id, " + " part_name, part_values) values(?,?,?,?)");

            for (Map.Entry<String, List<String>> entry : addPartSpaces.entrySet()) {
                ps.setInt(1, 1);
                ps.setLong(2, tblID);

                Array spaceArray = con.createArrayOf("varchar", entry.getValue().toArray());
                ps.setArray(4, spaceArray);
                ps.setString(3, entry.getKey());

                ps.addBatch();
            }
            ps.executeBatch();

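            // Collect the HDFS directories the new sub-partitions need; they are created only after the metadata commit succeeds.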
            if (!tblType.equalsIgnoreCase("EXTERNAL_TABLE")) {
                for (String partName : partToAdd) {
                    if (tblLocation == null || tblLocation.trim().isEmpty()) {
                        pathToMake.addAll(wh.getSubPartitionPaths(dbName, tblName, partNameSet, partName));
                    } else {
                        pathToMake.addAll(
                                Warehouse.getSubPartitionPaths(new Path(tblLocation), partNameSet, partName));
                    }
                }
            } else {
                for (String partName : partToAdd) {
                    pathToMake.addAll(
                            Warehouse.getSubPartitionPaths(new Path(tblLocation), partNameSet, partName));
                }
            }
        }

        con.commit();
        success = true;
    } catch (SQLException ex) {
        ex.printStackTrace();
        LOG.error("add partition error, db=" + dbName + ", tbl=" + tblName + ", level="
                + addPartitionDesc.getLevel() + ", msg=" + ex.getMessage());

        throw new MetaException(ex.getMessage());
    } finally {
        if (!success) {
            try {
                con.rollback();
            } catch (SQLException e) {
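                // Ignore rollback failures so the original exception keeps propagating.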
            }

            if (isPathMaked) {
                for (Path path : pathToMake) {
                    wh.deleteDir(path, false);
                }
            }
        }

        closeStatement(ps);
        closeConnection(con);
    }

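    // Metadata is committed; now create the HDFS directories. If any mkdir fails, drop the new
    // partition metadata, remove directories already created, and report the failure.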
    if (success) {
        boolean mkDirOK = false;
        List<Path> createdPath = new ArrayList<Path>();
        try {
            for (Path path : pathToMake) {
                mkDirOK = wh.mkdirs(path);
                if (!mkDirOK) {
                    break;
                }

                createdPath.add(path);
            }
        } catch (Exception x) {
            mkDirOK = false;
        }

        if (!mkDirOK) {
            dropPartitionMeta(dbName, tblID, addPartitionDesc);
            if (!createdPath.isEmpty()) {
                for (Path path : createdPath) {
                    wh.deleteDir(path, true);
                }
            }

            throw new MetaException("can not create hdfs path, add partition failed");
        }

    }
}
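
The duplicate-value check in the method above iterates LinkedHashMap.values(), which yields each sub-partition's value list in insertion order. The following is a minimal, self-contained sketch of that pattern only; the class name, map contents, and candidate value are illustrative and do not come from the metastore code, which throws a MetaException instead of printing.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;

public class SubPartitionValueCheck {
    public static void main(String[] args) {
        // Existing sub-partitions: partition name -> declared values (insertion order is preserved).
        LinkedHashMap<String, List<String>> partSpaces = new LinkedHashMap<String, List<String>>();
        partSpaces.put("sp_2023", Arrays.asList("2023-01", "2023-02"));
        partSpaces.put("sp_2024", Arrays.asList("2024-01"));

        // Value the caller wants to add as a new sub-partition.
        String candidate = "2023-02";
        boolean alreadyCovered = false;

        // values() hands back the value lists in the order the partitions were inserted;
        // the candidate is rejected if any existing list already contains it.
        for (List<String> existing : partSpaces.values()) {
            if (existing.contains(candidate)) {
                alreadyCovered = true;
                break;
            }
        }

        System.out.println(alreadyCovered
                ? "reject: an existing sub-partition already contains " + candidate
                : "ok to add: " + candidate);
    }
}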