Example usage for java.util.Set.clear()

List of usage examples for java.util.Set.clear()

Introduction

On this page you can find example usages of java.util.Set.clear().

Prototype

void clear();

Document

Removes all of the elements from this set (optional operation).
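
A minimal, self-contained sketch of the call is shown below. The class and variable names (SetClearExample, tags) are illustrative only and are not taken from the examples that follow.

import java.util.HashSet;
import java.util.Set;

public class SetClearExample {
    public static void main(String[] args) {
        Set<String> tags = new HashSet<>();
        tags.add("alpha");
        tags.add("beta");

        // clear() removes every element; the set instance itself remains usable.
        tags.clear();

        System.out.println(tags.isEmpty()); // prints "true"
        tags.add("gamma"); // the same set can be refilled afterwards
    }
}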

Usage

From source file:com.datatorrent.stram.StreamingContainerManager.java

/**
 * Transitively add operators that are container local to the dependency set.
 * (All downstream operators were traversed during checkpoint update.)
 *
 * @param ctx the update checkpoints context
 */
private void includeLocalUpstreamOperators(UpdateCheckpointsContext ctx) {
    Set<PTOperator> newOperators = Sets.newHashSet();
    // repeat until no more local upstream operators are found
    do {
        newOperators.clear();
        for (PTOperator oper : ctx.visited) {
            for (PTInput input : oper.getInputs()) {
                if (input.source.source.getContainer() == oper.getContainer()) {
                    if (!ctx.visited.contains(input.source.source)) {
                        newOperators.add(input.source.source);
                    }
                }
            }
        }
        if (!newOperators.isEmpty()) {
            for (PTOperator oper : newOperators) {
                updateRecoveryCheckpoints(oper, ctx);
            }
        }
    } while (!newOperators.isEmpty());
}
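
The method above reuses a single newOperators set across passes of the do/while loop, calling clear() at the start of each pass instead of allocating a new set. A stripped-down, self-contained sketch of that clear-and-reuse idiom (using hypothetical adjacency data and names in place of the operator graph) might look like this:

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ClearAndReuseLoop {
    public static void main(String[] args) {
        // Hypothetical adjacency data standing in for the operator/input graph above.
        Map<Integer, List<Integer>> upstream = Map.of(
                1, List.of(2),
                2, List.of(3),
                3, List.of());

        Set<Integer> visited = new HashSet<>(Set.of(1));
        Set<Integer> discovered = new HashSet<>();
        // repeat until no more upstream nodes are found
        do {
            discovered.clear(); // reuse the same set on every pass
            for (int node : visited) {
                for (int next : upstream.getOrDefault(node, List.of())) {
                    if (!visited.contains(next)) {
                        discovered.add(next);
                    }
                }
            }
            visited.addAll(discovered);
        } while (!discovered.isEmpty());

        System.out.println(visited); // e.g. [1, 2, 3]
    }
}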

From source file:com.linkedin.pinot.controller.helix.core.realtime.PinotLLCRealtimeSegmentManagerTest.java

public void testAutoReplaceConsumingSegment(final String tableConfigStartOffset) throws Exception {
    FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(true, null);
    final int nPartitions = 8;
    final int nInstances = 3;
    final int nReplicas = 2;

    final String topic = "someTopic";
    final String rtTableName = "table_REALTIME";
    List<String> instances = getInstanceList(nInstances);
    final String startOffset = KAFKA_OFFSET;

    IdealState idealState = PinotTableIdealStateBuilder
            .buildEmptyKafkaConsumerRealtimeIdealStateFor(rtTableName, nReplicas);
    // For the setupHelix method, the kafka offset config specified here cannot be "smallest" or "largest", otherwise
    // the kafka consumer wrapper tries to connect to Kafka and fetch partitions. We set it to the "testDummy" value here.
    TableConfig tableConfig = makeTableConfig(rtTableName, nReplicas, KAFKA_OFFSET, DUMMY_HOST, topic,
            DEFAULT_SERVER_TENANT, DEFAULT_STREAM_ASSIGNMENT_STRATEGY);
    segmentManager.addTableToStore(rtTableName, tableConfig, nPartitions);
    StreamMetadata streamMetadata = makeKafkaStreamMetadata(topic, KAFKA_OFFSET, DUMMY_HOST);
    RealtimeTagConfig realtimeTagConfig = new RealtimeTagConfig(tableConfig, null);
    segmentManager.setupHelixEntries(realtimeTagConfig, streamMetadata, nPartitions, instances, idealState,
            false);
    // Add another segment for each partition
    long now = System.currentTimeMillis();
    List<String> existingSegments = new ArrayList<>(segmentManager._idealStateEntries.keySet());
    final int partitionToBeFixed = 3;
    final int partitionWithHigherOffset = 4;
    final int emptyPartition = 5;
    final long smallestPartitionOffset = 0x259080984568L;
    final long largestPartitionOffset = smallestPartitionOffset + 100000;
    final long higherOffset = smallestPartitionOffset + 100;
    for (String segmentNameStr : existingSegments) {
        LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr);
        switch (segmentName.getPartitionId()) {
        case partitionToBeFixed:
            // Do nothing, we will test adding a new segment for this partition when there is only one segment in there.
            break;
        case emptyPartition:
            // Remove existing segment, so we can test adding a new segment for this partition when none exists
            segmentManager._idealStateEntries.remove(segmentNameStr);
            break;
        case partitionWithHigherOffset:
            // Set segment metadata for this segment such that its offset is higher than the startOffset we get from kafka.
            // In that case, we should choose the new segment offset as this one rather than the one kafka hands us.
            LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata();
            metadata.setSegmentName(segmentName.getSegmentName());
            metadata.setEndOffset(higherOffset);
            metadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
            segmentManager._metadataMap.put(segmentName.getSegmentName(), metadata);
            break;
        default:
            // Add a second segment for this partition. It will not be repaired.
            LLCSegmentName newSegmentName = new LLCSegmentName(segmentName.getTableName(),
                    segmentName.getPartitionId(), segmentName.getSequenceNumber() + 1, now);
            List<String> hosts = segmentManager._idealStateEntries.get(segmentNameStr);
            segmentManager._idealStateEntries.put(newSegmentName.getSegmentName(), hosts);
            break;
        }
    }

    // Now we make another table config that has the correct offset property ("smallest" or "largest"),
    // which works correctly with the createConsumingSegment method.
    TableConfig tableConfig2 = makeTableConfig(rtTableName, nReplicas, tableConfigStartOffset, DUMMY_HOST,
            topic, DEFAULT_SERVER_TENANT, DEFAULT_STREAM_ASSIGNMENT_STRATEGY);
    segmentManager.addTableToStore(rtTableName, tableConfig2, nPartitions);

    Set<Integer> nonConsumingPartitions = new HashSet<>(1);
    nonConsumingPartitions.add(partitionToBeFixed);
    nonConsumingPartitions.add(partitionWithHigherOffset);
    nonConsumingPartitions.add(emptyPartition);
    segmentManager._kafkaSmallestOffsetToReturn = smallestPartitionOffset;
    segmentManager._kafkaLargestOffsetToReturn = largestPartitionOffset;
    existingSegments = new ArrayList<>(segmentManager._idealStateEntries.keySet());
    segmentManager._paths.clear();
    segmentManager._records.clear();
    segmentManager.createConsumingSegment(rtTableName, nonConsumingPartitions, existingSegments, tableConfig2);
    Assert.assertEquals(segmentManager._paths.size(), 3);
    Assert.assertEquals(segmentManager._records.size(), 3);
    Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 3);
    Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 3);

    int found = 0;
    int index = 0;
    while (index < segmentManager._paths.size()) {
        String znodePath = segmentManager._paths.get(index);
        int slash = znodePath.lastIndexOf('/');
        String segmentNameStr = znodePath.substring(slash + 1);
        LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr);
        ZNRecord znRecord;
        LLCRealtimeSegmentZKMetadata metadata;
        switch (segmentName.getPartitionId()) {
        case partitionToBeFixed:
            // We had left this partition with one segment. So, a second one should be created with a sequence number
            // one higher than the starting one. Its start offset should be what kafka returns.
            found++;
            Assert.assertEquals(segmentName.getSequenceNumber(),
                    PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 1);
            znRecord = segmentManager._records.get(index);
            metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
            Assert.assertEquals(metadata.getNumReplicas(), 2);
            Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
            break;
        case emptyPartition:
            // We had removed all segments from this partition. A new one should be created with the offset returned
            // by kafka and with the starting sequence number.
            found++;
            Assert.assertEquals(segmentName.getSequenceNumber(),
                    PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER);
            znRecord = segmentManager._records.get(index);
            metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
            Assert.assertEquals(metadata.getNumReplicas(), 2);
            if (tableConfigStartOffset.equals("smallest")) {
                Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
            } else {
                Assert.assertEquals(metadata.getStartOffset(), largestPartitionOffset);
            }
            break;
        case partitionWithHigherOffset:
            // We had left this partition with one segment. In addition, we had the end-offset of the first segment set to
            // a value higher than that returned by kafka. So, a second one should be created with a sequence number one
            // higher than the starting one, and its start offset should equal the end offset of the first segment.
            found++;
            Assert.assertEquals(segmentName.getSequenceNumber(),
                    PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 1);
            znRecord = segmentManager._records.get(index);
            metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
            Assert.assertEquals(metadata.getNumReplicas(), 2);
            Assert.assertEquals(metadata.getStartOffset(), higherOffset);
            break;
        }
        index++;
    }

    // We should see all three cases here.
    Assert.assertEquals(3, found);

    // Now, if we make 'partitionToBeFixed' a non-consuming partition, a second segment should get added with the same
    // start offset as the first one, since the kafka offset to return has not changed.
    Set<Integer> ncPartitions = new HashSet<>(1);
    ncPartitions.add(partitionToBeFixed);
    segmentManager.createConsumingSegment(rtTableName, ncPartitions,
            segmentManager.getExistingSegments(rtTableName), tableConfig);
    Assert.assertEquals(segmentManager._paths.size(), 4);
    Assert.assertEquals(segmentManager._records.size(), 4);
    Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 4);
    Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 4);
    // The latest zn record should be that of the new one we added.
    ZNRecord znRecord = segmentManager._records.get(3);
    LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
    Assert.assertEquals(metadata.getNumReplicas(), 2);
    Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
    LLCSegmentName llcSegmentName = new LLCSegmentName(metadata.getSegmentName());
    Assert.assertEquals(llcSegmentName.getSequenceNumber(),
            PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 2);
    Assert.assertEquals(llcSegmentName.getPartitionId(), partitionToBeFixed);

    // Now pretend the prev segment ended successfully, and set the end offset
    metadata.setEndOffset(metadata.getStartOffset() + 10);
    metadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
    segmentManager._records.remove(3);
    segmentManager._records.add(metadata.toZNRecord());
    segmentManager._metadataMap.put(metadata.getSegmentName(), metadata);

    segmentManager._kafkaLargestOffsetToReturn *= 2;
    segmentManager._kafkaSmallestOffsetToReturn *= 2;
    ncPartitions.clear();
    ncPartitions.add(partitionToBeFixed);
    segmentManager.createConsumingSegment(rtTableName, ncPartitions,
            segmentManager.getExistingSegments(rtTableName), tableConfig);
    Assert.assertEquals(segmentManager._paths.size(), 5);
    Assert.assertEquals(segmentManager._records.size(), 5);
    Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 5);
    Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 5);
    znRecord = segmentManager._records.get(4);
    metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
    Assert.assertEquals(metadata.getNumReplicas(), 2);
    // In this case, since we have data loss, we will always use the smallest kafka offset available for the partition.
    Assert.assertEquals(metadata.getStartOffset(),
            segmentManager.getKafkaPartitionOffset(null, "smallest", partitionToBeFixed));
    llcSegmentName = new LLCSegmentName(metadata.getSegmentName());
    Assert.assertEquals(llcSegmentName.getSequenceNumber(),
            PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 3);
    Assert.assertEquals(llcSegmentName.getPartitionId(), partitionToBeFixed);
}

From source file:it.cnr.icar.eric.server.cache.ClassificationSchemeCache.java

/**
 * Clear all affectedObjects in AuditableEvent from cache. When called,
 * internalCache may be out of date with respect to dB (where transaction
 * has been committed) and objectCache (where affected classification nodes
 * have already been processed).<br>
 * This code keeps the cache primed if it was primed earlier. The side
 * effect of this choice is every other context (separate transaction)
 * immediately knows about the just-committed changes. That is, this cache
 * imitates TRANSACTION_READ_COMMITTED transaction isolation unless the
 * caching event setting is "never".
 */
public void onEvent(ServerRequestContext context, AuditableEventType ae) {
    final String eventType = ae.getEventType();
    final boolean justRemove = primeCacheEvent.equalsIgnoreCase("never");
    final boolean wasChanged = eventType.equalsIgnoreCase(CanonicalConstants.CANONICAL_EVENT_TYPE_ID_Created)
            || eventType.equalsIgnoreCase(CanonicalConstants.CANONICAL_EVENT_TYPE_ID_Updated)
            || eventType.equalsIgnoreCase(CanonicalConstants.CANONICAL_EVENT_TYPE_ID_Versioned);
    final boolean wasRemoved = eventType.equalsIgnoreCase(CanonicalConstants.CANONICAL_EVENT_TYPE_ID_Deleted);

    Set<String> schemesToRemove = new HashSet<String>();
    HashMap<String, RegistryObjectType> schemesToUpdate = new HashMap<String, RegistryObjectType>();

    primeCacheOnFirstUse(context);

    if (wasChanged || wasRemoved) {
        try {
            List<ObjectRefType> affectedObjects = ae.getAffectedObjects().getObjectRef();
            Iterator<ObjectRefType> iter = affectedObjects.iterator();

            while (iter.hasNext()) {
                ObjectRefType ref = iter.next();
                String objectId = ref.getId();

                RegistryObjectType ro = context.getAffectedObjectsMap().get(objectId);

                if (null == ro) {
                    // In case missing (removed?) object was a scheme
                    schemesToRemove.add(objectId);
                } else {
                    if (ro instanceof ClassificationSchemeType) {
                        if (wasRemoved || justRemove) {
                            schemesToRemove.add(objectId);
                        } else {
                            schemesToUpdate.put(objectId, ro);
                        }
                    } else if (ro instanceof ClassificationNodeType) {
                        String schemeId = bu.getSchemeIdForRegistryObject(ro);

                        // Handle case where a node in a scheme has been
                        // added, deleted or updated.
                        if (justRemove) {
                            schemesToRemove.add(schemeId);
                        } else if (!(schemesToRemove.contains(schemeId)
                                || schemesToUpdate.containsKey(schemeId))) {
                            ClassificationSchemeType scheme = (ClassificationSchemeType) getRegistryObjectInternal(
                                    context, schemeId, "ClassScheme");

                            if (null != scheme) {
                                schemesToUpdate.put(schemeId, scheme);

                                // ??? Why is this necessary for all
                                // ??? schemes loaded?
                                loadChildren(context, scheme, getDepthForScheme(schemeId));
                            }
                        }
                    }
                }
            }
        } catch (JAXRException e) {
            log.error(e);
            // Just update all schemes to be safe in case of any errors
            reset(context);

            // Make following block a no-op.
            schemesToRemove.clear();
            schemesToUpdate.clear();
        }
    }

    synchronized (internalCache) {
        int oldSize;
        // xxx pa 110816 added try / catch for CacheException (ehcache 1.0 effect?)
        try {
            oldSize = internalCache.getSize();

            Iterator<String> iter = schemesToRemove.iterator();
            while (iter.hasNext()) {
                String objectId = iter.next();
                internalCache.remove(objectId);
            }
            if (justRemove) {

                // Cache may become primed regardless of the primeCacheEvent
                // setting; check whether we have just undone that.
                if (oldSize != internalCache.getSize()) {
                    cacheIsPrimed = false;
                }
            } else if (schemesToUpdate.size() > 0) {
                addClassificationSchemesToCache(schemesToUpdate.values());
            }

        } catch (CacheException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
}

From source file:com.alvermont.terraj.fracplanet.geom.TriangleMeshTerrain.java

private void doOneRiver(int r, Random random, boolean[] isSeaVertex, List<Set<Integer>> vertexNeighbours,
        int maxLakeSize) {
    int lastStallWarning = 0;

    final Set<Integer> verticesToAdd = new HashSet<Integer>();

    final TreeMapMulti verticesToAddByHeight = new TreeMapMulti();

    final Set<Integer> currentVertices = new HashSet<Integer>();

    float currentVerticesHeight = 0;

    // start from a random non-sea vertex
    final int sourceVertex = (int) (random.nextFloat() * getVertices().size());

    if (!isSeaVertex[sourceVertex]) {
        currentVertices.add(sourceVertex);
        currentVerticesHeight = getVertexHeight(sourceVertex);

        while (true) {
            boolean reachedSea = false;
            final Set<Integer> currentVerticesNeighbours = new HashSet<Integer>();

            for (int it : currentVertices) {
                verticesToAdd.add(it);
                verticesToAddByHeight.insert(currentVerticesHeight, it);

                if (isSeaVertex[it]) {
                    reachedSea = true;
                }

                final Set<Integer> neighbours = vertexNeighbours.get(it);

                for (int itN : neighbours) {
                    if (!currentVertices.contains(itN)) {
                        currentVerticesNeighbours.add(itN);
                    }
                }
            }

            final TreeMapMulti flowCandidates = new TreeMapMulti();

            for (int it : currentVerticesNeighbours) {
                flowCandidates.insert(getVertexHeight(it), it);
            }

            if (reachedSea) {
                break;
            } else if (currentVertices.size() >= maxLakeSize) {
                // lake becomes an inland sea
                if (log.isDebugEnabled()) {
                    log.debug("River " + r + " is now an inland sea");
                }

                break;
            } else {
                boolean meetsExisting = false;

                for (int it : currentVertices) {
                    if (riverVertices.contains(it)) {
                        meetsExisting = true;

                        break;
                    }
                }

                if (meetsExisting) {
                    break;
                }
            }

            //debugPrintMap(flowCandidates);
            int numCurrentVertices = 0;

            if (flowCandidates.isEmpty()) {
                if (log.isDebugEnabled()) {
                    log.warn("Unexpected internal state: no flow candidates " + "for river: " + r);
                }

                break;
            } else if (flowCandidates.getFirstHeight() < (currentVerticesHeight - getGeometry().epsilon())) {
                //                        log.debug("Flow downhill");
                currentVertices.clear();
                currentVerticesHeight = flowCandidates.getFirstHeight();
                currentVertices.add(flowCandidates.getFirstVertex());

                numCurrentVertices = 1;
            } else if (flowCandidates.getFirstHeight() < (currentVerticesHeight + getGeometry().epsilon())) {
                //                        log.debug("Expand across flat");
                final SortedMap<Float, List<Integer>> subset = flowCandidates
                        .headMap(currentVerticesHeight + getGeometry().epsilon());

                for (List<Integer> list : subset.values()) {
                    currentVertices.addAll(list);
                }

                numCurrentVertices = currentVertices.size();
            } else {
                //                        log.debug("Raise level");
                currentVerticesHeight = flowCandidates.getFirstHeight() + getGeometry().epsilon();

                final int outflowVertex = flowCandidates.getFirstVertex();

                currentVertices.add(outflowVertex);

                final SortedMap<Float, List<Integer>> subset = verticesToAddByHeight
                        .headMap(currentVerticesHeight);

                for (List<Integer> list : subset.values()) {
                    for (int vertex : list) {
                        currentVertices.add(vertex);
                        setVertexHeight(vertex, currentVerticesHeight);
                    }
                }

                for (List<Integer> list : subset.values()) {
                    list.clear();
                }

                for (int it : currentVertices) {
                    setVertexHeight(it, currentVerticesHeight);
                    ++numCurrentVertices;
                }
            }

            if (numCurrentVertices >= (lastStallWarning + PROGRESS_STALL)) {
                final String msg = "Rivers (delay: " + numCurrentVertices + " vertex lake)";

                getProgress().progressStall(msg);
            } else if ((numCurrentVertices + PROGRESS_STALL) <= lastStallWarning) {
                final String msg = "Rivers: lake complete";

                getProgress().progressStall(msg);

                lastStallWarning = numCurrentVertices;
            }
        }

        riverVertices.addAll(verticesToAdd);
    }
}

From source file:org.apache.hadoop.hive.metastore.TestHiveMetaStore.java

public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf) throws Exception {
    try {
        String dbName = "compdb";
        String tblName = "comptbl";
        String typeName = "Person";
        List<String> vals = makeVals("2008-07-01 14:13:12", "14");
        List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
        List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
        List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");

        client.dropTable(dbName, tblName);
        silentDropDatabase(dbName);
        Database db = new Database();
        db.setName(dbName);
        client.createDatabase(db);
        db = client.getDatabase(dbName);
        Path dbPath = new Path(db.getLocationUri());
        FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
        boolean inheritPerms = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
        FsPermission dbPermission = fs.getFileStatus(dbPath).getPermission();
        if (inheritPerms) {
            //Set different perms for the database dir for further tests
            dbPermission = new FsPermission((short) 488);
            fs.setPermission(dbPath, dbPermission);
        }

        client.dropType(typeName);
        Type typ1 = new Type();
        typ1.setName(typeName);
        typ1.setFields(new ArrayList<FieldSchema>(2));
        typ1.getFields().add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
        typ1.getFields().add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
        client.createType(typ1);

        Table tbl = new Table();
        tbl.setDbName(dbName);
        tbl.setTableName(tblName);
        StorageDescriptor sd = new StorageDescriptor();
        tbl.setSd(sd);
        sd.setCols(typ1.getFields());
        sd.setCompressed(false);
        sd.setNumBuckets(1);
        sd.setParameters(new HashMap<String, String>());
        sd.getParameters().put("test_param_1", "Use this for comments etc");
        sd.setBucketCols(new ArrayList<String>(2));
        sd.getBucketCols().add("name");
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.setSortCols(new ArrayList<Order>());
        sd.setStoredAsSubDirectories(false);
        sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
        sd.setInputFormat(HiveInputFormat.class.getName());
        sd.setOutputFormat(HiveOutputFormat.class.getName());

        //skewed information
        SkewedInfo skewInfor = new SkewedInfo();
        skewInfor.setSkewedColNames(Arrays.asList("name"));
        List<String> skv = Arrays.asList("1");
        skewInfor.setSkewedColValues(Arrays.asList(skv));
        Map<List<String>, String> scvlm = new HashMap<List<String>, String>();
        scvlm.put(skv, "location1");
        skewInfor.setSkewedColValueLocationMaps(scvlm);
        sd.setSkewedInfo(skewInfor);

        tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
        tbl.getPartitionKeys().add(new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
        tbl.getPartitionKeys().add(new FieldSchema("hr", serdeConstants.STRING_TYPE_NAME, ""));

        client.createTable(tbl);

        if (isThriftClient) {
            // the createTable() above does not update the location in the 'tbl'
            // object when the client is a thrift client and the code below relies
            // on the location being present in the 'tbl' object - so get the table
            // from the metastore
            tbl = client.getTable(dbName, tblName);
        }

        assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation())).getPermission());

        Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
        Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
        Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
        Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");

        // check if the partition exists (it shouldn't)
        boolean exceptionThrown = false;
        try {
            Partition p = client.getPartition(dbName, tblName, vals);
        } catch (Exception e) {
            assertEquals("partition should not have existed", NoSuchObjectException.class, e.getClass());
            exceptionThrown = true;
        }
        assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
        Partition retp = client.add_partition(part);
        assertNotNull("Unable to create partition " + part, retp);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation())).getPermission());
        Partition retp2 = client.add_partition(part2);
        assertNotNull("Unable to create partition " + part2, retp2);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation())).getPermission());
        Partition retp3 = client.add_partition(part3);
        assertNotNull("Unable to create partition " + part3, retp3);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation())).getPermission());
        Partition retp4 = client.add_partition(part4);
        assertNotNull("Unable to create partition " + part4, retp4);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation())).getPermission());

        Partition part_get = client.getPartition(dbName, tblName, part.getValues());
        if (isThriftClient) {
            // since we are using thrift, 'part' will not have the create time and
            // last DDL time set since it does not get updated in the add_partition()
            // call - likewise part2 and part3 - set it correctly so that equals check
            // doesn't fail
            adjust(client, part, dbName, tblName);
            adjust(client, part2, dbName, tblName);
            adjust(client, part3, dbName, tblName);
        }
        assertTrue("Partitions are not same", part.equals(part_get));

        String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
        String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
        String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
        String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";

        part_get = client.getPartition(dbName, tblName, partName);
        assertTrue("Partitions are not the same", part.equals(part_get));

        // Test partition listing with a partial spec - ds is specified but hr is not
        List<String> partialVals = new ArrayList<String>();
        partialVals.add(vals.get(0));
        Set<Partition> parts = new HashSet<Partition>();
        parts.add(part);
        parts.add(part2);

        List<Partition> partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 2 partitions", partial.size() == 2);
        assertTrue("Not all parts returned", partial.containsAll(parts));

        Set<String> partNames = new HashSet<String>();
        partNames.add(partName);
        partNames.add(part2Name);
        List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));

        partNames.add(part3Name);
        partNames.add(part4Name);
        partialVals.clear();
        partialVals.add("");
        partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertTrue("Should have returned 4 partition names", partialNames.size() == 4);
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));

        // Test partition listing with a partial spec - hr is specified but ds is not
        parts.clear();
        parts.add(part2);
        parts.add(part3);

        partialVals.clear();
        partialVals.add("");
        partialVals.add(vals2.get(1));

        partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
        assertEquals("Should have returned 2 partitions", 2, partial.size());
        assertTrue("Not all parts returned", partial.containsAll(parts));

        partNames.clear();
        partNames.add(part2Name);
        partNames.add(part3Name);
        partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
        assertEquals("Should have returned 2 partition names", 2, partialNames.size());
        assertTrue("Not all part names returned", partialNames.containsAll(partNames));

        // Verify escaped partition names don't return partitions
        exceptionThrown = false;
        try {
            String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
            client.getPartition(dbName, tblName, badPartName);
        } catch (NoSuchObjectException e) {
            exceptionThrown = true;
        }
        assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);

        Path partPath = new Path(part.getSd().getLocation());

        assertTrue(fs.exists(partPath));
        client.dropPartition(dbName, tblName, part.getValues(), true);
        assertFalse(fs.exists(partPath));

        // Test append_partition_by_name
        client.appendPartition(dbName, tblName, partName);
        Partition part5 = client.getPartition(dbName, tblName, part.getValues());
        assertTrue("Append partition by name failed", part5.getValues().equals(vals));
        ;
        Path part5Path = new Path(part5.getSd().getLocation());
        assertTrue(fs.exists(part5Path));

        // Test drop_partition_by_name
        assertTrue("Drop partition by name failed", client.dropPartition(dbName, tblName, partName, true));
        assertFalse(fs.exists(part5Path));

        // add the partition again so that drop table with a partition can be
        // tested
        retp = client.add_partition(part);
        assertNotNull("Unable to create partition " + part, retp);
        assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation())).getPermission());

        // test add_partitions

        List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
        List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
        List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
        List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643"); // equal to 3
        List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");

        Exception savedException;

        // add_partitions(empty list) : ok, normal operation
        client.add_partitions(new ArrayList<Partition>());

        // add_partitions(1,2,3) : ok, normal operation
        Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
        Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
        Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
        client.add_partitions(Arrays.asList(mpart1, mpart2, mpart3));

        if (isThriftClient) {
            // do DDL time munging if thrift mode
            adjust(client, mpart1, dbName, tblName);
            adjust(client, mpart2, dbName, tblName);
            adjust(client, mpart3, dbName, tblName);
        }
        verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)),
                Arrays.asList(mpart1, mpart2, mpart3));

        Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
        Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");

        // create dir for /mpart5
        Path mp5Path = new Path(mpart5.getSd().getLocation());
        warehouse.mkdirs(mp5Path, true);
        assertTrue(fs.exists(mp5Path));
        assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());

        // add_partitions(5,4) : err = duplicate keyvals on mpart4
        savedException = null;
        try {
            client.add_partitions(Arrays.asList(mpart5, mpart4));
        } catch (Exception e) {
            savedException = e;
        } finally {
            assertNotNull(savedException);
        }

        // check that /mpart4 does not exist, but /mpart5 still does.
        assertTrue(fs.exists(mp5Path));
        assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));

        // add_partitions(5) : ok
        client.add_partitions(Arrays.asList(mpart5));

        if (isThriftClient) {
            // do DDL time munging if thrift mode
            adjust(client, mpart5, dbName, tblName);
        }

        verifyPartitionsPublished(client, dbName, tblName, Arrays.asList(mvals1.get(0)),
                Arrays.asList(mpart1, mpart2, mpart3, mpart5));

        //// end add_partitions tests

        client.dropTable(dbName, tblName);

        client.dropType(typeName);

        // recreate table as external, drop partition and it should
        // still exist
        tbl.setParameters(new HashMap<String, String>());
        tbl.getParameters().put("EXTERNAL", "TRUE");
        client.createTable(tbl);
        retp = client.add_partition(part);
        assertTrue(fs.exists(partPath));
        client.dropPartition(dbName, tblName, part.getValues(), true);
        assertTrue(fs.exists(partPath));

        for (String tableName : client.getTables(dbName, "*")) {
            client.dropTable(dbName, tableName);
        }

        client.dropDatabase(dbName);

    } catch (Exception e) {
        System.err.println(StringUtils.stringifyException(e));
        System.err.println("testPartition() failed.");
        throw e;
    }
}

From source file:org.apache.hadoop.hbase.HBaseTestingUtility.java

public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
    int regCount = regions.size();
    Set<Integer> attempted = new HashSet<Integer>();
    int idx;
    int attempts = 0;
    do {
        regions = getHBaseCluster().getRegions(tableName);
        if (regCount != regions.size()) {
            // if there was region movement, clear attempted Set
            attempted.clear();
        }
        regCount = regions.size();
        // There is a chance that, before we get the region for the table from an RS, the region may
        // be going for CLOSE. This may be because online schema change is enabled.
        if (regCount > 0) {
            idx = random.nextInt(regCount);
            // if we have already tried this region, there is no need to try again
            if (attempted.contains(idx))
                continue;
            try {
                regions.get(idx).checkSplit();
                return regions.get(idx);
            } catch (Exception ex) {
                LOG.warn("Caught exception", ex);
                attempted.add(idx);
            }
        }
        attempts++;
    } while (maxAttempts == -1 || attempts < maxAttempts);
    return null;
}

From source file:org.apache.bookkeeper.client.TestRackawareEnsemblePlacementPolicy.java

@Test
public void testNumBookiesInDefaultRackGauge() throws Exception {
    String defaultRackForThisTest = NetworkTopology.DEFAULT_REGION_AND_RACK;
    repp.uninitalize();
    updateMyRack(defaultRackForThisTest);

    // Update cluster
    BookieSocketAddress newAddr1 = new BookieSocketAddress("127.0.0.100", 3181);
    BookieSocketAddress newAddr2 = new BookieSocketAddress("127.0.0.101", 3181);
    BookieSocketAddress newAddr3 = new BookieSocketAddress("127.0.0.102", 3181);
    BookieSocketAddress newAddr4 = new BookieSocketAddress("127.0.0.103", 3181);

    // update dns mapping
    StaticDNSResolver.addNodeToRack(newAddr1.getHostName(), defaultRackForThisTest);
    StaticDNSResolver.addNodeToRack(newAddr2.getHostName(), "/default-region/r2");
    StaticDNSResolver.addNodeToRack(newAddr3.getHostName(), "/default-region/r3");
    StaticDNSResolver.addNodeToRack(newAddr4.getHostName(), defaultRackForThisTest);

    TestStatsProvider statsProvider = new TestStatsProvider();
    TestStatsLogger statsLogger = statsProvider.getStatsLogger("");

    repp = new RackawareEnsemblePlacementPolicy();
    repp.initialize(conf, Optional.<DNSToSwitchMapping>empty(), timer, DISABLE_ALL, statsLogger);
    repp.withDefaultRack(defaultRackForThisTest);

    Gauge<? extends Number> numBookiesInDefaultRackGauge = statsLogger
            .getGauge(BookKeeperClientStats.NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK);

    Set<BookieSocketAddress> writeableBookies = new HashSet<BookieSocketAddress>();
    writeableBookies.add(newAddr1);
    writeableBookies.add(newAddr2);
    Set<BookieSocketAddress> readOnlyBookies = new HashSet<BookieSocketAddress>();
    readOnlyBookies.add(newAddr3);
    readOnlyBookies.add(newAddr4);
    repp.onClusterChanged(writeableBookies, readOnlyBookies);
    // newAddr1 is the only writable bookie in the default rack
    assertEquals("NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK gauge value", 1,
            numBookiesInDefaultRackGauge.getSample());

    readOnlyBookies.remove(newAddr4);
    writeableBookies.add(newAddr4);
    repp.onClusterChanged(writeableBookies, readOnlyBookies);
    // newAddr4 is also added to the writable bookies, so there are 2 writable bookies
    // in the default rack - newAddr1 and newAddr4
    assertEquals("NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK gauge value", 2,
            numBookiesInDefaultRackGauge.getSample());

    // newAddr4's rack is changed, so it is no longer in the default rack
    StaticDNSResolver.changeRack(Arrays.asList(newAddr4), Arrays.asList("/default-region/r4"));
    assertEquals("NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK gauge value", 1,
            numBookiesInDefaultRackGauge.getSample());

    writeableBookies.clear();
    // writeableBookies is empty, so there are 0 writable bookies in the default rack
    repp.onClusterChanged(writeableBookies, readOnlyBookies);
    assertEquals("NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK gauge value", 0,
            numBookiesInDefaultRackGauge.getSample());

    StaticDNSResolver.changeRack(Arrays.asList(newAddr1), Arrays.asList("/default-region/r2"));
    readOnlyBookies.clear();
    writeableBookies.add(newAddr1);
    writeableBookies.add(newAddr2);
    writeableBookies.add(newAddr3);
    writeableBookies.add(newAddr4);
    repp.onClusterChanged(writeableBookies, readOnlyBookies);
    // newAddr1's rack is changed and it is not in the default rack anymore, so there are
    // no bookies left in the default rack
    assertEquals("NUM_WRITABLE_BOOKIES_IN_DEFAULT_RACK gauge value", 0,
            numBookiesInDefaultRackGauge.getSample());
}

From source file:org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable.java

private void updateColStats(Set<Integer> projIndxLst) {
    List<String> nonPartColNamesThatRqrStats = new ArrayList<String>();
    List<Integer> nonPartColIndxsThatRqrStats = new ArrayList<Integer>();
    List<String> partColNamesThatRqrStats = new ArrayList<String>();
    List<Integer> partColIndxsThatRqrStats = new ArrayList<Integer>();
    Set<String> colNamesFailedStats = new HashSet<String>();

    // 1. Separate required columns into Non Partition and Partition Cols
    ColumnInfo tmp;
    for (Integer pi : projIndxLst) {
        if (hiveColStatsMap.get(pi) == null) {
            if ((tmp = hiveNonPartitionColsMap.get(pi)) != null) {
                nonPartColNamesThatRqrStats.add(tmp.getInternalName());
                nonPartColIndxsThatRqrStats.add(pi);
            } else if ((tmp = hivePartitionColsMap.get(pi)) != null) {
                partColNamesThatRqrStats.add(tmp.getInternalName());
                partColIndxsThatRqrStats.add(pi);
            } else {
                noColsMissingStats.getAndIncrement();
                String logMsg = "Unable to find Column Index: " + pi + ", in "
                        + hiveTblMetadata.getCompleteName();
                LOG.error(logMsg);
                throw new RuntimeException(logMsg);
            }
        }
    }

    if (null == partitionList) {
        // We could be here either because it's an unpartitioned table or because
        // there are no pruning predicates on a partitioned table.
        computePartitionList(hiveConf, null);
    }

    // 2. Obtain Col Stats for Non Partition Cols
    if (nonPartColNamesThatRqrStats.size() > 0) {
        List<ColStatistics> hiveColStats;

        if (!hiveTblMetadata.isPartitioned()) {
            // 2.1 Handle the case for unpartitioned table.
            hiveColStats = StatsUtils.getTableColumnStats(hiveTblMetadata, hiveNonPartitionCols,
                    nonPartColNamesThatRqrStats);

            // 2.1.1 Record Column Names that we needed stats for but couldn't obtain
            if (hiveColStats == null) {
                colNamesFailedStats.addAll(nonPartColNamesThatRqrStats);
            } else if (hiveColStats.size() != nonPartColNamesThatRqrStats.size()) {
                Set<String> setOfFiledCols = new HashSet<String>(nonPartColNamesThatRqrStats);

                Set<String> setOfObtainedColStats = new HashSet<String>();
                for (ColStatistics cs : hiveColStats) {
                    setOfObtainedColStats.add(cs.getColumnName());
                }
                setOfFiledCols.removeAll(setOfObtainedColStats);

                colNamesFailedStats.addAll(setOfFiledCols);
            }
        } else {
            // 2.2 Obtain col stats for partitioned table.
            try {
                if (partitionList.getNotDeniedPartns().isEmpty()) {
                    // no need to make a metastore call
                    rowCount = 0;
                    hiveColStats = new ArrayList<ColStatistics>();
                    for (String c : nonPartColNamesThatRqrStats) {
                        // add empty stats object for each column
                        hiveColStats.add(new ColStatistics(hiveTblMetadata.getTableName(), c, null));
                    }
                    colNamesFailedStats.clear();
                } else {
                    Statistics stats = StatsUtils.collectStatistics(hiveConf, partitionList, hiveTblMetadata,
                            hiveNonPartitionCols, nonPartColNamesThatRqrStats, nonPartColNamesThatRqrStats,
                            true, true);
                    rowCount = stats.getNumRows();
                    hiveColStats = new ArrayList<ColStatistics>();
                    for (String c : nonPartColNamesThatRqrStats) {
                        ColStatistics cs = stats.getColumnStatisticsFromColName(c);
                        if (cs != null) {
                            hiveColStats.add(cs);
                        } else {
                            colNamesFailedStats.add(c);
                        }
                    }
                }
            } catch (HiveException e) {
                String logMsg = "Collecting stats failed.";
                LOG.error(logMsg);
                throw new RuntimeException(logMsg);
            }
        }

        if (hiveColStats != null && hiveColStats.size() == nonPartColNamesThatRqrStats.size()) {
            for (int i = 0; i < hiveColStats.size(); i++) {
                hiveColStatsMap.put(nonPartColIndxsThatRqrStats.get(i), hiveColStats.get(i));
            }
        }
    }

    // 3. Obtain Stats for Partition Cols
    if (colNamesFailedStats.isEmpty() && !partColNamesThatRqrStats.isEmpty()) {
        ColStatistics cStats = null;
        for (int i = 0; i < partColNamesThatRqrStats.size(); i++) {
            cStats = new ColStatistics(hiveTblMetadata.getTableName(), partColNamesThatRqrStats.get(i),
                    hivePartitionColsMap.get(partColIndxsThatRqrStats.get(i)).getTypeName());
            cStats.setCountDistint(
                    getDistinctCount(partitionList.getPartitions(), partColNamesThatRqrStats.get(i)));
            hiveColStatsMap.put(partColIndxsThatRqrStats.get(i), cStats);
        }
    }

    // 4. Fail if we could not get stats for the required columns
    if (!colNamesFailedStats.isEmpty()) {
        String logMsg = "No Stats for " + hiveTblMetadata.getCompleteName() + ", Columns: "
                + getColNamesForLogging(colNamesFailedStats);
        LOG.error(logMsg);
        noColsMissingStats.getAndAdd(colNamesFailedStats.size());
        throw new RuntimeException(logMsg);
    }
}

From source file:org.apache.nifi.controller.repository.StandardProcessSession.java

public void expireFlowFiles() {
    final Set<FlowFileRecord> expired = new HashSet<>();
    final FlowFileFilter filter = new FlowFileFilter() {
        @Override
        public FlowFileFilterResult filter(final FlowFile flowFile) {
            return FlowFileFilterResult.REJECT_AND_CONTINUE;
        }
    };

    for (final Connection conn : context.getConnectable().getIncomingConnections()) {
        do {
            expired.clear();
            conn.getFlowFileQueue().poll(filter, expired);
            removeExpired(expired, conn);
        } while (!expired.isEmpty());
    }
}

From source file:org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable.java

private void updateColStats(Set<Integer> projIndxLst, boolean allowNullColumnForMissingStats) {
    List<String> nonPartColNamesThatRqrStats = new ArrayList<String>();
    List<Integer> nonPartColIndxsThatRqrStats = new ArrayList<Integer>();
    List<String> partColNamesThatRqrStats = new ArrayList<String>();
    List<Integer> partColIndxsThatRqrStats = new ArrayList<Integer>();
    Set<String> colNamesFailedStats = new HashSet<String>();

    // 1. Separate required columns into Non Partition and Partition Cols
    ColumnInfo tmp;
    for (Integer pi : projIndxLst) {
        if (hiveColStatsMap.get(pi) == null) {
            if ((tmp = hiveNonPartitionColsMap.get(pi)) != null) {
                nonPartColNamesThatRqrStats.add(tmp.getInternalName());
                nonPartColIndxsThatRqrStats.add(pi);
            } else if ((tmp = hivePartitionColsMap.get(pi)) != null) {
                partColNamesThatRqrStats.add(tmp.getInternalName());
                partColIndxsThatRqrStats.add(pi);
            } else {
                noColsMissingStats.getAndIncrement();
                String logMsg = "Unable to find Column Index: " + pi + ", in "
                        + hiveTblMetadata.getCompleteName();
                LOG.error(logMsg);
                throw new RuntimeException(logMsg);
            }
        }
    }

    if (null == partitionList) {
        // We could be here either because it's an unpartitioned table or because
        // there are no pruning predicates on a partitioned table.
        computePartitionList(hiveConf, null);
    }

    // 2. Obtain Col Stats for Non Partition Cols
    if (nonPartColNamesThatRqrStats.size() > 0) {
        List<ColStatistics> hiveColStats;

        if (!hiveTblMetadata.isPartitioned()) {
            // 2.1 Handle the case for unpartitioned table.
            hiveColStats = StatsUtils.getTableColumnStats(hiveTblMetadata, hiveNonPartitionCols,
                    nonPartColNamesThatRqrStats);

            // 2.1.1 Record Column Names that we needed stats for but couldn't obtain
            if (hiveColStats == null) {
                colNamesFailedStats.addAll(nonPartColNamesThatRqrStats);
            } else if (hiveColStats.size() != nonPartColNamesThatRqrStats.size()) {
                Set<String> setOfFiledCols = new HashSet<String>(nonPartColNamesThatRqrStats);

                Set<String> setOfObtainedColStats = new HashSet<String>();
                for (ColStatistics cs : hiveColStats) {
                    setOfObtainedColStats.add(cs.getColumnName());
                }
                setOfFiledCols.removeAll(setOfObtainedColStats);

                colNamesFailedStats.addAll(setOfFiledCols);
            }
        } else {
            // 2.2 Obtain col stats for partitioned table.
            try {
                if (partitionList.getNotDeniedPartns().isEmpty()) {
                    // no need to make a metastore call
                    rowCount = 0;
                    hiveColStats = new ArrayList<ColStatistics>();
                    for (String c : nonPartColNamesThatRqrStats) {
                        // add empty stats object for each column
                        hiveColStats.add(new ColStatistics(c, null));
                    }
                    colNamesFailedStats.clear();
                } else {
                    Statistics stats = StatsUtils.collectStatistics(hiveConf, partitionList, hiveTblMetadata,
                            hiveNonPartitionCols, nonPartColNamesThatRqrStats, nonPartColNamesThatRqrStats,
                            true, true);
                    rowCount = stats.getNumRows();
                    hiveColStats = new ArrayList<ColStatistics>();
                    for (String c : nonPartColNamesThatRqrStats) {
                        ColStatistics cs = stats.getColumnStatisticsFromColName(c);
                        if (cs != null) {
                            hiveColStats.add(cs);
                        } else {
                            colNamesFailedStats.add(c);
                        }
                    }
                }
            } catch (HiveException e) {
                String logMsg = "Collecting stats failed.";
                LOG.error(logMsg, e);
                throw new RuntimeException(logMsg, e);
            }
        }

        if (hiveColStats != null && hiveColStats.size() == nonPartColNamesThatRqrStats.size()) {
            for (int i = 0; i < hiveColStats.size(); i++) {
                hiveColStatsMap.put(nonPartColIndxsThatRqrStats.get(i), hiveColStats.get(i));
            }
        }
    }

    // 3. Obtain Stats for Partition Cols
    if (colNamesFailedStats.isEmpty() && !partColNamesThatRqrStats.isEmpty()) {
        ColStatistics cStats = null;
        for (int i = 0; i < partColNamesThatRqrStats.size(); i++) {
            cStats = new ColStatistics(partColNamesThatRqrStats.get(i),
                    hivePartitionColsMap.get(partColIndxsThatRqrStats.get(i)).getTypeName());
            cStats.setCountDistint(
                    getDistinctCount(partitionList.getPartitions(), partColNamesThatRqrStats.get(i)));
            hiveColStatsMap.put(partColIndxsThatRqrStats.get(i), cStats);
        }
    }

    // 4. Warn user (or fail) if we could not get stats for the required columns
    if (!colNamesFailedStats.isEmpty()) {
        String logMsg = "No Stats for " + hiveTblMetadata.getCompleteName() + ", Columns: "
                + getColNamesForLogging(colNamesFailedStats);
        noColsMissingStats.getAndAdd(colNamesFailedStats.size());
        if (allowNullColumnForMissingStats) {
            LOG.warn(logMsg);
        } else {
            LOG.error(logMsg);
            throw new RuntimeException(logMsg);
        }
    }
}