List of usage examples for com.mongodb BasicDBObject get
public Object get(final String key)
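BasicDBObject.get(String) returns the raw Object stored under a key (or null if the key is absent), so callers typically cast the result to the expected concrete type, as every example below does. A minimal, self-contained sketch of that pattern:

import com.mongodb.BasicDBObject;

public class BasicGetExample {
    public static void main(String[] args) {
        BasicDBObject doc = new BasicDBObject("name", "web-01")
                .append("location", new BasicDBObject("scope", "ZONE").append("id", "us-east-1a"));

        // get returns Object; the caller casts to the expected concrete type
        String name = (String) doc.get("name");
        BasicDBObject location = (BasicDBObject) doc.get("location");

        // Missing keys yield null rather than throwing
        Object absent = doc.get("no_such_key");

        System.out.println(name + " @ " + location.get("id") + ", absent=" + absent);
    }
}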
From source file: com.streamreduce.core.service.EventServiceImpl.java
License: Apache License

/**
 * Helper method that returns all metadata for an {@link InventoryItem}.
 *
 * @param inventoryItem the cloud inventory item to retrieve metadata for/about
 * @return the metadata
 */
private Map<String, Object> getMetadataFromInventoryItem(InventoryItem inventoryItem) {
    // NOTE: We're not using CloudService methods here for performance reasons
    Map<String, Object> civMetadata = new HashMap<>();

    // Right now, we are only creating extended metadata for AWS EC2 instance items
    if (inventoryItem.getConnection().getProviderId().equals(ProviderIdConstants.AWS_PROVIDER_ID)
            && inventoryItem.getType().equals(Constants.COMPUTE_INSTANCE_TYPE)) {
        DBObject cMetadata = genericCollectionDAO.getById(DAODatasourceType.BUSINESS,
                Constants.INVENTORY_ITEM_METADATA_COLLECTION_NAME, inventoryItem.getMetadataId());

        if (cMetadata == null) {
            // Fill in the metadata based on the last event for this target
            Event previousEvent = getLastEventForTarget(inventoryItem.getId());
            if (previousEvent != null) {
                Map<String, Object> peMetadata = previousEvent.getMetadata();
                if (peMetadata != null) {
                    civMetadata.put("targetIP", peMetadata.get("targetIP"));
                    civMetadata.put("targetOS", peMetadata.get("targetOS"));
                    civMetadata.put("targetISO3166Code", peMetadata.get("targetISO3166Code"));
                    civMetadata.put("targetRegion", peMetadata.get("targetRegion"));
                    civMetadata.put("targetZone", peMetadata.get("targetZone"));
                }
            }
        } else {
            // Fill in the metadata from the available node metadata

            // Get the IP address
            if (cMetadata.containsField("publicAddresses")) {
                BasicDBList publicAddresses = (BasicDBList) cMetadata.get("publicAddresses");
                // TODO: How do we want to handle multiple IP addresses?
                if (publicAddresses.size() > 0) {
                    civMetadata.put("targetIP", publicAddresses.get(0));
                }
            }

            // Get location information (ISO 3166 code, region and availability zone)
            if (cMetadata.containsField("location") && cMetadata.get("location") != null) {
                BasicDBObject location = (BasicDBObject) cMetadata.get("location");
                boolean regionProcessed = false;
                boolean zoneProcessed = false;

                while (location != null) {
                    if (regionProcessed && zoneProcessed) {
                        break;
                    }
                    String locationScope = location.containsField("scope") ? location.getString("scope") : null;
                    if (locationScope != null) {
                        LocationScope scope = LocationScope.valueOf(locationScope);
                        switch (scope) {
                        case REGION:
                            civMetadata.put("targetRegion", location.get("id"));
                            regionProcessed = true;
                            break;
                        case ZONE:
                            BasicDBList iso3166Codes = (BasicDBList) location.get("iso3166Codes");
                            civMetadata.put("targetISO3166Code", iso3166Codes.get(0));
                            civMetadata.put("targetZone", location.get("id"));
                            zoneProcessed = true;
                            break;
                        }
                    }
                    location = location.containsField("parent") && location.get("parent") != null
                            ? (BasicDBObject) location.get("parent")
                            : null;
                }
            }

            // Get OS name
            if (cMetadata.containsField("operatingSystem")) {
                BasicDBObject operatingSystem = (BasicDBObject) cMetadata.get("operatingSystem");
                if (operatingSystem != null && operatingSystem.containsField("family")) {
                    civMetadata.put("targetOS", operatingSystem.get("family"));
                }
            }
        }
    }

    return civMetadata;
}
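The example above repeats a containsField / null-check / cast dance for every nested field. If you find yourself doing the same, a small helper like the hypothetical getNested below (not part of the project above, just a sketch) keeps the walk null-safe:

import com.mongodb.BasicDBObject;

public final class DBObjectUtils {
    // Hypothetical helper: walks a chain of keys, returning null as soon as
    // any intermediate value is missing or is not a nested document
    public static Object getNested(BasicDBObject root, String... keys) {
        Object current = root;
        for (String key : keys) {
            if (!(current instanceof BasicDBObject)) {
                return null;
            }
            current = ((BasicDBObject) current).get(key);
        }
        return current;
    }

    public static void main(String[] args) {
        BasicDBObject metadata = new BasicDBObject("operatingSystem",
                new BasicDBObject("family", "linux"));
        System.out.println(getNested(metadata, "operatingSystem", "family")); // linux
        System.out.println(getNested(metadata, "location", "id"));            // null
    }
}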
From source file: com.streamreduce.core.transformer.message.AgentMessageTransformer.java
License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public String doTransform(Event event) {
    EventId eventId = event.getEventId();
    Map<String, Object> eventMetadata = event.getMetadata();
    String msg = "";

    switch (eventId) {
    case ACTIVITY:
        BasicDBObject payload = (BasicDBObject) eventMetadata.get("payload");
        StringBuilder sb = new StringBuilder();

        sb.append("Current system overview at ").append(eventMetadata.get("activityGenerated")) // Should we format this?
                .append("\n\n");
        sb.append("Uptime: ").append(payload.getString("uptime")).append("s\n").append("Disk usage:\n");

        BasicDBObject partitionsObj = (BasicDBObject) payload.get("partitions");
        Set<String> partitions = new TreeSet<>(partitionsObj.keySet());
        for (String key : partitions) {
            BasicDBObject partition = (BasicDBObject) partitionsObj.get(key);
            double totalAsKb = partition.getDouble("total");

            // Certain devices show as 0.00GB and should be pruned
            if (totalAsKb == 0) {
                continue;
            }

            double totalAsGB = MessageUtils.kbToGB(totalAsKb);
            double usedAsGB = MessageUtils.kbToGB(partition.getDouble("used"));
            double freeAsGB = MessageUtils.kbToGB(partition.getDouble("free"));

            sb.append("  ").append(key).append(": Total ").append(MessageUtils.roundAndTruncate(totalAsGB, 2))
                    .append("GB, Used ").append(MessageUtils.roundAndTruncate(usedAsGB, 2)).append("GB, Free ")
                    .append(MessageUtils.roundAndTruncate(freeAsGB, 2)).append("GB\n");
        }

        sb.append("Disk I/O:\n");
        BasicDBObject diskIO = (BasicDBObject) payload.get("disk_io");
        Set<String> disks = new TreeSet<>(diskIO.keySet());
        if (disks.size() == 0) {
            sb.append("  Unavailable\n");
        } else {
            for (String key : disks) {
                BasicDBObject disk = (BasicDBObject) diskIO.get(key);
                long reads = disk.getLong("read_count");
                long writes = disk.getLong("write_count");
                double gbRead = MessageUtils.kbToGB(disk.getLong("read_kbytes"));
                double gbWrite = MessageUtils.kbToGB(disk.getLong("write_kbytes"));
                long readSecs = disk.getLong("read_time");
                long writeSecs = disk.getLong("write_time");

                sb.append("  ").append(key).append(": Reads ").append(reads).append(", Writes ").append(writes)
                        .append(", GB Read ").append(MessageUtils.roundAndTruncate(gbRead, 2))
                        .append(", GB Written ").append(MessageUtils.roundAndTruncate(gbWrite, 2))
                        .append(", Read Time ").append(readSecs).append("s, Write Time ").append(writeSecs)
                        .append("s\n");
            }
        }

        sb.append("Network I/O:\n");
        BasicDBObject netIO = (BasicDBObject) payload.get("network_io");
        Set<String> nics = new TreeSet<>(netIO.keySet());
        int nicsDisplayed = 0;
        for (String key : nics) {
            BasicDBObject nic = (BasicDBObject) netIO.get(key);
            long packetsIn = nic.getInt("packets_in");
            long packetsOut = nic.getInt("packets_out");

            // Certain devices show 0 packets in/out and should be pruned
            if (packetsIn == 0 && packetsOut == 0) {
                continue;
            }

            double gbIn = MessageUtils.kbToGB(nic.getLong("kbytes_in"));
            double gbOut = MessageUtils.kbToGB(nic.getLong("kbytes_out"));

            sb.append("  ").append(key).append(": Packets In ").append(packetsIn).append(", Packets Out ")
                    .append(packetsOut).append(", GB In ").append(MessageUtils.roundAndTruncate(gbIn, 2))
                    .append(", GB Out ").append(MessageUtils.roundAndTruncate(gbOut, 2)).append("\n");

            nicsDisplayed++;
        }
        if (nicsDisplayed == 0) {
            sb.append("  Unavailable\n");
        }

        sb.append("Load: 1m ").append(MessageUtils.roundAndTruncate(payload.getDouble("load_avg_0"), 2))
                .append(", ").append("5m ")
                .append(MessageUtils.roundAndTruncate(payload.getDouble("load_avg_1"), 2)).append(", ")
                .append("15m ").append(MessageUtils.roundAndTruncate(payload.getDouble("load_avg_2"), 2))
                .append("\n");

        float gbTotalRAM = (float) MessageUtils.kbToGB(payload.getLong("phy_ram_total"));
        float gbUsedRAM = (float) MessageUtils.kbToGB(payload.getLong("phy_ram_used"));
        float gbFreeRAM = (float) MessageUtils.kbToGB(payload.getLong("phy_ram_free"));

        sb.append("Real Mem: Total ").append(MessageUtils.roundAndTruncate(gbTotalRAM, 2)).append("GB, Used ")
                .append(MessageUtils.roundAndTruncate(gbUsedRAM, 2)).append("GB, Free ")
                .append(MessageUtils.roundAndTruncate(gbFreeRAM, 2)).append("GB\n");

        double gbTotalVRAM = MessageUtils.kbToGB(payload.getLong("vir_ram_total"));
        double gbUsedVRAM = MessageUtils.kbToGB(payload.getLong("vir_ram_used"));
        double gbFreeVRAM = MessageUtils.kbToGB(payload.getLong("vir_ram_free"));

        sb.append("Virt Mem: Total ").append(MessageUtils.roundAndTruncate(gbTotalVRAM, 2)).append("GB, Used ")
                .append(MessageUtils.roundAndTruncate(gbUsedVRAM, 2)).append("GB, Free ")
                .append(MessageUtils.roundAndTruncate(gbFreeVRAM, 2)).append("GB\n");

        sb.append("Processes: ").append(payload.getInt("processes")).append("\n");
        sb.append("Users: Total ").append(payload.getInt("users_total")).append(", Unique ")
                .append(payload.getInt("users_unique")).append("\n");

        msg = sb.toString();
        break;
    default:
        // Delegate to the parent transformer for all other event types
        // (the original sample discarded this return value, leaving msg empty)
        msg = super.doTransform(event);
        break;
    }

    return msg;
}
From source file: com.streamreduce.storm.spouts.EventSpout.java
License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public void handleDBEntry(SpoutOutputCollector collector, BasicDBObject entry) {
    BasicDBObject metadata = entry.containsField("metadata") ? (BasicDBObject) entry.get("metadata")
            : new BasicDBObject();
    String eventType = metadata.getString("targetType");

    // Just in case
    if (eventType == null) {
        // TODO: Figure out the best way to handle this
        // Log the inability to process further
        logger.error("Event with id of " + entry.get("_id") + " has no target type. Unable to process.");
        // Early return to avoid emitting the event
        return;
    }

    String v = (String) metadata.get("targetVisibility");
    if (v != null) {
        Visibility visibility = Visibility.valueOf(v);
        if (visibility == Visibility.SELF) {
            // Do not process private information
            return;
        }
    }

    MongoClient.mapMongoToPlainJavaTypes(entry);

    // Emit the entry to the type-specific stream
    collector.emit(eventType, new Values(entry.toMap()));
    ack(entry);
}
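The spout above mixes two retrieval styles that BasicDBObject inherits from BasicBSONObject: getString(...), which returns the value as a String (or null when the key is absent) without a cast, and the raw get(...), which returns Object and leaves the cast to the caller. A minimal sketch of the difference, with illustrative field names:

import com.mongodb.BasicDBObject;

public class GetVsGetString {
    public static void main(String[] args) {
        BasicDBObject metadata = new BasicDBObject("targetType", "compute");

        // Convenience accessor: no cast needed, null if the key is missing
        String type = metadata.getString("targetType");          // "compute"
        String missing = metadata.getString("targetVisibility"); // null

        // Raw accessor: returns Object, caller casts; also null if missing
        String visibility = (String) metadata.get("targetVisibility"); // null

        System.out.println(type + " / " + missing + " / " + visibility);
    }
}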
From source file: com.tengen.Week3Hw3_1.java
License: Apache License

public static void main(String[] args) throws UnknownHostException {
    MongoClient client = new MongoClient();
    DB database = client.getDB("school");
    DBCollection collection = database.getCollection("students");
    DBCursor cursor = collection.find();
    try {
        while (cursor.hasNext()) {
            DBObject student = cursor.next();
            BasicDBObject searchQuery = new BasicDBObject().append("_id", student.get("_id"));
            BasicDBList scores = (BasicDBList) student.get("scores");
            BasicDBObject[] scoresArr = scores.toArray(new BasicDBObject[0]);

            // Find the lowest homework score for this student
            double score = 100.0;
            BasicDBObject rem = new BasicDBObject();
            for (BasicDBObject dbObj : scoresArr) {
                String type = dbObj.get("type").toString();
                if (type.equals("homework")) {
                    double s = Double.parseDouble(dbObj.get("score").toString());
                    if (score > s) {
                        score = s;
                        rem = dbObj;
                    }
                }
            }

            // Remove that score entry from the student's scores array
            BasicDBObject update = new BasicDBObject("scores", rem);
            collection.update(searchQuery, new BasicDBObject("$pull", update));
        }
    } finally {
        cursor.close();
    }
}
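For reference, $pull removes the array elements that match the embedded document, so the update built in the loop above targets the exact lowest-homework subdocument. A sketch of that update shape in isolation (the score value is illustrative):

import com.mongodb.BasicDBObject;

public class PullShape {
    public static void main(String[] args) {
        // Hypothetical lowest-homework entry, mirroring `rem` in the loop above
        BasicDBObject rem = new BasicDBObject("type", "homework").append("score", 5.37);
        BasicDBObject update = new BasicDBObject("$pull", new BasicDBObject("scores", rem));

        // Prints something like:
        // { "$pull" : { "scores" : { "type" : "homework" , "score" : 5.37 } } }
        System.out.println(update);
    }
}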
From source file: com.wincere.lamda.storm.bolt.CreateTable.java
License: Apache License

/**
 * Upserts a query document into the Queries1 collection, preserving existing
 * field values wherever the incoming document carries the "\N" placeholder.
 *
 * @throws UnknownHostException if it cannot connect to the MongoDB instance
 */
public void update(BasicDBObject doc, OutputCollector collector, Tuple input) throws UnknownHostException {
    // Connect to the database server
    MongoCredential credential = MongoCredential.createMongoCRCredential("superuser", "admin",
            "12345678".toCharArray());
    try {
        MongoClient mongoClient = new MongoClient(new ServerAddress("172.16.1.171", 27017),
                Arrays.asList(credential));

        // Get handles to the database and the collection to work with
        DB db = mongoClient.getDB("UCAPBatchTest");
        DBCollection coll = db.getCollection("Queries1");

        try {
            BasicDBObject searchQuery = new BasicDBObject().append("queryRepeatKey",
                    (String) doc.get("queryRepeatKey"));
            BasicDBObject newDocument = new BasicDBObject();
            DBCursor cursor = coll.find(searchQuery);

            if (cursor.hasNext()) {
                DBObject result = cursor.next();
                String queryValue = (String) result.get("queryValue");
                String queryStatusID = (String) result.get("queryStatusID");
                String queryResponse = (String) result.get("queryResponse");
                String queryResolvedTimeStamp = (String) result.get("queryResolvedTimeStamp");
                String queryAnsweredTimeStamp = (String) result.get("queryAnsweredTimeStamp");
                String queryCreatedTimeStamp = (String) result.get("queryCreatedTimeStamp");

                // "\N" marks a missing value; keep the stored value in that case
                if (doc.get("queryValue").equals("\\N")) {
                    doc.append("queryValue", queryValue);
                }
                if (doc.get("queryStatusID").equals("\\N")) {
                    doc.append("queryStatusID", queryStatusID);
                }
                if (doc.get("queryResponse").equals("\\N")) {
                    doc.append("queryResponse", queryResponse);
                }
                if (doc.get("queryResolvedTimeStamp").equals("\\N")) {
                    doc.append("queryResolvedTimeStamp", queryResolvedTimeStamp);
                }
                if (doc.get("queryAnsweredTimeStamp").equals("\\N")) {
                    doc.append("queryAnsweredTimeStamp", queryAnsweredTimeStamp);
                }
                doc.append("queryCreatedTimeStamp", queryCreatedTimeStamp);
            }

            if (doc.get("queryStatusID").equals("Open")) {
                doc.append("queryCreatedTimeStamp", doc.get("queryCreatedTimeStamp"));
            }

            newDocument.append("$set", doc);
            try {
                // Upsert, with multi-update enabled
                coll.update(searchQuery, newDocument, true, true);
            } catch (MongoException me) {
                collector.fail(input);
            }
        } catch (Exception e) {
            System.err.println("CSV file cannot be read : " + e);
        }

        // Release resources
        mongoClient.close();
    } catch (UnknownHostException e) {
        e.printStackTrace();
    }
}
From source file: com.wordnik.system.mongodb.Analyzer.java
License: Open Source License

protected void run() {
    long startTime = System.currentTimeMillis();

    // Decide what collections to process
    selectCollections();

    // Create any re-mappings
    Map<String, String> collectionMappings = new HashMap<String, String>();
    Map<String, String> databaseMappings = new HashMap<String, String>();
    createMappings(DATABASE_MAPPING_STRING, COLLECTION_MAPPING_STRING, databaseMappings, collectionMappings);

    try {
        File[] files = new File(INPUT_DIR).listFiles();
        if (files != null) {
            List<File> filesToProcess = new ArrayList<File>();
            for (File file : files) {
                if (file.getName().indexOf(".bson") > 0) {
                    filesToProcess.add(file);
                }
            }

            long operationsRead = 0;
            long lastOutput = System.currentTimeMillis();
            for (File file : filesToProcess) {
                System.out.println("analyzing file " + file.getName());
                BufferedInputStream inputStream = null;
                try {
                    if (file.getName().endsWith(".gz")) {
                        InputStream is = new GZIPInputStream(new FileInputStream(file));
                        inputStream = new BufferedInputStream(is);
                    } else {
                        inputStream = new BufferedInputStream(new FileInputStream(file));
                    }
                    BSONDecoder decoder = new DefaultDBDecoder();
                    while (true) {
                        // NOTE: available() == 0 serves as an end-of-file check here;
                        // this works for file-backed streams but would not for sockets
                        if (inputStream.available() == 0) {
                            break;
                        }
                        BSONObject obj = decoder.readObject(inputStream);
                        if (obj == null) {
                            break;
                        }
                        BasicDBObject dbo = new BasicDBObject((BasicBSONObject) obj);

                        // Each oplog entry carries an operation timestamp ("ts") and namespace ("ns")
                        BSONTimestamp operationTimestamp = (BSONTimestamp) dbo.get("ts");
                        String namespace = dbo.getString("ns");

                        processRecord(dbo);
                        operationsRead++;

                        long durationSinceLastOutput = System.currentTimeMillis() - lastOutput;
                        if (durationSinceLastOutput > REPORT_INTERVAL) {
                            report(operationsRead, System.currentTimeMillis() - startTime);
                            lastOutput = System.currentTimeMillis();
                        }
                    }
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    report(0, System.currentTimeMillis() - startTime);
}
From source file: com.wordnik.system.mongodb.ReplayUtil.java
License: Open Source License

protected void run() {
    long startTime = System.currentTimeMillis();

    // Decide what collections to process
    selectCollections();

    OplogReplayWriter util = new OplogReplayWriter();

    // Create any re-mappings
    Map<String, String> collectionMappings = new HashMap<String, String>();
    Map<String, String> databaseMappings = new HashMap<String, String>();
    createMappings(DATABASE_MAPPING_STRING, COLLECTION_MAPPING_STRING, databaseMappings, collectionMappings);

    // Configure the writer
    util.setCollectionMappings(collectionMappings);
    util.setDatabaseMappings(databaseMappings);
    util.setDestinationDatabaseUsername(DEST_DATABASE_USER_NAME);
    util.setDestinationDatabasePassword(DEST_DATABASE_PASSWORD);
    util.setDestinationDatabaseHost(DEST_DATABASE_HOST);

    try {
        File[] files = new File(INPUT_DIR).listFiles();
        if (files != null) {
            List<File> filesToProcess = new ArrayList<File>();
            for (File file : files) {
                if (file.getName().indexOf(".bson") > 0) {
                    filesToProcess.add(file);
                }
            }

            long operationsRead = 0;
            long operationsSkipped = 0;
            long lastOutput = System.currentTimeMillis();
            for (File file : filesToProcess) {
                System.out.println("replaying file " + file.getName());
                BufferedInputStream inputStream = null;
                try {
                    if (file.getName().endsWith(".gz")) {
                        InputStream is = new GZIPInputStream(new FileInputStream(file));
                        inputStream = new BufferedInputStream(is);
                    } else {
                        inputStream = new BufferedInputStream(new FileInputStream(file));
                    }
                    BSONDecoder decoder = new DefaultDBDecoder();
                    while (true) {
                        if (inputStream.available() == 0) {
                            break;
                        }
                        BSONObject obj = decoder.readObject(inputStream);
                        if (obj == null) {
                            break;
                        }
                        BasicDBObject dbo = new BasicDBObject((BasicBSONObject) obj);
                        BSONTimestamp operationTimestamp = (BSONTimestamp) dbo.get("ts");
                        String namespace = dbo.getString("ns");
                        String collection = util.getUnmappedCollectionFromNamespace(namespace);

                        boolean shouldProcess = shouldProcessRecord(collection, operationTimestamp);
                        if (collection != null && shouldProcess) {
                            util.processRecord(dbo);
                            operationsRead++;
                        } else {
                            operationsSkipped++;
                        }

                        long durationSinceLastOutput = System.currentTimeMillis() - lastOutput;
                        if (durationSinceLastOutput > REPORT_INTERVAL) {
                            report(util.getInsertCount(), util.getUpdateCount(), util.getDeleteCount(),
                                    operationsRead, operationsSkipped, System.currentTimeMillis() - startTime);
                            lastOutput = System.currentTimeMillis();
                        }
                    }
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file: com.zdy.statistics.analysis.shop.AnalysisShop.java

public int shopSell() throws UnknownHostException {
    DB db = MongoDBConnector.getDB();
    DBCollection collection = db.getCollection("server");

    BasicDBObject query = new BasicDBObject();
    query.put("message.type", "obtain_prop");
    query.put("message.event", 1);

    java.util.Date now = new java.util.Date();
    String gtTime = DateTimeUtil.dateCalculate(now, -1) + " 00:00:00";
    String ltTime = DateTimeUtil.dateCalculate(now, -1) + " 23:59:59";
    query.put("message.opera_time", new BasicDBObject("$gte", gtTime).append("$lte", ltTime));
    System.out.println(query);

    DBCursor cur = collection.find(query);
    int count = 0;
    while (cur.hasNext()) {
        BasicDBObject dbObject = (BasicDBObject) cur.next();
        DBObject message = (DBObject) dbObject.get("message");
        count += ((int) message.get("count"));
    }
    return count;
}
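One caveat worth flagging in the example above: get returns Object, and the runtime type of a numeric field depends on how the value was stored (BSON int32, int64, or double map to Integer, Long, or Double). The blind (int) cast works only if every document stores a 32-bit int; a more defensive variant, sketched here with an illustrative field, casts through Number:

import com.mongodb.BasicDBObject;

public class NumericGet {
    public static void main(String[] args) {
        BasicDBObject message = new BasicDBObject("count", 42L); // stored as a long, not an int

        // (int) message.get("count") would throw ClassCastException here;
        // casting through Number tolerates int, long, and double storage
        int count = ((Number) message.get("count")).intValue();
        System.out.println(count); // 42
    }
}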
From source file: com.zjy.mongo.splitter.MongoCollectionSplitter.java
License: Apache License

/**
 * Contacts the config server and builds a map of each shard's name to its
 * host(s) by examining config.shards.
 *
 * @return a Map of shard name onto shard hostnames
 */
protected Map<String, String> getShardsMap() {
    DBCursor cur = null;
    HashMap<String, String> shardsMap = new HashMap<String, String>();
    DB configDB = null;
    try {
        configDB = getConfigDB();
        DBCollection shardsCollection = configDB.getCollection("shards");
        cur = shardsCollection.find();
        while (cur.hasNext()) {
            final BasicDBObject row = (BasicDBObject) cur.next();
            String host = row.getString("host");
            // For replica sets, host will look like: "setname/localhost:20003,localhost:20004"
            int slashIndex = host.indexOf('/');
            if (slashIndex > 0) {
                host = host.substring(slashIndex + 1);
            }
            shardsMap.put((String) row.get("_id"), host);
        }
    } finally {
        if (cur != null) {
            cur.close();
        }
    }
    return shardsMap;
}
From source file: com.zjy.mongo.splitter.ShardChunkMongoSplitter.java
License: Apache License

@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    boolean targetShards = MongoConfigUtil.canReadSplitsFromShards(getConfiguration());
    DB configDB = getConfigDB();
    DBCollection chunksCollection = configDB.getCollection("chunks");

    MongoClientURI inputURI = MongoConfigUtil.getInputURI(getConfiguration());
    String inputNS = inputURI.getDatabase() + "." + inputURI.getCollection();
    DBCursor cur = chunksCollection.find(new BasicDBObject("ns", inputNS));

    int numChunks = 0;

    Map<String, String> shardsMap = null;
    if (targetShards) {
        try {
            shardsMap = getShardsMap();
        } catch (Exception e) {
            // Something went wrong when trying to read the shards data from
            // the config server, so abort the splitting
            throw new SplitFailedException("Couldn't get shards information from config server", e);
        }
    }

    List<String> mongosHostNames = MongoConfigUtil.getInputMongosHosts(getConfiguration());
    if (targetShards && mongosHostNames.size() > 0) {
        throw new SplitFailedException("Setting both mongo.input.split.read_from_shards and "
                + "mongo.input.mongos_hosts does not make sense. ");
    }

    if (mongosHostNames.size() > 0) {
        LOG.info("Using multiple mongos instances (round robin) for reading input.");
    }

    Map<String, LinkedList<InputSplit>> shardToSplits = new HashMap<String, LinkedList<InputSplit>>();

    try {
        while (cur.hasNext()) {
            final BasicDBObject row = (BasicDBObject) cur.next();
            BasicDBObject chunkLowerBound = (BasicDBObject) row.get("min");
            BasicDBObject chunkUpperBound = (BasicDBObject) row.get("max");
            MongoInputSplit chunkSplit = createSplitFromBounds(chunkLowerBound, chunkUpperBound);
            chunkSplit.setInputURI(inputURI);
            String shard = (String) row.get("shard");
            if (targetShards) {
                // The job is configured to target shards, so replace the
                // mongos hostname with the host of the shard's servers
                String shardHosts = shardsMap.get(shard);
                if (shardHosts == null) {
                    throw new SplitFailedException("Couldn't find shard ID: " + shard + " in config.shards.");
                }
                MongoClientURI newURI = rewriteURI(inputURI, shardHosts);
                chunkSplit.setInputURI(newURI);
            } else if (mongosHostNames.size() > 0) {
                // Multiple mongos hosts are specified, so choose a host name
                // in round-robin fashion and rewrite the URI using that
                // hostname. This evenly distributes the load to avoid
                // pegging a single mongos instance.
                String roundRobinHost = mongosHostNames.get(numChunks % mongosHostNames.size());
                MongoClientURI newURI = rewriteURI(inputURI, roundRobinHost);
                chunkSplit.setInputURI(newURI);
            }
            LinkedList<InputSplit> shardList = shardToSplits.get(shard);
            if (shardList == null) {
                shardList = new LinkedList<InputSplit>();
                shardToSplits.put(shard, shardList);
            }
            chunkSplit.setKeyField(MongoConfigUtil.getInputKey(getConfiguration()));
            shardList.add(chunkSplit);
            numChunks++;
        }
    } finally {
        MongoConfigUtil.close(configDB.getMongo());
    }

    // Interleave the splits across shards so that consecutive splits
    // land on different shards
    final List<InputSplit> splits = new ArrayList<InputSplit>(numChunks);
    int splitIndex = 0;
    while (splitIndex < numChunks) {
        Set<String> shardSplitsToRemove = new HashSet<String>();
        for (Entry<String, LinkedList<InputSplit>> shardSplits : shardToSplits.entrySet()) {
            LinkedList<InputSplit> shardSplitsList = shardSplits.getValue();
            InputSplit split = shardSplitsList.pop();
            splits.add(splitIndex, split);
            splitIndex++;
            if (shardSplitsList.isEmpty()) {
                shardSplitsToRemove.add(shardSplits.getKey());
            }
        }
        for (String shardName : shardSplitsToRemove) {
            shardToSplits.remove(shardName);
        }
    }
    return splits;
}