List of usage examples for com.mongodb.ServerAddress.toString()
@Override
public String toString()
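In the MongoDB Java driver, ServerAddress.toString() renders the address as a plain host:port string, which is why the examples below use it for labels, log messages, and host lists. A minimal sketch (the host name is a placeholder; no connection is opened):

import com.mongodb.ServerAddress;

public class ServerAddressToStringDemo {
    public static void main(String[] args) {
        // ServerAddress is a value object; constructing one does not connect.
        ServerAddress addr = new ServerAddress("db1.example.com", 27017);
        System.out.println(addr.toString()); // db1.example.com:27017
    }
}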
From source file:com.bosscs.spark.mongodb.extractor.MongoNativeExtractor.java
License:Apache License
/**
 * Calculate splits.
 *
 * @param collection the collection
 * @return the calculated partitions
 */
private HadoopPartition[] calculateSplits(DBCollection collection) {
    BasicDBList splitData = getSplitData(collection);
    List<ServerAddress> serverAddressList = collection.getDB().getMongo().getServerAddressList();
    if (splitData == null) {
        Pair<BasicDBList, List<ServerAddress>> pair = getSplitDataCollectionShardEnviroment(
                getShards(collection), collection.getDB().getName(), collection.getName());
        splitData = pair.left;
        serverAddressList = pair.right;
    }
    Object lastKey = null; // Lower boundary of the first min split
    List<String> stringHosts = new ArrayList<>();
    for (ServerAddress serverAddress : serverAddressList) {
        stringHosts.add(serverAddress.toString());
    }
    int i = 0;
    MongoPartition[] partitions = new MongoPartition[splitData.size() + 1];
    for (Object aSplitData : splitData) {
        BasicDBObject currentKey = (BasicDBObject) aSplitData;
        Object currentO = currentKey.get(MONGO_DEFAULT_ID);
        partitions[i] = new MongoPartition(mongoDeepJobConfig.getRddId(), i,
                new TokenRange(lastKey, currentO, stringHosts), MONGO_DEFAULT_ID);
        lastKey = currentO;
        i++;
    }
    QueryBuilder queryBuilder = QueryBuilder.start(MONGO_DEFAULT_ID);
    queryBuilder.greaterThanEquals(lastKey);
    partitions[i] = new MongoPartition(0, i, new TokenRange(lastKey, null, stringHosts), MONGO_DEFAULT_ID);
    return partitions;
}
From source file:com.edgytech.umongo.ReplSetNode.java
License:Apache License
String[] getReplicaNames() {
    List<ServerAddress> addrs = mongo.getServerAddressList();
    String[] names = new String[addrs.size()];
    int i = 0;
    for (ServerAddress addr : addrs)
        names[i++] = addr.toString();
    return names;
}
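For reference, the same address-to-name mapping can be written more compactly with streams; a sketch assuming Java 8+ and the same mongo client as above:

String[] names = mongo.getServerAddressList().stream()
        .map(ServerAddress::toString)
        .toArray(String[]::new);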
From source file:com.edgytech.umongo.ServerNode.java
License:Apache License
public ServerNode(ServerAddress serverAddress, MongoClientOptions opts, boolean isReplica, boolean isConfig) {
    setLabel(serverAddress.toString());
    this.serverAddress = serverAddress;
    serverMongo = new MongoClient(serverAddress, opts);
    serverMongo.addOption(Bytes.QUERYOPTION_SLAVEOK);
    this.isReplica = isReplica;
    this.isConfig = isConfig;
    try {
        xmlLoad(Resource.getXmlDir(), Resource.File.serverNode, null);
    } catch (Exception ex) {
        getLogger().log(Level.SEVERE, null, ex);
    }
    markStructured();
}
From source file:com.edgytech.umongo.ServerPanel.java
License:Apache License
@Override
protected void updateComponentCustom(JPanel comp) {
    try {
        ServerNode node = getServerNode();
        if (node.isConfig) {
            ((Menu) getBoundUnit(Item.replica)).enabled = false;
        }
        MongoClient svrMongo = node.getServerMongoClient();
        ServerAddress addr = getServerNode().getServerAddress();
        if (addr != null) {
            setStringFieldValue(Item.host, addr.toString());
            setStringFieldValue(Item.address, addr.getSocketAddress().toString());
        }
        CommandResult res = svrMongo.getDB("local").command("isMaster");
        boolean master = res.getBoolean("ismaster");
        String replication = MongoUtils.makeInfoString("master", master,
                "secondary", res.getBoolean("secondary"), "passive", res.getBoolean("passive"));
        setStringFieldValue(Item.replication, replication);
        ((Text) getBoundUnit(Item.replication)).showIcon = master;
        setStringFieldValue(Item.maxObjectSize, String.valueOf(svrMongo.getMaxBsonObjectSize()));
        // ((CmdField) getBoundUnit(Item.serverStatus)).updateFromCmd(svrMongo);
        //
        // DBObject svrStatus = ((DocField) getBoundUnit(Item.serverStatus)).getDoc();
        // boolean dur = svrStatus.containsField("dur");
        // ((Text) getBoundUnit(Item.journaling)).setStringValue(dur ? "On" : "Off");
        // ((Text) getBoundUnit(Item.journaling)).showIcon = dur;
    } catch (Exception e) {
        UMongo.instance.showError(this.getClass().getSimpleName() + " update", e);
    }
}
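Note the two address forms the panel displays: addr.toString() is the plain host:port, while addr.getSocketAddress() resolves the host to a java.net.InetSocketAddress, whose toString() typically also shows the resolved IP. A small sketch of the difference (placeholder host; getSocketAddress() performs a DNS lookup):

import com.mongodb.ServerAddress;

public class AddressFormsDemo {
    public static void main(String[] args) {
        ServerAddress addr = new ServerAddress("localhost", 27017);
        System.out.println(addr.toString());                    // localhost:27017
        System.out.println(addr.getSocketAddress().toString()); // e.g. localhost/127.0.0.1:27017
    }
}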
From source file:com.stratio.deep.mongodb.extractor.MongoNativeExtractor.java
License:Apache License
/**
 * Calculate splits.
 *
 * @param collection the collection
 * @return the calculated partitions
 */
private DeepPartition[] calculateSplits(DBCollection collection) {
    BasicDBList splitData = getSplitData(collection);
    List<ServerAddress> serverAddressList = collection.getDB().getMongo().getServerAddressList();
    if (splitData == null) {
        Pair<BasicDBList, List<ServerAddress>> pair = getSplitDataCollectionShardEnviroment(
                getShards(collection), collection.getDB().getName(), collection.getName());
        splitData = pair.left;
        serverAddressList = pair.right;
    }
    Object lastKey = null; // Lower boundary of the first min split
    List<String> stringHosts = new ArrayList<>();
    for (ServerAddress serverAddress : serverAddressList) {
        stringHosts.add(serverAddress.toString());
    }
    int i = 0;
    MongoPartition[] partitions = new MongoPartition[splitData.size() + 1];
    for (Object aSplitData : splitData) {
        BasicDBObject currentKey = (BasicDBObject) aSplitData;
        Object currentO = currentKey.get(MONGO_DEFAULT_ID);
        partitions[i] = new MongoPartition(mongoDeepJobConfig.getRddId(), i,
                new DeepTokenRange(lastKey, currentO, stringHosts), MONGO_DEFAULT_ID);
        lastKey = currentO;
        i++;
    }
    QueryBuilder queryBuilder = QueryBuilder.start(MONGO_DEFAULT_ID);
    queryBuilder.greaterThanEquals(lastKey);
    partitions[i] = new MongoPartition(0, i, new DeepTokenRange(lastKey, null, stringHosts), MONGO_DEFAULT_ID);
    return partitions;
}
From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java
License:Apache License
@SuppressWarnings({ "rawtypes" }) private void init() throws IOException { List<String> h = storagePluginConfig.getHosts(); List<ServerAddress> addresses = Lists.newArrayList(); for (String host : h) { addresses.add(new ServerAddress(host)); }// www. j a v a2 s. c o m MongoClient client = storagePlugin.getClient(); chunksMapping = Maps.newHashMap(); chunksInverseMapping = Maps.newLinkedHashMap(); if (isShardedCluster(client)) { MongoDatabase db = client.getDatabase(CONFIG); MongoCollection<Document> chunksCollection = db.getCollection(CHUNKS); Document filter = new Document(); filter.put(NS, this.scanSpec.getDbName() + "." + this.scanSpec.getCollectionName()); Document projection = new Document(); projection.put(SHARD, select); projection.put(MIN, select); projection.put(MAX, select); FindIterable<Document> chunkCursor = chunksCollection.find(filter).projection(projection); MongoCursor<Document> iterator = chunkCursor.iterator(); MongoCollection<Document> shardsCollection = db.getCollection(SHARDS); projection = new Document(); projection.put(HOST, select); boolean hasChunks = false; while (iterator.hasNext()) { Document chunkObj = iterator.next(); String shardName = (String) chunkObj.get(SHARD); String chunkId = (String) chunkObj.get(ID); filter = new Document(ID, shardName); FindIterable<Document> hostCursor = shardsCollection.find(filter).projection(projection); MongoCursor<Document> hostIterator = hostCursor.iterator(); while (hostIterator.hasNext()) { Document hostObj = hostIterator.next(); String hostEntry = (String) hostObj.get(HOST); String[] tagAndHost = StringUtils.split(hostEntry, '/'); String[] hosts = tagAndHost.length > 1 ? StringUtils.split(tagAndHost[1], ',') : StringUtils.split(tagAndHost[0], ','); List<String> chunkHosts = Arrays.asList(hosts); Set<ServerAddress> addressList = getPreferredHosts(storagePlugin.getClient(addresses), chunkHosts); if (addressList == null) { addressList = Sets.newHashSet(); for (String host : chunkHosts) { addressList.add(new ServerAddress(host)); } } chunksMapping.put(chunkId, addressList); ServerAddress address = addressList.iterator().next(); List<ChunkInfo> chunkList = chunksInverseMapping.get(address.getHost()); if (chunkList == null) { chunkList = Lists.newArrayList(); chunksInverseMapping.put(address.getHost(), chunkList); } List<String> chunkHostsList = new ArrayList<String>(); for (ServerAddress serverAddr : addressList) { chunkHostsList.add(serverAddr.toString()); } ChunkInfo chunkInfo = new ChunkInfo(chunkHostsList, chunkId); Document minMap = (Document) chunkObj.get(MIN); Map<String, Object> minFilters = Maps.newHashMap(); Set keySet = minMap.keySet(); for (Object keyObj : keySet) { Object object = minMap.get(keyObj); if (!(object instanceof MinKey)) { minFilters.put(keyObj.toString(), object); } } chunkInfo.setMinFilters(minFilters); Map<String, Object> maxFilters = Maps.newHashMap(); Map maxMap = (Document) chunkObj.get(MAX); keySet = maxMap.keySet(); for (Object keyObj : keySet) { Object object = maxMap.get(keyObj); if (!(object instanceof MaxKey)) { maxFilters.put(keyObj.toString(), object); } } chunkInfo.setMaxFilters(maxFilters); chunkList.add(chunkInfo); } hasChunks = true; } // In a sharded environment, if a collection doesn't have any chunks, it is considered as an // unsharded collection and it will be stored in the primary shard of that database. if (!hasChunks) { handleUnshardedCollection(getPrimaryShardInfo(client)); } } else { handleUnshardedCollection(storagePluginConfig.getHosts()); } }
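The hostEntry parsing above copes with both shapes the host field of config.shards can take: a bare host list ("host1:27017,host2:27017") or a replica-set-qualified one ("rs0/host1:27017,host2:27017"). An isolated sketch of just that step, using an illustrative entry and commons-lang3 StringUtils:

import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.StringUtils;

public class ShardHostParseDemo {
    public static void main(String[] args) {
        String hostEntry = "rs0/node1.example.com:27017,node2.example.com:27017"; // illustrative value
        String[] tagAndHost = StringUtils.split(hostEntry, '/');
        // If a replica set tag is present, the hosts follow the '/'; otherwise the whole entry is hosts.
        String[] hosts = tagAndHost.length > 1 ? StringUtils.split(tagAndHost[1], ',')
                : StringUtils.split(tagAndHost[0], ',');
        List<String> chunkHosts = Arrays.asList(hosts);
        System.out.println(chunkHosts); // [node1.example.com:27017, node2.example.com:27017]
    }
}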
From source file:org.pentaho.di.trans.steps.mongodbinput.MongoDbInput.java
License:Open Source License
@Override
public boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {
    try {
        if (meta.getExecuteForEachIncomingRow() && m_currentInputRowDrivingQuery == null) {
            m_currentInputRowDrivingQuery = getRow();
            if (m_currentInputRowDrivingQuery == null) {
                // no more input, no more queries to make
                setOutputDone();
                return false;
            }
            if (!first) {
                initQuery();
            }
        }
        if (first) {
            data.outputRowMeta = new RowMeta();
            meta.getFields(data.outputRowMeta, getStepname(), null, null, MongoDbInput.this);
            initQuery();
            first = false;
            data.init();
        }
        boolean hasNext = ((meta.getQueryIsPipeline() ? data.m_pipelineResult.hasNext() : data.cursor.hasNext())
                && !isStopped());
        if (hasNext) {
            DBObject nextDoc = null;
            Object[] row = null;
            if (meta.getQueryIsPipeline()) {
                nextDoc = data.m_pipelineResult.next();
            } else {
                nextDoc = data.cursor.next();
            }
            if (!meta.getQueryIsPipeline() && !m_serverDetermined) {
                ServerAddress s = data.cursor.getServerAddress();
                if (s != null) {
                    m_serverDetermined = true;
                    logBasic(BaseMessages.getString(PKG, "MongoDbInput.Message.QueryPulledDataFrom", //$NON-NLS-1$
                            s.toString()));
                }
            }
            if (meta.getOutputJson() || meta.getMongoFields() == null || meta.getMongoFields().size() == 0) {
                String json = nextDoc.toString();
                row = RowDataUtil.allocateRowData(data.outputRowMeta.size());
                int index = 0;
                row[index++] = json;
                putRow(data.outputRowMeta, row);
            } else {
                Object[][] outputRows = data.mongoDocumentToKettle(nextDoc, MongoDbInput.this);
                // there may be more than one row if the paths contain an array unwind
                for (int i = 0; i < outputRows.length; i++) {
                    putRow(data.outputRowMeta, outputRows[i]);
                }
            }
        } else {
            if (!meta.getExecuteForEachIncomingRow()) {
                setOutputDone();
                return false;
            } else {
                m_currentInputRowDrivingQuery = null; // finished with this row
            }
        }
        return true;
    } catch (Exception e) {
        if (e instanceof KettleException) {
            throw (KettleException) e;
        } else {
            throw new KettleException(e); //$NON-NLS-1$
        }
    }
}
From source file:org.pentaho.di.trans.steps.mongodboutput.MongoDbOutput.java
License:Open Source License
protected void doBatch() throws KettleException, MongoDbException {
    int retries = 0;
    MongoException lastEx = null;
    while (retries <= m_writeRetries && !isStopped()) {
        WriteResult result = null;
        CommandResult cmd = null;
        try {
            if (retries == 0) {
                result = m_data.getCollection().insert(m_batch);
                cmd = result.getLastError();
                if (cmd != null && !cmd.ok()) {
                    String message = cmd.getErrorMessage();
                    logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message)); //$NON-NLS-1$
                    cmd.throwOnError();
                }
            } else {
                // fall back to save
                logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.SavingIndividualDocsInCurrentBatch"));
                cmd = batchRetryUsingSave(retries == m_writeRetries);
            }
        } catch (MongoException me) {
            // avoid exception if a timeout issue occurred and it was exactly the first attempt
            boolean shouldNotBeAvoided = !isTimeoutException(me) && (retries == 0);
            if (shouldNotBeAvoided) {
                lastEx = me;
            }
            retries++;
            if (retries <= m_writeRetries) {
                if (shouldNotBeAvoided) {
                    // skip logging error
                    // however do not skip saving elements separately during next attempt to prevent losing data
                    logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.ErrorWritingToMongo", //$NON-NLS-1$
                            me.toString()));
                    logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Message.Retry", //$NON-NLS-1$
                            m_writeRetryDelay));
                }
                try {
                    Thread.sleep(m_writeRetryDelay * 1000); // CHECKSTYLE:OFF
                } catch (InterruptedException e) {
                    // CHECKSTYLE:ON
                }
            }
            // throw new KettleException(me.getMessage(), me);
        }
        if (cmd != null) {
            ServerAddress s = cmd.getServerUsed();
            if (s != null) {
                logDetailed(BaseMessages.getString(PKG, "MongoDbOutput.Messages.WroteBatchToServer", s.toString())); //$NON-NLS-1$
            }
        }
        if (cmd != null && cmd.ok()) {
            break;
        }
    }
    if ((retries > m_writeRetries || isStopped()) && lastEx != null) {
        throw new KettleException(lastEx);
    }
    m_batch.clear();
    m_batchRows.clear();
}
From source file:org.pentaho.mongo.MongoUtils.java
License:Open Source License
public static void main(String[] args) {
    try {
        String hostPort = args[0];
        String defaultPort = args[1];
        Variables vars = new Variables();
        List<String> repSetTags = MongoUtils.getAllTags(hostPort, defaultPort, null, vars, null);
        System.out.println("Number of tags: " + repSetTags.size()); //$NON-NLS-1$
        for (String tag : repSetTags) {
            System.out.println(tag);
        }
        List<ServerAddress> repSetMembers = MongoUtils.getReplicaSetMembers(hostPort, defaultPort, null, vars, null);
        System.out.println("Number of replica set members: " + repSetMembers.size());
        for (ServerAddress s : repSetMembers) {
            System.out.println(s.toString());
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
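A hypothetical invocation, passing a host:port seed and a default port as the two positional arguments (classpath abbreviated):

java -cp <kettle-and-driver-jars> org.pentaho.mongo.MongoUtils localhost:27017 27017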