Example usage for com.mongodb ServerAddress getHost

Introduction

This page lists usage examples for com.mongodb.ServerAddress.getHost().

Prototype

public String getHost() 

Document

Gets the hostname.
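
For orientation before the full examples, here is a minimal, self-contained sketch of the call. The host name and port are illustrative, not taken from any example below:

import com.mongodb.ServerAddress;

public class ServerAddressDemo {
    public static void main(String[] args) {
        // An explicit host and port; the no-arg constructor defaults to 127.0.0.1:27017
        ServerAddress address = new ServerAddress("db.example.com", 27017);
        System.out.println(address.getHost()); // prints "db.example.com"
        System.out.println(address.getPort()); // prints 27017
    }
}

As the examples below show, getHost() is usually paired with getPort() to rebuild a "host:port" string or a connection URI.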

Usage

From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java

License:Apache License

@Override
public List<EndpointAffinity> getOperatorAffinity() {
    watch.reset();
    watch.start();

    Map<String, DrillbitEndpoint> endpointMap = Maps.newHashMap();
    for (DrillbitEndpoint endpoint : storagePlugin.getContext().getBits()) {
        endpointMap.put(endpoint.getAddress(), endpoint);
        logger.debug("Endpoint address: {}", endpoint.getAddress());
    }

    Map<DrillbitEndpoint, EndpointAffinity> affinityMap = Maps.newHashMap();
    // As of now, considering only the first replica, though there may be
    // multiple replicas for each chunk.
    for (Set<ServerAddress> addressList : chunksMapping.values()) {
        // Each replica can be on multiple machines, take the first one, which
        // meets affinity.
        for (ServerAddress address : addressList) {
            DrillbitEndpoint ep = endpointMap.get(address.getHost());
            if (ep != null) {
                EndpointAffinity affinity = affinityMap.get(ep);
                if (affinity == null) {
                    affinityMap.put(ep, new EndpointAffinity(ep, 1));
                } else {
                    affinity.addAffinity(1);
                }
                break;
            }
        }
    }
    logger.debug("Took {} s to get operator affinity", watch.elapsed(TimeUnit.NANOSECONDS) / 1000);
    logger.debug("Affined drillbits : " + affinityMap.values());
    return Lists.newArrayList(affinityMap.values());
}

From source file:org.apache.hadoop.contrib.mongoreduce.MongoInputFormat.java

License:Apache License

public static String[] hostsForShard(String shardName, boolean primaryOk)
        throws UnknownHostException, MongoException {

    ArrayList<String> hosts = new ArrayList<String>();

    String[] parts = shardName.split("/");
    if (parts.length == 1) { // no replicas
        hosts.add(shardName);
    } else { // replicas

        // get first or only host listed
        String host = parts[1].split(",")[0];
        Mongo h = new Mongo(host);
        List<ServerAddress> addresses = h.getServerAddressList();
        h.close();
        h = null;

        // only one node in replica set ... - use it
        if (addresses.size() == 1) {
            ServerAddress addr = addresses.get(0);
            hosts.add(addr.getHost() + ":" + Integer.toString(addr.getPort()));
        }

        else {
            for (ServerAddress addr : addresses) {

                // use secondaries and primaries
                if (primaryOk) {
                    hosts.add(addr.getHost() + ":" + Integer.toString(addr.getPort()));
                }

                // only use secondaries: connect to each node and ask whether it is the primary
                else {
                    String haddr = addr.getHost() + ":" + Integer.toString(addr.getPort());
                    h = new Mongo(haddr);
                    // 'cmd' is presumably the class's {ismaster: 1} command object
                    if (!(Boolean) h.getDB(h.getDatabaseNames().get(0)).command(cmd).get("ismaster")) {
                        hosts.add(haddr);
                    }
                    h.close(); // close the probe connection to avoid leaking it
                }
            }
        }
    }

    return hosts.toArray(new String[0]);
}

From source file:org.apache.logging.log4j.nosql.appender.mongodb.MongoDbProvider.java

License:Apache License

/**
 * Factory method for creating a MongoDB provider within the plugin manager.
 *
 * @param collectionName The name of the MongoDB collection to which log events should be written.
 * @param writeConcernConstant The {@link WriteConcern} constant to control writing details, defaults to
 *                             {@link WriteConcern#ACKNOWLEDGED}.
 * @param writeConcernConstantClassName The name of a class containing the aforementioned static WriteConcern
 *                                      constant. Defaults to {@link WriteConcern}.
 * @param databaseName The name of the MongoDB database containing the collection to which log events should be
 *                     written. Mutually exclusive with {@code factoryClassName&factoryMethodName!=null}.
 * @param server The host name of the MongoDB server, defaults to localhost and mutually exclusive with
 *               {@code factoryClassName&factoryMethodName!=null}.
 * @param port The port the MongoDB server is listening on, defaults to the default MongoDB port and mutually
 *             exclusive with {@code factoryClassName&factoryMethodName!=null}.
 * @param userName The username to authenticate against the MongoDB server with.
 * @param password The password to authenticate against the MongoDB server with.
 * @param factoryClassName A fully qualified class name containing a static factory method capable of returning a
 *                         {@link DB} or a {@link MongoClient}.
 * @param factoryMethodName The name of the public static factory method belonging to the aforementioned factory
 *                          class.
 * @return a new MongoDB provider.
 */
@PluginFactory
public static MongoDbProvider createNoSqlProvider(
        @PluginAttribute("collectionName") final String collectionName,
        @PluginAttribute("writeConcernConstant") final String writeConcernConstant,
        @PluginAttribute("writeConcernConstantClass") final String writeConcernConstantClassName,
        @PluginAttribute("databaseName") final String databaseName,
        @PluginAttribute("server") final String server, @PluginAttribute("port") final String port,
        @PluginAttribute("userName") final String userName,
        @PluginAttribute(value = "password", sensitive = true) final String password,
        @PluginAttribute("factoryClassName") final String factoryClassName,
        @PluginAttribute("factoryMethodName") final String factoryMethodName) {
    DB database;
    String description;
    if (Strings.isNotEmpty(factoryClassName) && Strings.isNotEmpty(factoryMethodName)) {
        try {
            final Class<?> factoryClass = Loader.loadClass(factoryClassName);
            final Method method = factoryClass.getMethod(factoryMethodName);
            final Object object = method.invoke(null);

            if (object instanceof DB) {
                database = (DB) object;
            } else if (object instanceof MongoClient) {
                if (Strings.isNotEmpty(databaseName)) {
                    database = ((MongoClient) object).getDB(databaseName);
                } else {
                    LOGGER.error("The factory method [{}.{}()] returned a MongoClient so the database name is "
                            + "required.", factoryClassName, factoryMethodName);
                    return null;
                }
            } else if (object == null) {
                LOGGER.error("The factory method [{}.{}()] returned null.", factoryClassName,
                        factoryMethodName);
                return null;
            } else {
                LOGGER.error("The factory method [{}.{}()] returned an unsupported type [{}].",
                        factoryClassName, factoryMethodName, object.getClass().getName());
                return null;
            }

            description = "database=" + database.getName();
            final List<ServerAddress> addresses = database.getMongo().getAllAddress();
            if (addresses.size() == 1) {
                description += ", server=" + addresses.get(0).getHost() + ", port="
                        + addresses.get(0).getPort();
            } else {
                description += ", servers=[";
                for (final ServerAddress address : addresses) {
                    description += " { " + address.getHost() + ", " + address.getPort() + " } ";
                }
                description += "]";
            }
        } catch (final ClassNotFoundException e) {
            LOGGER.error("The factory class [{}] could not be loaded.", factoryClassName, e);
            return null;
        } catch (final NoSuchMethodException e) {
            LOGGER.error("The factory class [{}] does not have a no-arg method named [{}].", factoryClassName,
                    factoryMethodName, e);
            return null;
        } catch (final Exception e) {
            LOGGER.error("The factory method [{}.{}()] could not be invoked.", factoryClassName,
                    factoryMethodName, e);
            return null;
        }
    } else if (Strings.isNotEmpty(databaseName)) {
        List<MongoCredential> credentials = new ArrayList<>();
        description = "database=" + databaseName;
        if (Strings.isNotEmpty(userName) && Strings.isNotEmpty(password)) {
            description += ", username=" + userName + ", passwordHash="
                    + NameUtil.md5(password + MongoDbProvider.class.getName());
            credentials.add(MongoCredential.createCredential(userName, databaseName, password.toCharArray()));
        }
        try {
            if (Strings.isNotEmpty(server)) {
                final int portInt = AbstractAppender.parseInt(port, 0);
                description += ", server=" + server;
                if (portInt > 0) {
                    description += ", port=" + portInt;
                    database = new MongoClient(new ServerAddress(server, portInt), credentials)
                            .getDB(databaseName);
                } else {
                    database = new MongoClient(new ServerAddress(server), credentials).getDB(databaseName);
                }
            } else {
                database = new MongoClient(new ServerAddress(), credentials).getDB(databaseName);
            }
        } catch (final Exception e) {
            LOGGER.error("Failed to obtain a database instance from the MongoClient at server [{}] and "
                    + "port [{}].", server, port);
            return null;
        }
    } else {
        LOGGER.error("No factory method was provided so the database name is required.");
        return null;
    }

    try {
        database.getCollectionNames(); // Check if the database actually requires authentication
    } catch (final Exception e) {
        LOGGER.error(
                "The database is not up, or you are not authenticated, try supplying a username and password to the MongoDB provider.",
                e);
        return null;
    }

    WriteConcern writeConcern = toWriteConcern(writeConcernConstant, writeConcernConstantClassName);

    return new MongoDbProvider(database, writeConcern, collectionName, description);
}

From source file:org.apache.rya.indexing.geoExamples.RyaMongoGeoDirectExample.java

License:Apache License

private static Configuration getConf() throws IOException {

    MongoDBIndexingConfigBuilder builder = MongoIndexingConfiguration.builder().setUseMockMongo(USE_MOCK)
            .setUseInference(USE_INFER).setAuths("U");

    if (USE_MOCK) {
        mock = EmbeddedMongoFactory.newFactory();
        MongoClient c = mock.newMongoClient();
        ServerAddress address = c.getAddress();
        String host = address.getHost();
        String port = Integer.toString(address.getPort());
        c.close();
        builder.setMongoHost(host).setMongoPort(port);
    } else {
        // User name and password must be filled in:
        builder = builder.setMongoUser("fill this in").setMongoPassword("fill this in")
                .setMongoHost(MONGO_INSTANCE_URL).setMongoPort(MONGO_INSTANCE_PORT);
    }

    return builder.setMongoDBName(MONGO_DB).setMongoCollectionPrefix(MONGO_COLL_PREFIX)
            .setUseMongoFreetextIndex(true).setMongoFreeTextPredicates(RDFS.LABEL.stringValue()).build();

}

From source file:org.apache.rya.indexing.mongodb.AbstractMongoIndexer.java

License:Apache License

@VisibleForTesting
public void initIndexer(final Configuration conf, final MongoClient client) {
    setClient(client);
    final ServerAddress address = client.getAddress();
    conf.set(MongoDBRdfConfiguration.MONGO_INSTANCE, address.getHost());
    conf.set(MongoDBRdfConfiguration.MONGO_INSTANCE_PORT, Integer.toString(address.getPort()));
    setConf(conf);
    if (!isInit) {
        init();
        isInit = true;
    }
}

From source file:org.cloudfoundry.java.test.core.MongoDbUtils.java

License:Apache License

private String getServerString(DB mongoDb, ServerAddress serverAddress) {
    String host = serverAddress.getHost();
    int port = serverAddress.getPort();
    String name = mongoDb.getName();
    return String.format("mongodb://%s:%d/%s", host, port, name);
}
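
As a usage sketch (assuming mongo is a connected Mongo instance and the database name is illustrative), a call like the following would return "mongodb://localhost:27017/test":

String uri = getServerString(mongo.getDB("test"), new ServerAddress("localhost", 27017));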

From source file:org.elasticsearch.river.mongodb.MongoDBRiver.java

License:Apache License

/**
 * Execute actions to (re-)start the river on this node.
 */
void internalStartRiver() {
    if (startupThread != null) {
        // Already processing a request to start up the river, so ignore this call.
        return;
    }
    // Update the status: we're busy starting now.
    context.setStatus(Status.STARTING);

    // ES only starts one River at a time, so we start the river using a new thread so that
    // we don't block the startup of other rivers
    Runnable startupRunnable = new Runnable() {
        @Override
        public void run() {
            // Log some info about what we're about to do now.
            logger.info("Starting river {}", riverName.getName());
            logger.info(
                    "MongoDB options: secondaryreadpreference [{}], drop_collection [{}], include_collection [{}], throttlesize [{}], gridfs [{}], filter [{}], db [{}], collection [{}], script [{}], indexing to [{}]/[{}]",
                    definition.isMongoSecondaryReadPreference(), definition.isDropCollection(),
                    definition.getIncludeCollection(), definition.getThrottleSize(), definition.isMongoGridFS(),
                    definition.getMongoOplogFilter(), definition.getMongoDb(), definition.getMongoCollection(),
                    definition.getScript(), definition.getIndexName(), definition.getTypeName());

            for (ServerAddress server : definition.getMongoServers()) {
                logger.debug("Using MongoDB server(s): host [{}], port [{}]", server.getHost(),
                        server.getPort());
            }

            try {
                // Create the index if it does not exist
                try {
                    if (!esClient.admin().indices().prepareExists(definition.getIndexName()).get().isExists()) {
                        esClient.admin().indices().prepareCreate(definition.getIndexName()).get();
                    }
                } catch (Exception e) {
                    if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
                        // that's fine
                    } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) {
                        // ok, not recovered yet...; let's start indexing and hope we
                        // recover by the first bulk
                        // TODO: a smarter logic can be to register for cluster
                        // event
                        // listener here, and only start sampling when the
                        // block is removed...
                    } else {
                        logger.error("failed to create index [{}], disabling river...", e,
                                definition.getIndexName());
                        return;
                    }
                }

                // GridFS
                if (definition.isMongoGridFS()) {
                    try {
                        if (logger.isDebugEnabled()) {
                            logger.debug("Set explicit attachment mapping.");
                        }
                        esClient.admin().indices().preparePutMapping(definition.getIndexName())
                                .setType(definition.getTypeName()).setSource(getGridFSMapping()).get();
                    } catch (Exception e) {
                        logger.warn("Failed to set explicit mapping (attachment): {}", e);
                    }
                }

                // Replicate data roughly the same way MongoDB does
                // https://groups.google.com/d/msg/mongodb-user/sOKlhD_E2ns/SvngoUHXtcAJ
                //
                // Steps:
                // Get oplog timestamp
                // Do the initial import
                // Sync from the oplog of each shard starting at timestamp
                //
                // Notes
                // Primary difference between river sync and MongoDB replica sync is that we ignore chunk migrations
                // We only need to know about CRUD commands. If data moves from one MongoDB shard to another
                // then we do not need to let ElasticSearch know that.
                MongoClient mongoClusterClient = mongoClientService.getMongoClusterClient(definition);
                MongoConfigProvider configProvider = new MongoConfigProvider(mongoClientService, definition);
                MongoConfig config;
                while (true) {
                    try {
                        config = configProvider.call();
                        break;
                    } catch (MongoSocketException | MongoTimeoutException e) {
                        Thread.sleep(MONGODB_RETRY_ERROR_DELAY_MS);
                    }
                }

                Timestamp startTimestamp = null;
                if (definition.getInitialTimestamp() != null) {
                    startTimestamp = definition.getInitialTimestamp();
                } else if (getLastProcessedTimestamp() != null) {
                    startTimestamp = getLastProcessedTimestamp();
                } else {
                    for (Shard shard : config.getShards()) {
                        if (startTimestamp == null
                                || shard.getLatestOplogTimestamp().compareTo(startTimestamp) < 1) {
                            startTimestamp = shard.getLatestOplogTimestamp();
                        }
                    }
                }

                // All good, mark the context as "running" now: this
                // status value is used as termination condition for the threads we're going to start now.
                context.setStatus(Status.RUNNING);

                indexerThread = EsExecutors
                        .daemonThreadFactory(settings.globalSettings(),
                                "mongodb_river_indexer:" + definition.getIndexName())
                        .newThread(
                                new Indexer(MongoDBRiver.this, definition, context, esClient, scriptService));
                indexerThread.start();

                // Import in main thread to block tailing the oplog
                Timestamp slurperStartTimestamp = getLastProcessedTimestamp();
                if (slurperStartTimestamp != null) {
                    logger.trace("Initial import already completed.");
                    // Start from where we last left off
                } else if (definition.isSkipInitialImport() || definition.getInitialTimestamp() != null) {
                    logger.info("Skip initial import from collection {}", definition.getMongoCollection());
                    // Start from the point requested
                    slurperStartTimestamp = definition.getInitialTimestamp();
                } else {
                    // Determine the timestamp to be used for all documents loaded as "initial import".
                    Timestamp initialImportTimestamp = null;
                    for (Shard shard : config.getShards()) {
                        if (initialImportTimestamp == null
                                || shard.getLatestOplogTimestamp().compareTo(initialImportTimestamp) < 1) {
                            initialImportTimestamp = shard.getLatestOplogTimestamp();
                        }
                    }
                    CollectionSlurper importer = new CollectionSlurper(mongoClusterClient, definition, context,
                            esClient);
                    importer.importInitial(initialImportTimestamp);
                    // Start slurping from the shard's oplog time
                    slurperStartTimestamp = null;
                }

                // Tail the oplog
                // NB: In a non-mongos environment the config will report a single shard, with the servers used for the connection as the replicas.
                for (Shard shard : config.getShards()) {
                    Timestamp shardSlurperStartTimestamp = slurperStartTimestamp != null ? slurperStartTimestamp
                            : shard.getLatestOplogTimestamp();
                    MongoClient mongoClient = mongoClientService.getMongoShardClient(definition,
                            shard.getReplicas());
                    Thread tailerThread = EsExecutors
                            .daemonThreadFactory(settings.globalSettings(),
                                    "mongodb_river_slurper_" + shard.getName() + ":"
                                            + definition.getIndexName())
                            .newThread(new OplogSlurper(shardSlurperStartTimestamp, mongoClusterClient,
                                    mongoClient, definition, context, esClient));
                    tailerThreads.add(tailerThread);
                }

                for (Thread thread : tailerThreads) {
                    thread.start();
                }
                logger.info("Started river {}", riverName.getName());
            } catch (Throwable t) {
                logger.warn("Failed to start river {}", t, riverName.getName());
                MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.START_FAILED);
                context.setStatus(Status.START_FAILED);
            } finally {
                // Startup is fully done
                startupThread = null;
            }
        }
    };
    startupThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
            "mongodb_river_startup:" + definition.getIndexName()).newThread(startupRunnable);
    startupThread.start();
}

From source file:org.grails.datastore.gorm.mongo.bean.factory.GMongoFactoryBean.java

License:Apache License

public void afterPropertiesSet() throws UnknownHostException {
    // apply defaults - convenient when used to configure for tests
    // in an application context
    if (mongo != null) {
        return;
    }

    ServerAddress defaults = new ServerAddress();
    if (mongoOptions == null)
        mongoOptions = new MongoOptions();
    if (replicaPair != null) {
        if (replicaPair.size() < 2) {
            throw new DatastoreConfigurationException("A replica pair must have two server entries");
        }
        mongo = new GMongo(replicaPair.get(0), replicaPair.get(1), mongoOptions);
    } else if (replicaSetSeeds != null) {
        mongo = new GMongo(replicaSetSeeds, mongoOptions);
    } else {
        String mongoHost = host != null ? host : defaults.getHost();
        if (port != null) {
            mongo = new GMongo(new ServerAddress(mongoHost, port), mongoOptions);
        } else {
            mongo = new GMongo(mongoHost, mongoOptions);
        }
    }
}

From source file:org.grails.datastore.gorm.mongo.bean.factory.MongoClientFactoryBean.java

License:Apache License

public void afterPropertiesSet() throws UnknownHostException {
    // apply defaults - convenient when used to configure for tests
    // in an application context
    if (mongo != null) {
        return;
    }

    ServerAddress defaults = new ServerAddress();
    List<MongoCredential> credentials = new ArrayList<MongoCredential>();
    if (mongoOptions == null) {
        MongoClientOptions.Builder builder = MongoClientOptions.builder();
        builder.codecRegistry(CodecRegistries.fromRegistries(codecRegistries));
        mongoOptions = builder.build();
    }
    // If a username/password pair was provided, build credentials for the connection
    if (username != null && password != null) {
        credentials.add(MongoCredential.createCredential(username, database, password.toCharArray()));
    }

    if (replicaPair != null) {
        if (replicaPair.size() < 2) {
            throw new DatastoreConfigurationException("A replica pair must have two server entries");
        }
        mongo = new MongoClient(replicaPair, credentials, mongoOptions);
    } else if (replicaSetSeeds != null) {
        mongo = new MongoClient(replicaSetSeeds, credentials, mongoOptions);
    } else if (clientURI != null) {
        mongo = new MongoClient(clientURI);
    } else if (connectionString != null) {
        mongo = new MongoClient(new MongoClientURI(connectionString));
    } else {
        String mongoHost = host != null ? host : defaults.getHost();
        if (port != null) {
            mongo = new MongoClient(new ServerAddress(mongoHost, port), credentials, mongoOptions);
        } else {
            mongo = new MongoClient(new ServerAddress(mongoHost), credentials, mongoOptions);
        }
    }

}

From source file:org.grails.datastore.mapping.mongo.MongoDatastore.java

License:Apache License

public void afterPropertiesSet() throws Exception {
    if (mongo == null) {
        ServerAddress defaults = new ServerAddress();
        MongoFactoryBean dbFactory = new MongoFactoryBean();
        dbFactory.setHost(read(String.class, MONGO_HOST, connectionDetails, defaults.getHost()));
        dbFactory.setPort(read(Integer.class, MONGO_PORT, connectionDetails, defaults.getPort()));
        if (mongoOptions != null) {
            dbFactory.setMongoOptions(mongoOptions);
        }

        dbFactory.afterPropertiesSet();
        mongo = dbFactory.getObject();
    }

    for (PersistentEntity entity : mappingContext.getPersistentEntities()) {
        // Only create Mongo templates for entities that are mapped with Mongo
        if (!entity.isExternal()) {
            createMongoTemplate(entity, mongo);
        }
    }
}