Example usage for java.net InetSocketAddress getHostName

List of usage examples for java.net InetSocketAddress getHostName

Introduction

On this page you can find usage examples for java.net.InetSocketAddress.getHostName(), drawn from the source files listed below.

Prototype

public final String getHostName() 

Document

Gets the hostname. Note that this method may trigger a reverse name-service lookup if the address was created with a literal IP address.
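
Before the project examples below, here is a minimal, self-contained sketch of the call; the host name "example.org" and port 8080 are placeholder values.

import java.net.InetSocketAddress;

public class GetHostNameExample {
    public static void main(String[] args) {
        // Created from a host name: the name is kept, so getHostName() returns it
        // without any further lookup.
        InetSocketAddress byName = new InetSocketAddress("example.org", 8080);
        System.out.println(byName.getHostName() + ":" + byName.getPort()); // example.org:8080

        // Created from a literal IP address: getHostName() may trigger a reverse
        // name-service lookup, as the Javadoc note above warns.
        InetSocketAddress byIp = new InetSocketAddress("127.0.0.1", 8080);
        System.out.println(byIp.getHostName());
    }
}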

Usage

From source file:com.cloud.bridge.io.S3CAStorBucketAdapter.java
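Builds the HTTP URL for reading an object from a located CAStor node; getHostName() appears only in the error raised when the node's name cannot be resolved to an IP address.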

private String castorURL(String mountedRoot, String bucket, String fileName) {
    // TODO: Replace this method with access to ScspClient's Locator,
    // or add read method that returns the body as an unread
    // InputStream for use by loadObject() and loadObjectRange().

    myClient(mountedRoot); // make sure castorNodes and castorPort initialized
    InetSocketAddress nodeAddr = _locator.locate();
    if (nodeAddr == null) {
        throw new ConfigurationException("Unable to locate CAStor node with locator " + _locator);
    }
    InetAddress nodeInetAddr = nodeAddr.getAddress();
    if (nodeInetAddr == null) {
        _locator.foundDead(nodeAddr);
        throw new ConfigurationException(
                "Unable to resolve CAStor node name '" + nodeAddr.getHostName() + "' to IP address");
    }
    return "http://" + nodeInetAddr.getHostAddress() + ":" + nodeAddr.getPort() + "/" + bucket + "/" + fileName
            + (_domain == null ? "" : "?domain=" + _domain);
}

From source file:com.blackducksoftware.integration.hub.jenkins.PostBuildHubScan.java
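Resolves the Jenkins proxy configuration against the Hub server URL and copies the proxy address's host name, port and, when configured, credentials onto the scan executor.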

public void addProxySettingsToScanner(final IntLogger logger, final JenkinsScanExecutor scan)
        throws BDJenkinsHubPluginException, HubIntegrationException, URISyntaxException, MalformedURLException {
    final Jenkins jenkins = Jenkins.getInstance();
    if (jenkins != null) {
        final ProxyConfiguration proxyConfig = jenkins.proxy;
        if (proxyConfig != null) {

            final URL serverUrl = new URL(getHubServerInfo().getServerUrl());

            final Proxy proxy = ProxyConfiguration.createProxy(serverUrl.getHost(), proxyConfig.name,
                    proxyConfig.port, proxyConfig.noProxyHost);

            if (proxy != Proxy.NO_PROXY && proxy.address() != null) {
                final InetSocketAddress proxyAddress = (InetSocketAddress) proxy.address();
                if (StringUtils.isNotBlank(proxyAddress.getHostName()) && proxyAddress.getPort() != 0) {
                    if (StringUtils.isNotBlank(jenkins.proxy.getUserName())
                            && StringUtils.isNotBlank(jenkins.proxy.getPassword())) {
                        scan.setProxyHost(proxyAddress.getHostName());
                        scan.setProxyPort(proxyAddress.getPort());
                        scan.setProxyUsername(jenkins.proxy.getUserName());
                        scan.setProxyPassword(jenkins.proxy.getPassword());

                    } else {
                        scan.setProxyHost(proxyAddress.getHostName());
                        scan.setProxyPort(proxyAddress.getPort());
                    }
                    if (logger != null) {
                        logger.debug("Using proxy: '" + proxyAddress.getHostName() + "' at Port: '"
                                + proxyAddress.getPort() + "'");
                    }
                }
            }
        }
    }
}

From source file:com.blackducksoftware.integration.hub.jenkins.PostBuildHubScan.java
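The same proxy lookup as the previous example, but the proxy host name and port are applied to the remote CLI installer instead of the scanner.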

public void addProxySettingsToCLIInstaller(final IntLogger logger, final CLIRemoteInstall remoteCLIInstall)
        throws BDJenkinsHubPluginException, HubIntegrationException, URISyntaxException, MalformedURLException {
    final Jenkins jenkins = Jenkins.getInstance();
    if (jenkins != null) {
        final ProxyConfiguration proxyConfig = jenkins.proxy;
        if (proxyConfig != null) {

            final URL serverUrl = new URL(getHubServerInfo().getServerUrl());

            final Proxy proxy = ProxyConfiguration.createProxy(serverUrl.getHost(), proxyConfig.name,
                    proxyConfig.port, proxyConfig.noProxyHost);

            if (proxy != Proxy.NO_PROXY && proxy.address() != null) {
                final InetSocketAddress proxyAddress = (InetSocketAddress) proxy.address();
                if (StringUtils.isNotBlank(proxyAddress.getHostName()) && proxyAddress.getPort() != 0) {
                    if (StringUtils.isNotBlank(jenkins.proxy.getUserName())
                            && StringUtils.isNotBlank(jenkins.proxy.getPassword())) {
                        remoteCLIInstall.setProxyHost(proxyAddress.getHostName());
                        remoteCLIInstall.setProxyPort(proxyAddress.getPort());
                        remoteCLIInstall.setProxyUserName(jenkins.proxy.getUserName());
                        remoteCLIInstall.setProxyPassword(jenkins.proxy.getPassword());

                    } else {
                        remoteCLIInstall.setProxyHost(proxyAddress.getHostName());
                        remoteCLIInstall.setProxyPort(proxyAddress.getPort());
                    }
                    if (logger != null) {
                        logger.debug("Using proxy: '" + proxyAddress.getHostName() + "' at Port: '"
                                + proxyAddress.getPort() + "'");
                    }
                }
            }
        }
    }
}

From source file:org.apache.hadoop.ha.DummyHAService.java
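Uses the requested address's host name as the bind address for a test Hadoop RPC server, then returns the address the server actually listens on.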

private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAddress) {
    Configuration conf = new Configuration();

    try {
        RPC.setProtocolEngine(conf, HAServiceProtocolPB.class, ProtobufRpcEngine.class);
        HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new HAServiceProtocolServerSideTranslatorPB(
                new MockHAProtocolImpl());
        BlockingService haPbService = HAServiceProtocolService
                .newReflectiveBlockingService(haServiceProtocolXlator);

        Server server = new RPC.Builder(conf).setProtocol(HAServiceProtocolPB.class).setInstance(haPbService)
                .setBindAddress(serverAddress.getHostName()).setPort(serverAddress.getPort()).build();
        server.start();
        return NetUtils.getConnectAddress(server);
    } catch (IOException e) {
        return null;
    }
}

From source file:org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.java
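When the mini cluster is configured to use MiniDFS, the NameNode address's host name and port form the hdfs:// URI that is written into core-site.xml and the site configuration.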

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = config.getHadoopConfiguration().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile,
            Maps.filterEntries(config.getSiteConfig(),
                    v -> org.apache.accumulo.core.client.ClientConfiguration.ClientProperty
                            .getPropertyByKey(v.getKey()) != null));

    Map<String, String> clientProps = config.getClientProps();
    clientProps.put(ClientProperty.INSTANCE_ZOOKEEPERS.getKey(), config.getZooKeepers());
    clientProps.put(ClientProperty.INSTANCE_NAME.getKey(), config.getInstanceName());
    if (!clientProps.containsKey(ClientProperty.AUTH_TYPE.getKey())) {
        clientProps.put(ClientProperty.AUTH_TYPE.getKey(), "password");
        clientProps.put(ClientProperty.AUTH_PRINCIPAL.getKey(), config.getRootUserName());
        clientProps.put(ClientProperty.AUTH_TOKEN.getKey(), config.getRootPassword());
    }

    File clientPropsFile = config.getClientPropsFile();
    writeConfigProperties(clientPropsFile, clientProps);

    File siteFile = new File(config.getConfDir(), "accumulo.properties");
    writeConfigProperties(siteFile, config.getSiteConfig());
    siteConfig = new SiteConfiguration(siteFile);

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly
        // escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }
    clusterControl = new MiniAccumuloClusterControl(this);
}

From source file:my.adam.smo.client.HTTPClient.java
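Builds the target URL of an HTTP request from the remote address's host name and port; the request carries a Base64-encoded protobuf RPC payload.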

@Override
public RpcChannel connect(final InetSocketAddress sa) {
    RpcChannel rpcChannel = new RpcChannel() {
        private Channel c = bootstrap.connect(sa).awaitUninterruptibly().getChannel();

        @Override
        public void callMethod(Descriptors.MethodDescriptor method, RpcController controller, Message request,
                Message responsePrototype, RpcCallback<Message> done) {
            StopWatch stopWatch = new StopWatch("callMethod");
            stopWatch.start();

            long id = seqNum.addAndGet(1);

            logger.trace("calling method: " + method.getFullName());

            HttpRequest httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST,
                    "http://" + sa.getHostName() + ":" + sa.getPort());
            httpRequest.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);
            httpRequest.setHeader(HttpHeaders.Names.ACCEPT_ENCODING, HttpHeaders.Values.GZIP);
            httpRequest.setHeader(HttpHeaders.Names.CONTENT_TYPE,
                    HttpHeaders.Values.APPLICATION_X_WWW_FORM_URLENCODED);

            RPCommunication.Request protoRequest = RPCommunication.Request.newBuilder()
                    .setServiceName(method.getService().getFullName()).setMethodName(method.getName())
                    .setMethodArgument(request.toByteString()).setRequestId(id).build();

            logger.trace("request built: " + request.toString());

            if (enableSymmetricEncryption) {
                protoRequest = getEncryptedRequest(protoRequest);
                logger.trace("symmetric encryption enabled, encrypted request: " + protoRequest.toString());
            }

            if (enableAsymmetricEncryption) {
                protoRequest = getAsymEncryptedRequest(protoRequest);
                logger.trace("asymmetric encryption enabled, encrypted request: " + protoRequest.toString());
            }

            byte[] arr = protoRequest.toByteArray();

            ChannelBuffer s = Base64.encode(ChannelBuffers.copiedBuffer(arr), Base64Dialect.STANDARD);

            httpRequest.setContent(s);

            httpRequest.addHeader(HttpHeaders.Names.CONTENT_LENGTH, s.readableBytes());

            httpRequest.setChunked(false);

            callbackMap.put(id, done);
            descriptorProtoMap.put(id, responsePrototype);

            c.write(httpRequest);
            logger.trace("request sent: " + protoRequest.toString());

            stopWatch.stop();
            logger.trace(stopWatch.shortSummary());
        }
    };
    logger.trace("connected to address: " + sa.toString());
    return rpcChannel;
}

From source file:org.apache.synapse.transport.passthru.core.PassThroughListeningIOReactorManager.java
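Logs the host name and port of a newly started pass-through HTTP listener endpoint, falling back to the raw address when it is unresolved.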

private ListenerEndpoint startEndpoint(InetSocketAddress inetSocketAddress,
        ListeningIOReactor defaultListeningIOReactor, String endPointName) throws Exception {
    ListenerEndpoint endpoint = defaultListeningIOReactor.listen(inetSocketAddress);
    try {
        endpoint.waitFor();
        InetSocketAddress address = (InetSocketAddress) endpoint.getAddress();
        if (!address.isUnresolved()) {
            log.info((endPointName != null ? "Pass-through " + endPointName : " Pass-through Http ")
                    + " Listener started on " + address.getHostName() + ":" + address.getPort());
        } else {
            log.info((endPointName != null ? "Pass-through " + endPointName : " Pass-through Http ")
                    + " Listener started on " + address);
        }
    } catch (Exception e) {
        throw new Exception("Endpoint does not start for port " + inetSocketAddress.getPort()
                + "May be IO Reactor not started or endpoint binding exception ", e);

    }
    return endpoint;
}

From source file:org.apache.slider.common.tools.SliderUtils.java
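Treats a scheduler address as defined only when its host name is something other than the 0.0.0.0 wildcard.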

/**
 * probe to see if the address is defined
 * @param address network address
 * @return true if the scheduler address is set to
 * something other than 0.0.0.0
 */
public static boolean isAddressDefined(InetSocketAddress address) {
    return !(address.getHostName().equals("0.0.0.0"));
}
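
A hypothetical call site for the helper above; createUnresolved is used only so the sketch does not depend on name-service lookups, and the port value 8030 is a placeholder.

InetSocketAddress wildcard = InetSocketAddress.createUnresolved("0.0.0.0", 8030);
InetSocketAddress concrete = InetSocketAddress.createUnresolved("rm.example.org", 8030);

SliderUtils.isAddressDefined(wildcard); // false: the host name is the 0.0.0.0 wildcard
SliderUtils.isAddressDefined(concrete); // true: a concrete scheduler host is configured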

From source file:org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.java
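An older variant of the MiniAccumuloClusterImpl constructor shown above; again the MiniDFS NameNode's host name and port are used to build the HDFS URI.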

/**
 * @param config
 *          initial configuration
 */
@SuppressWarnings("deprecation")
public MiniAccumuloClusterImpl(MiniAccumuloConfigImpl config) throws IOException {

    this.config = config.initialize();

    mkdirs(config.getConfDir());
    mkdirs(config.getLogDir());
    mkdirs(config.getLibDir());
    mkdirs(config.getLibExtDir());

    if (!config.useExistingInstance()) {
        if (!config.useExistingZooKeepers())
            mkdirs(config.getZooKeeperDir());
        mkdirs(config.getWalogDir());
        mkdirs(config.getAccumuloDir());
    }

    if (config.useMiniDFS()) {
        File nn = new File(config.getAccumuloDir(), "nn");
        mkdirs(nn);
        File dn = new File(config.getAccumuloDir(), "dn");
        mkdirs(dn);
        File dfs = new File(config.getAccumuloDir(), "dfs");
        mkdirs(dfs);
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster.Builder(conf).build();
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else if (config.useExistingInstance()) {
        dfsUri = CachedConfiguration.getInstance().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    } else {
        dfsUri = "file:///";
    }

    File clientConfFile = config.getClientConfFile();
    // Write only the properties that correspond to ClientConfiguration properties
    writeConfigProperties(clientConfFile, Maps.filterEntries(config.getSiteConfig(),
            v -> ClientConfiguration.ClientProperty.getPropertyByKey(v.getKey()) != null));

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    if (!config.useExistingInstance() && !config.useExistingZooKeepers()) {
        zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
        FileWriter fileWriter = new FileWriter(zooCfgFile);

        // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
        Properties zooCfg = new Properties();
        zooCfg.setProperty("tickTime", "2000");
        zooCfg.setProperty("initLimit", "10");
        zooCfg.setProperty("syncLimit", "5");
        zooCfg.setProperty("clientPortAddress", "127.0.0.1");
        zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
        zooCfg.setProperty("maxClientCnxns", "1000");
        zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
        zooCfg.store(fileWriter, null);

        fileWriter.close();
    }

    // disable audit logging for mini....
    InputStream auditStream = this.getClass().getResourceAsStream("/auditLog.xml");

    if (auditStream != null) {
        FileUtils.copyInputStreamToFile(auditStream, new File(config.getConfDir(), "auditLog.xml"));
    }

    clusterControl = new MiniAccumuloClusterControl(this);
}

From source file:org.apache.tajo.cli.tsql.TajoCli.java
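When the -h and -p command-line options are absent, the CLI falls back to the host name and port of the configured Tajo master client RPC address.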

public TajoCli(TajoConf c, String[] args, @Nullable Properties clientParams, InputStream in, OutputStream out,
        OutputStream err) throws Exception {

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = parser.parse(options, args);

    this.conf = new TajoConf(c);
    context = new TajoCliContext(conf);
    this.sin = in;
    if (cmd.hasOption("B")) {
        this.reader = new ConsoleReader(sin, out, new UnsupportedTerminal());
    } else {
        this.reader = new ConsoleReader(sin, out);
    }

    this.reader.setExpandEvents(false);
    this.sout = new PrintWriter(reader.getOutput());
    this.serr = new PrintWriter(new OutputStreamWriter(err, "UTF-8"));
    initFormatter();

    if (cmd.hasOption("help")) {
        printUsage();
        System.exit(0);
    }

    String hostName = null;
    Integer port = null;
    if (cmd.hasOption("h")) {
        hostName = cmd.getOptionValue("h");
    }
    if (cmd.hasOption("p")) {
        port = Integer.parseInt(cmd.getOptionValue("p"));
    }

    String baseDatabase = null;
    if (cmd.getArgList().size() > 0) {
        baseDatabase = (String) cmd.getArgList().get(0);
    }

    if (cmd.getOptionValues("conf") != null) {
        processConfVarCommand(cmd.getOptionValues("conf"));
    }

    this.reconnect = cmd.hasOption("reconnect");

    // if there is no "-h" or "-p" option, fall back to the configured Tajo master client RPC address
    InetSocketAddress address = conf.getSocketAddrVar(TajoConf.ConfVars.TAJO_MASTER_CLIENT_RPC_ADDRESS,
            TajoConf.ConfVars.TAJO_MASTER_UMBILICAL_RPC_ADDRESS);

    if (hostName == null) {
        hostName = address.getHostName();
    }

    if (port == null) {
        port = address.getPort();
    }

    // Get connection parameters
    Properties defaultConnParams = CliClientParamsFactory.get(clientParams);
    final KeyValueSet actualConnParams = new KeyValueSet(Maps.fromProperties(defaultConnParams));

    if ((hostName == null) ^ (port == null)) {
        System.err.println(ERROR_PREFIX + "cannot find valid Tajo server address");
        throw new RuntimeException("cannot find valid Tajo server address");
    } else if (hostName != null && port != null) {
        conf.setVar(ConfVars.TAJO_MASTER_CLIENT_RPC_ADDRESS, NetUtils.getHostPortString(hostName, port));
        client = new TajoClientImpl(ServiceTrackerFactory.get(conf), baseDatabase, actualConnParams);
    } else if (hostName == null && port == null) {
        client = new TajoClientImpl(ServiceTrackerFactory.get(conf), baseDatabase, actualConnParams);
    }

    try {
        context.setCurrentDatabase(client.getCurrentDatabase());
        initHistory();
        initCommands();

        reader.addCompleter(cliCompleter);
        reader.addCompleter(sqlCompleter);

        if (cmd.getOptionValues("conf") != null) {
            processSessionVarCommand(cmd.getOptionValues("conf"));
        }

        if (cmd.hasOption("c")) {
            displayFormatter.setScriptMode();
            int exitCode = executeScript(cmd.getOptionValue("c"));
            sout.flush();
            serr.flush();
            System.exit(exitCode);
        }
        if (cmd.hasOption("f")) {
            displayFormatter.setScriptMode();
            cmd.getOptionValues("");
            File sqlFile = new File(cmd.getOptionValue("f"));
            if (sqlFile.exists()) {
                String script = FileUtil.readTextFile(new File(cmd.getOptionValue("f")));
                script = replaceParam(script, cmd.getOptionValues("param"));
                int exitCode = executeScript(script);
                sout.flush();
                serr.flush();
                System.exit(exitCode);
            } else {
                System.err.println(ERROR_PREFIX + "No such a file \"" + cmd.getOptionValue("f") + "\"");
                System.exit(-1);
            }
        }
    } catch (Exception e) {
        System.err.println(ERROR_PREFIX + "Exception was thrown. Caused by " + e.getMessage());

        if (client != null) {
            client.close();
        }

        throw e;
    }

    addShutdownHook();
}