Example usage for java.net InetSocketAddress getPort

Introduction

On this page you can find example usages of java.net.InetSocketAddress.getPort() collected from open-source projects.

Prototype

public final int getPort() 

Document

Gets the port number.
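
Before the project examples below, here is a minimal, self-contained sketch of getPort() on its own. The host name and port numbers are arbitrary illustration values, not taken from any of the projects listed here.

import java.net.InetSocketAddress;

public class GetPortExample {
    public static void main(String[] args) {
        // Resolved address: this constructor attempts a DNS lookup for the host.
        InetSocketAddress resolved = new InetSocketAddress("example.com", 8080);
        System.out.println(resolved.getPort()); // prints 8080

        // Unresolved address: no lookup is attempted, but the port is still available.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.com", 9090);
        System.out.println(unresolved.getPort()); // prints 9090

        // Wildcard address with port 0, often used to request an ephemeral port when binding.
        InetSocketAddress wildcard = new InetSocketAddress(0);
        System.out.println(wildcard.getPort()); // prints 0
    }
}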

Usage

From source file:org.apache.hadoop.hdfs.server.datanode.DataNode.java

/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration;
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set,
 *  then a simulated storage-based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 * @throws MalformedObjectNameException 
 * @throws MBeanRegistrationException 
 * @throws InstanceAlreadyExistsException 
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, SecureResources resources)
        throws IOException {
    if (UserGroupInformation.isSecurityEnabled() && resources == null)
        throw new RuntimeException("Cannot start secure cluster without " + "privileged resources.");

    this.secureResources = resources;
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);

    this.socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need to set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);

    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);
    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // register datanode MXBean
    this.registerMXBean(conf); // register the MXBean for DataNode

    // Allow configuration to delay block reports to find bugs
    artificialBlockReceivedDelay = conf.getInt("dfs.datanode.artificialBlockReceivedDelay", 0);

    // find free port or use privileged port provided
    ServerSocket ss;
    if (secureResources == null) {
        ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
        Server.bind(ss, socAddr, 0);
    } else {
        ss = resources.getStreamingSocket();
    }
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    //initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verifcation is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = (secureResources == null)
            ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN))
            : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN), secureResources.getListener());
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new Configuration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);

    this.infoServer.setAttribute("datanode", this);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);

    if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
        infoServer.addJerseyResourcePackage(
                DatanodeWebHdfsMethods.class.getPackage().getName() + ";" + Param.class.getPackage().getName(),
                WebHdfsFileSystem.PATH_PREFIX + "/*");
    }
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = DataNodeInstrumentation.create(conf, dnRegistration.getStorageID());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    // BlockTokenSecretManager is created here, but it shouldn't be
    // used until it is initialized in register().
    this.blockTokenSecretManager = new BlockTokenSecretManager(false, 0, 0);
    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf, blockTokenSecretManager);
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}

From source file:org.apache.hadoop.raid.RaidNode.java

private void startHttpServer() throws IOException {
    String infoAddr = conf.get("mapred.raid.http.address", "localhost:50091");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    this.infoBindAddress = infoSocAddr.getAddress().getHostAddress();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new HttpServer("raid", this.infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    this.infoServer.setAttribute("raidnode", this);
    this.infoServer.addInternalServlet("corruptfilecounter", "/corruptfilecounter",
            CorruptFileCounterServlet.class);
    this.infoServer.start();
    LOG.info("Web server started at port " + this.infoServer.getPort());
}

From source file:org.elasticsearch.test.ElasticsearchIntegrationTest.java

protected HttpRequestBuilder httpClient() {
    final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
    final NodeInfo[] nodes = nodeInfos.getNodes();
    assertTrue(nodes.length > 0);
    TransportAddress publishAddress = randomFrom(nodes).getHttp().address().publishAddress();
    assertEquals(1, publishAddress.uniqueAddressTypeId());
    InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address();
    return new HttpRequestBuilder(HttpClients.createDefault()).host(address.getHostName())
            .port(address.getPort());
}

From source file:io.bitsquare.btc.WalletService.java

public void initialize(@Nullable DeterministicSeed seed, ResultHandler resultHandler,
        ExceptionHandler exceptionHandler) {
    Log.traceCall();
    // Tell bitcoinj to execute event handlers on the JavaFX UI thread. This keeps things simple and means
    // we cannot forget to switch threads when adding event handlers. Unfortunately, the DownloadListener
    // we give to the app kit is currently an exception and runs on a library thread. It'll get fixed in
    // a future version.

    Threading.USER_THREAD = UserThread.getExecutor();

    Timer timeoutTimer = UserThread.runAfter(
            () -> exceptionHandler.handleException(
                    new TimeoutException("Wallet did not initialize in " + STARTUP_TIMEOUT_SEC + " seconds.")),
            STARTUP_TIMEOUT_SEC);

    backupWallet();

    final Socks5Proxy socks5Proxy = preferences.getUseTorForBitcoinJ() ? socks5ProxyProvider.getSocks5Proxy()
            : null;
    log.debug("Use socks5Proxy for bitcoinj: " + socks5Proxy);

    // If seed is non-null it means we are restoring from backup.
    walletAppKit = new WalletAppKitBitSquare(params, socks5Proxy, walletDir, "Bitsquare") {
        @Override
        protected void onSetupCompleted() {
            // Don't make the user wait for confirmations for now, as the intention is they're sending it
            // their own money!
            walletAppKit.wallet().allowSpendingUnconfirmedTransactions();
            final PeerGroup peerGroup = walletAppKit.peerGroup();

            if (params != RegTestParams.get())
                peerGroup.setMaxConnections(11);

            // We don't want to get our node white list polluted with nodes from AddressMessage calls.
            if (preferences.getBitcoinNodes() != null && !preferences.getBitcoinNodes().isEmpty())
                peerGroup.setAddPeersFromAddressMessage(false);

            wallet = walletAppKit.wallet();
            wallet.addEventListener(walletEventListener);

            addressEntryList.onWalletReady(wallet);

            peerGroup.addEventListener(new PeerEventListener() {
                @Override
                public void onPeersDiscovered(Set<PeerAddress> peerAddresses) {
                }

                @Override
                public void onBlocksDownloaded(Peer peer, Block block, FilteredBlock filteredBlock,
                        int blocksLeft) {
                }

                @Override
                public void onChainDownloadStarted(Peer peer, int blocksLeft) {
                }

                @Override
                public void onPeerConnected(Peer peer, int peerCount) {
                    numPeers.set(peerCount);
                    connectedPeers.set(peerGroup.getConnectedPeers());
                }

                @Override
                public void onPeerDisconnected(Peer peer, int peerCount) {
                    numPeers.set(peerCount);
                    connectedPeers.set(peerGroup.getConnectedPeers());
                }

                @Override
                public Message onPreMessageReceived(Peer peer, Message m) {
                    return null;
                }

                @Override
                public void onTransaction(Peer peer, Transaction t) {
                }

                @Nullable
                @Override
                public List<Message> getData(Peer peer, GetDataMessage m) {
                    return null;
                }
            });

            // set after wallet is ready
            tradeWalletService.setWalletAppKit(walletAppKit);
            tradeWalletService.setAddressEntryList(addressEntryList);
            timeoutTimer.stop();

            // onSetupCompleted in walletAppKit is not necessarily the last callback invoked, so we add a bit of delay
            UserThread.runAfter(resultHandler::handleResult, 100, TimeUnit.MILLISECONDS);
        }
    };

    // Bloom filters in BitcoinJ are completely broken
    // See: https://jonasnick.github.io/blog/2015/02/12/privacy-in-bitcoinj/
    // Here are a few improvements to fix a few vulnerabilities.

    // Bitsquare's BitcoinJ fork has added a bloomFilterTweak (nonce) setter to reuse the same seed avoiding the trivial vulnerability
    // by getting the real pub keys by intersections of several filters sent at each startup.
    walletAppKit.setBloomFilterTweak(bloomFilterTweak);

    // Avoid the simple attack (see: https://jonasnick.github.io/blog/2015/02/12/privacy-in-bitcoinj/) due to the
    // default implementation using both the pubkey and the hash of the pubkey. We have set an insertPubKey flag in BasicKeyChain to default false.

    // By default only 266 keys are generated (2 * 100 + 33). That would trigger new bloom filters when we are reaching
    // the threshold. To avoid reaching the threshold we create many more keys, which are unlikely to cause an update of the
    // filter for most users. With a lookaheadSize of 500 we get 1333 keys, which should be enough for most users to
    // never need to update a bloom filter, which would weaken privacy.
    walletAppKit.setLookaheadSize(500);

    // Calculation is derived from: https://www.reddit.com/r/Bitcoin/comments/2vrx6n/privacy_in_bitcoinj_android_wallet_multibit_hive/coknjuz
    // No. of false positives (56M keys in the blockchain):
    // First attempt for FP rate:
    // FP rate = 0.0001;  No. of false positives: 0.0001 * 56 000 000 = 5600
    // We have 1333 keys: 1333 / (5600 + 1333) = 0.19 -> 19 % probability that a pub key is in our wallet
    // After tests I found that the bandwidth consumption varies widely depending on the generated filter.
    // About 20-40 MB for upload and 30-130 MB for download at first startup (spv chain).
    // Afterwards it is about 1 MB for upload and 20-80 MB for download.
    // Probably better than a high FP rate would be to include foreign pubKeyHashes which are tested to not be used
    // in many transactions. If we had a pool of 100 000 such keys (2 MB data dump) from which to randomly select 4000, we could mix them with our
    // 1000 own keys and get a similar probability rate as with the current setup but less variation in bandwidth
    // consumption.

    // For now, to reduce the risk of high bandwidth consumption, we reduce the FP rate by half.
    // FP rate = 0.00005;  No. of false positives: 0.00005 * 56 000 000 = 2800
    // 1333 / (2800 + 1333) = 0.32 -> 32 % probability that a pub key is in our wallet
    walletAppKit.setBloomFilterFalsePositiveRate(0.00005);

    String btcNodes = preferences.getBitcoinNodes();
    log.debug("btcNodes: " + btcNodes);
    boolean usePeerNodes = false;

    // Pass custom seed nodes if set in options
    if (btcNodes != null && !btcNodes.isEmpty()) {
        String[] nodes = StringUtils.deleteWhitespace(btcNodes).split(",");
        List<PeerAddress> peerAddressList = new ArrayList<>();
        for (String node : nodes) {
            String[] parts = node.split(":");
            if (parts.length == 1) {
                // port not specified.  Use default port for network.
                parts = new String[] { parts[0], Integer.toString(params.getPort()) };
            }
            if (parts.length == 2) {
                // note: this will cause a DNS request if hostname used.
                // note: DNS requests are routed over socks5 proxy, if used.
                // note: .onion hostnames will be unresolved.
                InetSocketAddress addr;
                if (socks5Proxy != null) {
                    try {
                        // proxy remote DNS request happens here.  blocking.
                        addr = new InetSocketAddress(DnsLookupTor.lookup(socks5Proxy, parts[0]),
                                Integer.parseInt(parts[1]));
                    } catch (Exception e) {
                        log.warn("Dns lookup failed for host: {}", parts[0]);
                        addr = null;
                    }
                } else {
                    // DNS request happens here. if it fails, addr.isUnresolved() == true.
                    addr = new InetSocketAddress(parts[0], Integer.parseInt(parts[1]));
                }
                if (addr != null && !addr.isUnresolved()) {
                    peerAddressList.add(new PeerAddress(addr.getAddress(), addr.getPort()));
                }
            }
        }
        if (peerAddressList.size() > 0) {
            PeerAddress[] peerAddressListFixed = peerAddressList.toArray(new PeerAddress[peerAddressList.size()]);
            log.debug("btcNodes parsed: " + Arrays.toString(peerAddressListFixed));

            walletAppKit.setPeerNodes(peerAddressListFixed);
            usePeerNodes = true;
        }
    }

    // Now configure and start the appkit. This will take a second or two - we could show a temporary splash screen
    // or progress widget to keep the user engaged whilst we initialise, but we don't.
    if (params == RegTestParams.get()) {
        if (regTestHost == RegTestHost.REG_TEST_SERVER) {
            try {
                walletAppKit.setPeerNodes(
                        new PeerAddress(InetAddress.getByName(RegTestHost.SERVER_IP), params.getPort()));
                usePeerNodes = true;
            } catch (UnknownHostException e) {
                throw new RuntimeException(e);
            }
        } else if (regTestHost == RegTestHost.LOCALHOST) {
            walletAppKit.connectToLocalHost(); // You should run a regtest-mode bitcoind locally.
        }
    } else if (params == MainNetParams.get()) {
        // Checkpoints are block headers that ship inside our app: for a new user, we pick the last header
        // in the checkpoints file and then download the rest from the network. It makes things much faster.
        // Checkpoint files are made using the BuildCheckpoints tool and usually we have to download the
        // last month's worth or more (takes a few seconds).
        try {
            walletAppKit.setCheckpoints(getClass().getResourceAsStream("/wallet/checkpoints"));
        } catch (Exception e) {
            e.printStackTrace();
            log.error(e.toString());
        }
    } else if (params == TestNet3Params.get()) {
        walletAppKit.setCheckpoints(getClass().getResourceAsStream("/wallet/checkpoints.testnet"));
    }

    // If operating over a proxy and we haven't set any peer nodes, then
    // we want to use SeedPeers for discovery instead of the default DnsDiscovery.
    // This is only because we do not yet have a Dns discovery class that works
    // reliably over proxy/tor.
    //
    // todo: There should be a user pref called "Use Local DNS for Proxy/Tor"
    // that disables this.  In that case, the default DnsDiscovery class will
    // be used which should work, but is less private.  The aim here is to
    // be private by default when using proxy/tor.  However, the seedpeers
    // could become outdated, so it is important that the user be able to
    // disable it, but should be made aware of the reduced privacy.
    if (socks5Proxy != null && !usePeerNodes) {
        // SeedPeers uses hard coded stable addresses (from MainNetParams). It should be updated from time to time.
        walletAppKit.setDiscovery(new Socks5MultiDiscovery(socks5Proxy, params, socks5DiscoverMode));
    }

    walletAppKit.setDownloadListener(downloadListener).setBlockingStartup(false)
            .setUserAgent(userAgent.getName(), userAgent.getVersion()).restoreWalletFromSeed(seed);

    walletAppKit.addListener(new Service.Listener() {
        @Override
        public void failed(@NotNull Service.State from, @NotNull Throwable failure) {
            walletAppKit = null;
            log.error("walletAppKit failed");
            timeoutTimer.stop();
            UserThread.execute(() -> exceptionHandler.handleException(failure));
        }
    }, Threading.USER_THREAD);
    walletAppKit.startAsync();
}

From source file:orca.ektorp.client.ContextualSSLSocketFactory.java

/**
 * @since 4.1
 * @param socket socket
 * @param remoteAddress remote address
 * @param localAddress local address
 * @param params HTTP params
 * @throws IOException in case of IO error
 * @throws UnknownHostException in case of unknown host
 * @throws ConnectTimeoutException in case of connect timeout
 * @return returns the socket
 */
public Socket connectSocket(final Socket socket, final InetSocketAddress remoteAddress,
        final InetSocketAddress localAddress, final HttpParams params)
        throws IOException, UnknownHostException, ConnectTimeoutException {
    if (remoteAddress == null) {
        throw new IllegalArgumentException("Remote address may not be null");
    }
    if (params == null) {
        throw new IllegalArgumentException("HTTP parameters may not be null");
    }
    //Here!
    Socket sock = socket != null ? socket : this.socketfactory.createSocket();
    if (localAddress != null) {
        sock.setReuseAddress(HttpConnectionParams.getSoReuseaddr(params));
        sock.bind(localAddress);
    }

    int connTimeout = HttpConnectionParams.getConnectionTimeout(params);
    int soTimeout = HttpConnectionParams.getSoTimeout(params);

    try {
        sock.setSoTimeout(soTimeout);
        sock.connect(remoteAddress, connTimeout);
    } catch (SocketTimeoutException ex) {
        throw new ConnectTimeoutException("Connect to " + remoteAddress + " timed out");
    }

    String hostname;
    if (remoteAddress instanceof HttpInetSocketAddress) {
        hostname = ((HttpInetSocketAddress) remoteAddress).getHttpHost().getHostName();
    } else {
        hostname = remoteAddress.getHostName();
    }

    SSLSocket sslsock;
    // Setup SSL layering if necessary
    if (sock instanceof SSLSocket) {
        sslsock = (SSLSocket) sock;
    } else {
        int port = remoteAddress.getPort();
        sslsock = (SSLSocket) this.socketfactory.createSocket(sock, hostname, port, true);
        prepareSocket(sslsock);
    }
    if (this.hostnameVerifier != null) {
        try {
            this.hostnameVerifier.verify(hostname, sslsock);
            // verifyHostName() didn't blowup - good!
        } catch (IOException iox) {
            // close the socket before re-throwing the exception
            try {
                sslsock.close();
            } catch (Exception x) {
                // ignore
            }
            throw iox;
        }
    }
    return sslsock;
}

From source file:org.apache.hadoop.hdfs.server.namenode.AvatarNode.java

/**
 * Initialize AvatarNode
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
    InetSocketAddress socAddr = AvatarNode.getAddress(conf);
    int handlerCount = conf.getInt("hdfs.avatarnode.handler.count", 3);

    // create rpc server
    // no point to deserialize job file in Avatar node.
    this.server = RPC.getServer(this, socAddr.getAddress().getHostAddress(), socAddr.getPort(), handlerCount,
            false, conf, false);

    // The rpc-server port can be ephemeral... ensure we have the 
    // correct info
    this.serverAddress = this.server.getListenerAddress();
    LOG.info("AvatarNode up at: " + this.serverAddress);
    this.registerMBean();
    this.server.start();
}
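
A recurring pattern in the DataNode and AvatarNode examples above is binding to a configured address whose port may be 0 ("pick any free port") and then re-reading the port that was actually assigned. Below is a minimal sketch of that pattern using a plain ServerSocket; the class name and the use of localhost are illustrative assumptions, not part of either project.

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortExample {
    public static void main(String[] args) throws Exception {
        try (ServerSocket server = new ServerSocket()) {
            // Port 0 asks the OS to assign any free ephemeral port.
            server.bind(new InetSocketAddress("localhost", 0));

            // The requested port (0) is not the port we actually got; re-read it
            // from the bound address, just as the examples above do after binding.
            InetSocketAddress bound = (InetSocketAddress) server.getLocalSocketAddress();
            System.out.println("Listening on port " + bound.getPort());
        }
    }
}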

From source file:com.delphix.session.test.ServiceTest.java

@Test
public void testLoginLocalBinding() {
    int localPort = portScan();

    // Create the session
    ClientConfig spec = initServiceSpec(new HelloService(helloService));

    PlainClient sasl = new PlainClient(TEST_USERNAME, TEST_PASSWORD);
    spec.setSaslMechanism(sasl);

    // Connect to an address with the specified local binding
    List<TransportAddress> addresses = new ArrayList<TransportAddress>();
    addresses.add(new TransportAddress(localhost, PORT, localPort));
    spec.setAddresses(addresses);

    ClientNexus client = clientManager.create(spec);

    login(client);

    // Validate the local binding
    Collection<ServiceTransport> xports = client.getTransports();
    assertEquals(xports.size(), 1);

    for (ServiceTransport xport : xports) {
        InetSocketAddress address = (InetSocketAddress) xport.getLocalAddress();
        assertEquals(address.getPort(), localPort);
    }
}

From source file:nz.dataview.websyncclientgui.WebSYNCClientGUIView.java

/**
 * Try to detect whether a proxy exists, and update the UI accordingly.
 */
private void detectProxy() {

    try {
        proxyDetectedLabel.setText("");
        List<Proxy> proxies = ProxySelector.getDefault().select(new URI(knURLField.getText()));

        for (Proxy proxy : proxies) {
            if ("DIRECT".equals(proxy.type().toString())) {
                proxyEnabled = false;
                useProxyField.setSelected(proxyEnabled);
                proxyFieldsUpdate(proxyEnabled);
            } else {
                InetSocketAddress addr = (InetSocketAddress) proxy.address();
                if (addr != null) {
                    // if we have a proxy set the correct fields and state of UI
                    proxyEnabled = true;
                    useProxyField.setSelected(proxyEnabled);
                    proxyHostField.setText(addr.getHostName());
                    proxyPortField.setText(Integer.toString(addr.getPort()));
                    proxyFieldsUpdate(proxyEnabled);

                }
            }
            String text = "Proxy settings detected and updated";
            proxyDetectedLabel.setText(text);
        }
    } catch (Exception e) {
        showErrorDialog("Detect Proxy", "Problem in detecting proxy, please set proxy settings manually");
    }

}

From source file:org.apache.hadoop.hdfs.MiniDFSCluster.java

/**
 * Restart a datanode, on the same port if requested
 *
 * @param dnprop
 *     the datanode to restart
 * @param keepPort
 *     whether to use the same port
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException {
    Configuration conf = dnprop.conf;
    String[] args = dnprop.dnArgs;
    SecureResources secureResources = dnprop.secureResources;
    Configuration newconf = new HdfsConfiguration(conf); // save cloned config
    if (keepPort) {
        InetSocketAddress addr = dnprop.datanode.getXferAddress();
        conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort());
        conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
    }
    DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
    dataNodes.add(new DataNodeProperties(newDn, newconf, args, secureResources, newDn.getIpcPort()));
    numDataNodes++;
    try {

        //[S] figure out which thread has slowed down
        Thread.sleep(1000);
    } catch (InterruptedException ex) {
        Logger.getLogger(MiniDFSCluster.class.getName()).log(Level.SEVERE, null, ex);
    }

    return true;
}

From source file:com.alibaba.wasp.master.FMaster.java

/**
 * Initializes the FMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize FMaster RPC and address
 * <li>Connect to ZooKeeper.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in {@link #run()} so that they run
 * in their own thread rather than within the context of the constructor.
 * 
 * @throws InterruptedException
 */
public FMaster(final Configuration conf) throws IOException, KeeperException, InterruptedException {
    this.conf = new Configuration(conf);
    // Set how many times to retry talking to another server over HConnection.
    FConnectionManager.setServerSideFConnectionRetries(this.conf, LOG);
    // Server to handle client requests.
    String hostname = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.master.dns.interface", "default"),
                    conf.get("wasp.master.dns.nameserver", "default")));
    int port = conf.getInt(FConstants.MASTER_PORT, FConstants.DEFAULT_MASTER_PORT);
    // Creation of a ISA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    this.rpcServer = WaspRPC.getServer(FMaster.class, this,
            new Class<?>[] { FMasterMonitorProtocol.class, FMasterAdminProtocol.class,
                    FServerStatusProtocol.class, FMetaServerProtocol.class },
            initialIsa.getHostName(), // BindAddress is IP we got for this server.
            initialIsa.getPort(), conf);
    // Set our address.
    this.isa = this.rpcServer.getListenerAddress();
    this.serverName = new ServerName(this.isa.getHostName(), this.isa.getPort(), System.currentTimeMillis());

    // set the thread name now we have an address
    setName(MASTER + "-" + this.serverName.toString());

    this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true);

    // metrics interval: using the same property as fserver.
    this.msgInterval = conf.getInt("wasp.fserver.msginterval", 3 * 1000);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
}