List of usage examples for java.net InetSocketAddress getAddress
public final InetAddress getAddress()
From source file:de.dal33t.powerfolder.clientserver.ServerClient.java
/** * @return the string representing the server address *///from w ww . j a va 2 s.co m public String getServerString() { String addrStr; if (server != null) { if (server.isMySelf()) { addrStr = "myself"; } else { InetSocketAddress addr = server.getReconnectAddress(); if (addr != null) { if (addr.getAddress() != null) { addrStr = NetworkUtil.getHostAddressNoResolve(addr.getAddress()); } else { addrStr = addr.getHostName(); } } else { addrStr = ""; } if (addr != null && addr.getPort() != ConnectionListener.DEFAULT_PORT) { addrStr += ":" + addr.getPort(); } } } else { addrStr = ""; } if (hasWebURL()) { return getWebURL(); } else if (StringUtils.isNotBlank(addrStr)) { return "pf://" + addrStr; } else { return "n/a"; } }
From source file:io.bitsquare.btc.WalletService.java
public void initialize(@Nullable DeterministicSeed seed, ResultHandler resultHandler, ExceptionHandler exceptionHandler) { Log.traceCall();/*from w ww . java 2 s .c o m*/ // Tell bitcoinj to execute event handlers on the JavaFX UI thread. This keeps things simple and means // we cannot forget to switch threads when adding event handlers. Unfortunately, the DownloadListener // we give to the app kit is currently an exception and runs on a library thread. It'll get fixed in // a future version. Threading.USER_THREAD = UserThread.getExecutor(); Timer timeoutTimer = UserThread.runAfter( () -> exceptionHandler.handleException( new TimeoutException("Wallet did not initialize in " + STARTUP_TIMEOUT_SEC + " seconds.")), STARTUP_TIMEOUT_SEC); backupWallet(); final Socks5Proxy socks5Proxy = preferences.getUseTorForBitcoinJ() ? socks5ProxyProvider.getSocks5Proxy() : null; log.debug("Use socks5Proxy for bitcoinj: " + socks5Proxy); // If seed is non-null it means we are restoring from backup. walletAppKit = new WalletAppKitBitSquare(params, socks5Proxy, walletDir, "Bitsquare") { @Override protected void onSetupCompleted() { // Don't make the user wait for confirmations for now, as the intention is they're sending it // their own money! walletAppKit.wallet().allowSpendingUnconfirmedTransactions(); final PeerGroup peerGroup = walletAppKit.peerGroup(); if (params != RegTestParams.get()) peerGroup.setMaxConnections(11); // We don't want to get our node white list polluted with nodes from AddressMessage calls. 
if (preferences.getBitcoinNodes() != null && !preferences.getBitcoinNodes().isEmpty()) peerGroup.setAddPeersFromAddressMessage(false); wallet = walletAppKit.wallet(); wallet.addEventListener(walletEventListener); addressEntryList.onWalletReady(wallet); peerGroup.addEventListener(new PeerEventListener() { @Override public void onPeersDiscovered(Set<PeerAddress> peerAddresses) { } @Override public void onBlocksDownloaded(Peer peer, Block block, FilteredBlock filteredBlock, int blocksLeft) { } @Override public void onChainDownloadStarted(Peer peer, int blocksLeft) { } @Override public void onPeerConnected(Peer peer, int peerCount) { numPeers.set(peerCount); connectedPeers.set(peerGroup.getConnectedPeers()); } @Override public void onPeerDisconnected(Peer peer, int peerCount) { numPeers.set(peerCount); connectedPeers.set(peerGroup.getConnectedPeers()); } @Override public Message onPreMessageReceived(Peer peer, Message m) { return null; } @Override public void onTransaction(Peer peer, Transaction t) { } @Nullable @Override public List<Message> getData(Peer peer, GetDataMessage m) { return null; } }); // set after wallet is ready tradeWalletService.setWalletAppKit(walletAppKit); tradeWalletService.setAddressEntryList(addressEntryList); timeoutTimer.stop(); // onSetupCompleted in walletAppKit is not the called on the last invocations, so we add a bit of delay UserThread.runAfter(resultHandler::handleResult, 100, TimeUnit.MILLISECONDS); } }; // Bloom filters in BitcoinJ are completely broken // See: https://jonasnick.github.io/blog/2015/02/12/privacy-in-bitcoinj/ // Here are a few improvements to fix a few vulnerabilities. // Bitsquare's BitcoinJ fork has added a bloomFilterTweak (nonce) setter to reuse the same seed avoiding the trivial vulnerability // by getting the real pub keys by intersections of several filters sent at each startup. 
walletAppKit.setBloomFilterTweak(bloomFilterTweak); // Avoid the simple attack (see: https://jonasnick.github.io/blog/2015/02/12/privacy-in-bitcoinj/) due to the // default implementation using both pubkey and hash of pubkey. We have set a insertPubKey flag in BasicKeyChain to default false. // Default only 266 keys are generated (2 * 100+33). That would trigger new bloom filters when we are reaching // the threshold. To avoid reaching the threshold we create much more keys which are unlikely to cause update of the // filter for most users. With lookaheadSize of 500 we get 1333 keys which should be enough for most users to // never need to update a bloom filter, which would weaken privacy. walletAppKit.setLookaheadSize(500); // Calculation is derived from: https://www.reddit.com/r/Bitcoin/comments/2vrx6n/privacy_in_bitcoinj_android_wallet_multibit_hive/coknjuz // No. of false positives (56M keys in the blockchain): // First attempt for FP rate: // FP rate = 0,0001; No. of false positives: 0,0001 * 56 000 000 = 5600 // We have 1333keys: 1333 / (5600 + 1333) = 0.19 -> 19 % probability that a pub key is in our wallet // After tests I found out that the bandwidth consumption varies widely related to the generated filter. // About 20- 40 MB for upload and 30-130 MB for download at first start up (spv chain). // Afterwards its about 1 MB for upload and 20-80 MB for download. // Probably better then a high FP rate would be to include foreign pubKeyHashes which are tested to not be used // in many transactions. If we had a pool of 100 000 such keys (2 MB data dump) to random select 4000 we could mix it with our // 1000 own keys and get a similar probability rate as with the current setup but less variation in bandwidth // consumption. // For now to reduce risks with high bandwidth consumption we reduce the FP rate by half. // FP rate = 0,00005; No. 
of false positives: 0,00005 * 56 000 000 = 2800 // 1333 / (2800 + 1333) = 0.32 -> 32 % probability that a pub key is in our wallet walletAppKit.setBloomFilterFalsePositiveRate(0.00005); String btcNodes = preferences.getBitcoinNodes(); log.debug("btcNodes: " + btcNodes); boolean usePeerNodes = false; // Pass custom seed nodes if set in options if (!btcNodes.isEmpty()) { String[] nodes = StringUtils.deleteWhitespace(btcNodes).split(","); List<PeerAddress> peerAddressList = new ArrayList<>(); for (String node : nodes) { String[] parts = node.split(":"); if (parts.length == 1) { // port not specified. Use default port for network. parts = new String[] { parts[0], Integer.toString(params.getPort()) }; } if (parts.length == 2) { // note: this will cause a DNS request if hostname used. // note: DNS requests are routed over socks5 proxy, if used. // note: .onion hostnames will be unresolved. InetSocketAddress addr; if (socks5Proxy != null) { try { // proxy remote DNS request happens here. blocking. addr = new InetSocketAddress(DnsLookupTor.lookup(socks5Proxy, parts[0]), Integer.parseInt(parts[1])); } catch (Exception e) { log.warn("Dns lookup failed for host: {}", parts[0]); addr = null; } } else { // DNS request happens here. if it fails, addr.isUnresolved() == true. addr = new InetSocketAddress(parts[0], Integer.parseInt(parts[1])); } if (addr != null && !addr.isUnresolved()) { peerAddressList.add(new PeerAddress(addr.getAddress(), addr.getPort())); } } } if (peerAddressList.size() > 0) { PeerAddress peerAddressListFixed[] = new PeerAddress[peerAddressList.size()]; log.debug("btcNodes parsed: " + Arrays.toString(peerAddressListFixed)); walletAppKit.setPeerNodes(peerAddressList.toArray(peerAddressListFixed)); usePeerNodes = true; } } // Now configure and start the appkit. This will take a second or two - we could show a temporary splash screen // or progress widget to keep the user engaged whilst we initialise, but we don't. 
if (params == RegTestParams.get()) { if (regTestHost == RegTestHost.REG_TEST_SERVER) { try { walletAppKit.setPeerNodes( new PeerAddress(InetAddress.getByName(RegTestHost.SERVER_IP), params.getPort())); usePeerNodes = true; } catch (UnknownHostException e) { throw new RuntimeException(e); } } else if (regTestHost == RegTestHost.LOCALHOST) { walletAppKit.connectToLocalHost(); // You should run a regtest mode bitcoind locally.} } } else if (params == MainNetParams.get()) { // Checkpoints are block headers that ship inside our app: for a new user, we pick the last header // in the checkpoints file and then download the rest from the network. It makes things much faster. // Checkpoint files are made using the BuildCheckpoints tool and usually we have to download the // last months worth or more (takes a few seconds). try { walletAppKit.setCheckpoints(getClass().getResourceAsStream("/wallet/checkpoints")); } catch (Exception e) { e.printStackTrace(); log.error(e.toString()); } } else if (params == TestNet3Params.get()) { walletAppKit.setCheckpoints(getClass().getResourceAsStream("/wallet/checkpoints.testnet")); } // If operating over a proxy and we haven't set any peer nodes, then // we want to use SeedPeers for discovery instead of the default DnsDiscovery. // This is only because we do not yet have a Dns discovery class that works // reliably over proxy/tor. // // todo: There should be a user pref called "Use Local DNS for Proxy/Tor" // that disables this. In that case, the default DnsDiscovery class will // be used which should work, but is less private. The aim here is to // be private by default when using proxy/tor. However, the seedpeers // could become outdated, so it is important that the user be able to // disable it, but should be made aware of the reduced privacy. if (socks5Proxy != null && !usePeerNodes) { // SeedPeers uses hard coded stable addresses (from MainNetParams). It should be updated from time to time. 
walletAppKit.setDiscovery(new Socks5MultiDiscovery(socks5Proxy, params, socks5DiscoverMode)); } walletAppKit.setDownloadListener(downloadListener).setBlockingStartup(false) .setUserAgent(userAgent.getName(), userAgent.getVersion()).restoreWalletFromSeed(seed); walletAppKit.addListener(new Service.Listener() { @Override public void failed(@NotNull Service.State from, @NotNull Throwable failure) { walletAppKit = null; log.error("walletAppKit failed"); timeoutTimer.stop(); UserThread.execute(() -> exceptionHandler.handleException(failure)); } }, Threading.USER_THREAD); walletAppKit.startAsync(); }
From source file:com.alibaba.wasp.fserver.FServer.java
/**
 * Starts a FServer at the default location.
 * <p>
 * Resolves the local bind address from DNS configuration, creates and binds the RPC
 * server, and constructs the worker pool, lease manager, driver, and entity-group
 * bookkeeping. Initialization order matters: the RPC server must be created before
 * its listener address is read back (the configured port may be ephemeral).
 *
 * @param conf the server configuration
 * @throws java.io.IOException  if address resolution or RPC server creation fails
 * @throws InterruptedException if interrupted during startup
 */
public FServer(Configuration conf) throws IOException, InterruptedException {
    this.conf = conf;
    this.isOnline = false;
    // Set how many times to retry talking to another server over FConnection.
    FConnectionManager.setServerSideFConnectionRetries(this.conf, LOG);
    // Config'ed params
    this.msgInterval = conf.getInt("wasp.fserver.msginterval", 3 * 1000);
    this.sleeper = new Sleeper(this.msgInterval, this);
    this.numEntityGroupsToReport = conf.getInt("wasp.fserver.numentitygroupstoreport", 10);
    this.rpcTimeout = conf.getInt(FConstants.WASP_RPC_TIMEOUT_KEY, FConstants.DEFAULT_WASP_RPC_TIMEOUT);
    this.abortRequested = false;
    this.stopped = false;
    this.actionManager = new StorageActionManager(conf);
    // Server to handle client requests. Hostname comes from the configured DNS
    // interface/nameserver (both default to "default").
    String hostname = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.fserver.dns.interface", "default"),
                    conf.get("wasp.fserver.dns.nameserver", "default")));
    int port = conf.getInt(FConstants.FSERVER_PORT, FConstants.DEFAULT_FSERVER_PORT);
    // Creation of a HSA will force a resolve; a null address means resolution failed.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    this.rpcServer = WaspRPC.getServer(FServer.class, this,
            new Class<?>[] { ClientProtocol.class, AdminProtocol.class, WaspRPCErrorHandler.class,
                    OnlineEntityGroups.class },
            initialIsa.getHostName(), // BindAddress is the IP we got for this server.
            initialIsa.getPort(), conf);
    // Set our address from the running server — the configured port can be ephemeral.
    this.isa = this.rpcServer.getListenerAddress();
    this.leases = new Leases(conf.getInt(FConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    this.startcode = System.currentTimeMillis();
    // Worker pool for transactions; core threads may time out so an idle server holds no threads.
    int maxThreads = conf.getInt("wasp.transaction.threads.max", 150);
    this.pool = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
            new DaemonThreadFactory("thread factory"));
    ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true);
    this.scannerLeaseTimeoutPeriod = conf.getInt(FConstants.WASP_CLIENT_SCANNER_TIMEOUT_PERIOD,
            FConstants.DEFAULT_WASP_CLIENT_SCANNER_TIMEOUT_PERIOD);
    this.driver = new BaseDriver(this);
    this.splitThread = new SplitThread(this);
    this.globalEntityGroup = new GlobalEntityGroup(this);
}
From source file:org.apache.hadoop.hdfs.server.namenode.AvatarNode.java
/** * Initialize AvatarNode//from w ww. j a v a 2 s .c o m * @param conf the configuration */ private void initialize(Configuration conf) throws IOException { InetSocketAddress socAddr = AvatarNode.getAddress(conf); int handlerCount = conf.getInt("hdfs.avatarnode.handler.count", 3); // create rpc server // no point to deserialize job file in Avatar node. this.server = RPC.getServer(this, socAddr.getAddress().getHostAddress(), socAddr.getPort(), handlerCount, false, conf, false); // The rpc-server port can be ephemeral... ensure we have the // correct info this.serverAddress = this.server.getListenerAddress(); LOG.info("AvatarNode up at: " + this.serverAddress); this.registerMBean(); this.server.start(); }
From source file:org.ireland.jnetty.http.HttpServletRequestImpl.java
@Override // OK//ww w . ja v a 2 s. c o m public String getLocalAddr() { InetSocketAddress local = socketChannel.localAddress(); if (local == null) return ""; InetAddress address = local.getAddress(); if (address == null) return local.getHostString(); return address.getHostAddress(); }
From source file:de.dal33t.powerfolder.clientserver.ServerClient.java
/** * @param conHan// w ww . j av a 2s . c o m * @return true if the node is the primary login server for the current * account. account. */ public boolean isPrimaryServer(ConnectionHandler conHan) { if (server.getInfo().equals(conHan.getIdentity().getMemberInfo())) { return true; } if (isTempServerNode(server)) { if (server.getReconnectAddress().equals(conHan.getRemoteAddress())) { return true; } // Try check by hostname / port InetSocketAddress nodeSockAddr = conHan.getRemoteAddress(); InetSocketAddress serverSockAddr = server.getReconnectAddress(); if (nodeSockAddr == null || serverSockAddr == null) { return false; } InetAddress nodeAddr = nodeSockAddr.getAddress(); InetAddress serverAddr = serverSockAddr.getAddress(); if (nodeAddr == null || serverAddr == null) { return false; } String nodeHost = NetworkUtil.getHostAddressNoResolve(nodeAddr); String serverHost = NetworkUtil.getHostAddressNoResolve(serverAddr); int nodePort = nodeSockAddr.getPort(); int serverPort = serverSockAddr.getPort(); return nodeHost.equalsIgnoreCase(serverHost) && nodePort == serverPort; } return false; }
From source file:de.dal33t.powerfolder.clientserver.ServerClient.java
/** * @param node/*from w w w. j ava2 s . c om*/ * @return true if the node is the primary login server for the current * account. account. */ public boolean isPrimaryServer(Member node) { if (server.equals(node)) { return true; } if (isTempServerNode(server)) { if (server.getReconnectAddress().equals(node.getReconnectAddress())) { return true; } // Try check by hostname / port InetSocketAddress nodeSockAddr = node.getReconnectAddress(); InetSocketAddress serverSockAddr = server.getReconnectAddress(); if (nodeSockAddr == null || serverSockAddr == null) { return false; } InetAddress nodeAddr = nodeSockAddr.getAddress(); InetAddress serverAddr = serverSockAddr.getAddress(); if (nodeAddr == null || serverAddr == null) { return false; } String nodeHost = NetworkUtil.getHostAddressNoResolve(nodeAddr); String serverHost = NetworkUtil.getHostAddressNoResolve(serverAddr); int nodePort = nodeSockAddr.getPort(); int serverPort = serverSockAddr.getPort(); return nodeHost.equalsIgnoreCase(serverHost) && nodePort == serverPort; } return false; }
From source file:org.ireland.jnetty.http.HttpServletRequestImpl.java
@Override // OK//from w ww.j a va2s . c om public String getRemoteAddr() { InetSocketAddress remote = _remote; if (remote == null) remote = socketChannel.remoteAddress(); if (remote == null) return ""; InetAddress address = remote.getAddress(); if (address == null) return remote.getHostString(); return address.getHostAddress(); }
From source file:org.apache.hadoop.hdfs.MiniDFSCluster.java
/** * Restart a datanode, on the same port if requested * * @param dnprop//from w w w. jav a2s . c o m * the datanode to restart * @param keepPort * whether to use the same port * @return true if restarting is successful * @throws IOException */ public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException { Configuration conf = dnprop.conf; String[] args = dnprop.dnArgs; SecureResources secureResources = dnprop.secureResources; Configuration newconf = new HdfsConfiguration(conf); // save cloned config if (keepPort) { InetSocketAddress addr = dnprop.datanode.getXferAddress(); conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort()); conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); } DataNode newDn = DataNode.createDataNode(args, conf, secureResources); dataNodes.add(new DataNodeProperties(newDn, newconf, args, secureResources, newDn.getIpcPort())); numDataNodes++; try { //[S] figure out which thread has slowed down Thread.sleep(1000); } catch (InterruptedException ex) { Logger.getLogger(MiniDFSCluster.class.getName()).log(Level.SEVERE, null, ex); } return true; }
From source file:org.apache.hadoop.raid.RaidNode.java
/**
 * Initialize the RaidNode: start the RPC server, the checksum/stripe stores, the
 * block-integrity monitors, the policy trigger/purge/HAR/statistics daemons, and the
 * HTTP server. The startup order is significant — the RPC server address must be read
 * back before dependent threads start (the configured port may be ephemeral).
 *
 * @param conf the RaidNode configuration
 */
private void initialize(Configuration conf) throws IOException, SAXException, InterruptedException,
        RaidConfigurationException, ClassNotFoundException, ParserConfigurationException, URISyntaxException,
        JSONException {
    this.startTime = RaidNode.now();
    this.conf = conf;
    modTimePeriod = conf.getLong(RAID_MOD_TIME_PERIOD_KEY, RAID_MOD_TIME_PERIOD_DEFAULT);
    LOG.info("modTimePeriod: " + modTimePeriod);
    InetSocketAddress socAddr = RaidNode.getAddress(conf);
    int handlerCount = conf.getInt("fs.raidnode.handler.count", 10);
    addTmpJars(this.conf);
    // clean up temporary directory
    cleanUpTempDirectory(conf);
    // read in the configuration
    configMgr = new ConfigManager(conf);
    // create rpc server
    this.server = RPC.getServer(this, socAddr.getAddress().getHostAddress(), socAddr.getPort(), handlerCount,
            false, conf);
    // create checksum store if not exist
    RaidNode.createChecksumStore(conf, true);
    // create stripe store if not exist
    RaidNode.createStripeStore(conf, true, FileSystem.get(conf));
    // The rpc-server port can be ephemeral... ensure we have the correct info
    this.serverAddress = this.server.getListenerAddress();
    LOG.info("RaidNode up at: " + this.serverAddress);
    // Instantiate the metrics singleton.
    RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
    this.server.start(); // start RPC server
    // Create a block integrity monitor and start its thread(s).
    // Each monitor below is optional and enabled via its RAID_DISABLE_* flag
    // (note: the decommissioning block copier is disabled by default).
    this.blockIntegrityMonitor = BlockIntegrityMonitor.createBlockIntegrityMonitor(conf);
    boolean useBlockFixer = !conf.getBoolean(RAID_DISABLE_CORRUPT_BLOCK_FIXER_KEY, false);
    boolean useBlockCopier = !conf.getBoolean(RAID_DISABLE_DECOMMISSIONING_BLOCK_COPIER_KEY, true);
    boolean useCorruptFileCounter = !conf.getBoolean(RAID_DISABLE_CORRUPTFILE_COUNTER_KEY, false);
    Runnable fixer = blockIntegrityMonitor.getCorruptionMonitor();
    if (useBlockFixer && (fixer != null)) {
        this.blockFixerThread = new Daemon(fixer);
        this.blockFixerThread.setName("Block Fixer");
        this.blockFixerThread.start();
    }
    Runnable copier = blockIntegrityMonitor.getDecommissioningMonitor();
    if (useBlockCopier && (copier != null)) {
        this.blockCopierThread = new Daemon(copier);
        this.blockCopierThread.setName("Block Copier");
        this.blockCopierThread.start();
    }
    Runnable counter = blockIntegrityMonitor.getCorruptFileCounter();
    if (useCorruptFileCounter && counter != null) {
        this.corruptFileCounterThread = new Daemon(counter);
        this.corruptFileCounterThread.setName("Corrupt File Counter");
        this.corruptFileCounterThread.start();
    }
    // start the daemon thread to fire policies appropriately
    RaidNode.triggerMonitorSleepTime = conf.getLong(TRIGGER_MONITOR_SLEEP_TIME_KEY, SLEEP_TIME);
    RaidNode.underRedundantFilesProcessorSleepTime = conf
            .getLong(UNDER_REDUNDANT_FILES_PROCESSOR_SLEEP_TIME_KEY, SLEEP_TIME);
    this.triggerMonitor = new TriggerMonitor();
    this.triggerThread = new Daemon(this.triggerMonitor);
    this.triggerThread.setName("Trigger Thread");
    this.triggerThread.start();
    this.urfProcessor = new UnderRedundantFilesProcessor(conf);
    this.urfThread = new Daemon(this.urfProcessor);
    this.urfThread.setName("UnderRedundantFilesProcessor Thread");
    this.urfThread.start();
    // start the thread that monitors and moves blocks
    this.placementMonitor = new PlacementMonitor(conf);
    this.placementMonitor.start();
    // start the thread that deletes obsolete parity files
    this.purgeMonitor = new PurgeMonitor(conf, placementMonitor, this);
    this.purgeThread = new Daemon(purgeMonitor);
    this.purgeThread.setName("Purge Thread");
    this.purgeThread.start();
    // start the thread that creates HAR files
    this.harThread = new Daemon(new HarMonitor());
    this.harThread.setName("HAR Thread");
    this.harThread.start();
    // start the thread that collects statistics
    this.statsCollector = new StatisticsCollector(this, configMgr, conf);
    this.statsCollectorThread = new Daemon(statsCollector);
    this.statsCollectorThread.setName("Stats Collector");
    this.statsCollectorThread.start();
    this.directoryTraversalShuffle = conf.getBoolean(RAID_DIRECTORYTRAVERSAL_SHUFFLE, true);
    this.directoryTraversalThreads = conf.getInt(RAID_DIRECTORYTRAVERSAL_THREADS, 4);
    startHttpServer();
    this.registerMBean();
    initialized = true;
}