Example usage for java.net InetAddress getAllByName

List of usage examples for java.net InetAddress getAllByName

Introduction

In this page you can find the example usage for java.net InetAddress getAllByName.

Prototype

public static InetAddress[] getAllByName(String host) throws UnknownHostException 

Source Link

Document

Given the name of a host, returns an array of its IP addresses, based on the configured name service on the system.

Usage

From source file:org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter.java

/**
 * Resolves the configured proxy host names to their IP addresses, caching
 * the result for {@code updateInterval} milliseconds.
 *
 * @return the set of proxy IP addresses (never empty)
 * @throws ServletException if none of the proxy hosts can be resolved
 */
protected Set<String> getProxyAddresses() throws ServletException {
    long now = System.currentTimeMillis();
    synchronized (this) {
        // Re-resolve only when the cache is missing or has EXPIRED.
        // BUG FIX: the original condition used ">= now", which refreshed on
        // every call while the cache was still fresh and then served stale
        // data forever once the interval elapsed. "<= now" is the intended
        // "cache is older than updateInterval" check.
        if (proxyAddresses == null || (lastUpdate + updateInterval) <= now) {
            proxyAddresses = new HashSet<String>();
            for (String proxyHost : proxyHosts) {
                try {
                    for (InetAddress add : InetAddress.getAllByName(proxyHost)) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("proxy address is: " + add.getHostAddress());
                        }
                        proxyAddresses.add(add.getHostAddress());
                    }
                    // Stamp the cache whenever at least one host resolved.
                    lastUpdate = now;
                } catch (UnknownHostException e) {
                    // Skip unresolvable hosts; we fail below only if NONE resolved.
                    LOG.warn("Could not locate " + proxyHost + " - skipping", e);
                }
            }
            if (proxyAddresses.isEmpty()) {
                throw new ServletException("Could not locate any of the proxy hosts");
            }
        }
        return proxyAddresses;
    }
}

From source file:org.globus.ftp.vanilla.FTPControlChannel.java

/**
 * Opens the connection and returns after it is ready for communication.
 * Before returning, it intercepts the initial server reply(-ies); if a
 * reply is not positive, it throws UnexpectedReplyCodeException.
 * After returning, there should be no more queued replies on the line.
 *
 * Here's the sequence for connection establishment (rfc959):
 * <PRE>
 *     120
 *         220
 *     220
 *     421
 *</PRE>
 * @throws IOException on I/O error
 * @throws ServerException on negative or faulty server reply
 **/
public void open() throws IOException, ServerException {

    if (hasBeenOpened()) {
        throw new IOException("Attempt to open an already opened connection");
    }

    InetAddress allIPs[];

    //depending on constructor used, we may already have streams
    if (!haveStreams()) {
        boolean found = false;
        int i = 0;
        boolean firstPass = true;

        // Resolve every address registered for the host; they are tried in turn.
        allIPs = InetAddress.getAllByName(host);

        // Try each resolved address until one accepts the connection. The whole
        // address list is attempted at most twice; only after the second full
        // pass fails is the last IOException rethrown to the caller.
        while (!found) {
            try {
                logger.debug("opening control channel to " + allIPs[i] + " : " + port);
                InetSocketAddress isa = new InetSocketAddress(allIPs[i], port);

                // The same configured timeout bounds both the connect attempt
                // and subsequent reads on the control channel.
                socket = new Socket();
                socket.setSoTimeout(CoGProperties.getDefault().getSocketTimeout());
                socket.connect(isa, CoGProperties.getDefault().getSocketTimeout());
                found = true;
            } catch (IOException ioEx) {
                logger.debug("failed connecting to  " + allIPs[i] + " : " + port + ":" + ioEx);
                i++;
                if (i == allIPs.length) {
                    if (firstPass) {
                        // List exhausted once: restart from the first address
                        // for the second (and final) pass.
                        firstPass = false;
                        i = 0;
                    } else {
                        throw ioEx;
                    }
                }
            }
        }

        // Record which host we actually connected to: the numeric IP when the
        // org.globus.ftp.IPNAME property is set, the canonical name otherwise.
        String pv = System.getProperty("org.globus.ftp.IPNAME");
        if (pv != null) {
            host = socket.getInetAddress().getHostAddress();
        } else {
            host = socket.getInetAddress().getCanonicalHostName();
        }

        setInputStream(socket.getInputStream());
        setOutputStream(socket.getOutputStream());
    }

    // Drain the server's initial reply sequence (see rfc959 diagram above).
    readInitialReplies();

    hasBeenOpened = true;
}

From source file:edu.uci.ics.hyracks.imru.jobgen.ClusterConfig.java

/**
 * Set location constraints for an operator based on the locations of input
 * files in HDFS. Randomly assigns partitions to NCs where the HDFS files
 * are local; assigns the rest randomly.
 * /*w ww  . ja v  a2s .  c  o  m*/
 * @param spec
 *            A job specification.
 * @param operator
 *            The operator that will be constrained.
 * @param splits
 *            A list of InputSplits specifying files in HDFS.
 * @param random
 *            A source of randomness (so the partition-assignment can be
 *            repeated across iterations, provided that the HDFS file
 *            locations don't change).
 * @return The assigned partition locations.
 * @throws IOException
 * @throws HyracksException
 */
public static String[] setLocationConstraint(JobSpecification spec, IMRUOperatorDescriptor operator,
        InputSplit[] hdfsSplits, IMRUFileSplit[] splits, Random random) throws IOException {
    if (NCs == null)
        loadClusterConfig();
    if (splits.length == 0)
        return new String[0];

    if (hdfsSplits == null) {
        int partitionCount = splits.length;
        String[] partitionLocations = new String[partitionCount];
        for (int partition = 0; partition < partitionCount; partition++) {
            int pos = partition % NCs.length;
            String path = splits[partition].getPath();
            int t = path.indexOf(":");
            if (t > 0)
                partitionLocations[partition] = path.substring(0, t);
            else
                partitionLocations[partition] = NCs[pos];
        }
        if (operator != null) {
            PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, operator, partitionLocations);
            PartitionConstraintHelper.addPartitionCountConstraint(spec, operator, partitionCount);
        }
        return partitionLocations;
    }
    int partitionCount = splits.length;
    String[] partitionLocations = new String[partitionCount];
    int localAssignments = 0;
    int nonlocalAssignments = 0;
    for (int partition = 0; partition < partitionCount; partition++) {
        String[] localHosts = hdfsSplits[partition].getLocations();
        // Remove nondeterminism from the call to getLocations():
        Collections.sort(Arrays.asList(localHosts));
        Collections.shuffle(Arrays.asList(localHosts), random);
        if (localHosts.length > 0) {
            LOG.info("Partition " + partition + " is local at " + localHosts.length + " hosts: "
                    + StringUtils.join(localHosts, ", "));
            for (int host = 0; host < localHosts.length; host++) {
                InetAddress[] hostIps = InetAddress.getAllByName(localHosts[host]);
                for (InetAddress ip : hostIps) {
                    if (ipToNcMapping.get(ip.getHostAddress()) != null) {
                        List<String> ncs = ipToNcMapping.get(ip.getHostAddress());
                        int pos = random.nextInt(ncs.size());
                        partitionLocations[partition] = ncs.get(pos);
                        LOG.info("Partition " + partition + " assigned to " + ncs.get(pos)
                                + ", where it is local.");
                        localAssignments++;
                        break;
                    }
                }
                if (partitionLocations[partition] != null) {
                    break;
                }
            }
            if (partitionLocations[partition] == null) {
                int pos = random.nextInt(NCs.length);
                partitionLocations[partition] = NCs[pos];
                nonlocalAssignments++;
                LOG.info("Partition " + partition + " assigned to " + NCs[pos]
                        + " because there is no NC where it is local.");
            }
        } else {
            int pos = random.nextInt(NCs.length);
            partitionLocations[partition] = NCs[pos];
            nonlocalAssignments++;
            LOG.info("Partition " + partition + " assigned to " + NCs[pos]
                    + " becasue getLocations() returned no locations.");

        }
    }
    if (LOG.isLoggable(Level.INFO)) {
        LOG.info("NC partition counts:");
        Map<String, MutableInt> ncPartitionCounts = new HashMap<String, MutableInt>();
        for (int i = 0; i < partitionLocations.length; i++) {
            if (ncPartitionCounts.get(partitionLocations[i]) == null) {
                ncPartitionCounts.put(partitionLocations[i], new MutableInt(1));
            } else {
                ncPartitionCounts.get(partitionLocations[i]).increment();
            }
        }
        for (Map.Entry<String, MutableInt> entry : ncPartitionCounts.entrySet()) {
            LOG.info(entry.getKey() + ": " + entry.getValue().intValue() + " partitions");
        }
    }
    double localityPercentage = ((1.0 * localAssignments) / (localAssignments + nonlocalAssignments)) * 100;
    if (operator != null) {
        LOG.info(operator.getClass().getSimpleName() + ": " + localAssignments + " local; "
                + nonlocalAssignments + " non-local; " + localityPercentage + "% locality");
        PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, operator, partitionLocations);
        PartitionConstraintHelper.addPartitionCountConstraint(spec, operator, partitionCount);
    }
    return partitionLocations;
}

From source file:org.lobzik.home_sapiens.pi.AppData.java

/**
 * Builds the list of HTTP endpoint URLs ("http://host:port") exposed by
 * every Tomcat HTTP/1.1 connector, one per (connector, local address) pair.
 * Tomcat-specific: relies on Catalina's Connector MBeans being registered
 * on the platform MBean server.
 *
 * @return endpoint URLs for all HTTP connectors across all local addresses
 * @throws Exception if MBean queries or host resolution fail
 */
public static List<String> getHTTPEndPoints() throws Exception { //tomcat-specific
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    Set<ObjectName> objs = mbs.queryNames(new ObjectName("*:type=Connector,*"),
            Query.match(Query.attr("protocol"), Query.value("HTTP/1.1")));
    String hostname = InetAddress.getLocalHost().getHostName();
    InetAddress[] addresses = InetAddress.getAllByName(hostname);
    ArrayList<String> endPoints = new ArrayList<String>();
    for (ObjectName obj : objs) {
        String scheme = mbs.getAttribute(obj, "scheme").toString();
        // equalsIgnoreCase avoids the locale-sensitive toLowerCase() pitfall
        // (e.g. the Turkish dotless-i turning "HTTP" into a non-match).
        if (!"http".equalsIgnoreCase(scheme)) {
            continue;
        }
        String port = obj.getKeyProperty("port");
        for (InetAddress addr : addresses) {
            String host = addr.getHostAddress();
            String ep = scheme + "://" + host + ":" + port;
            endPoints.add(ep);
        }
    }
    return endPoints;
}

From source file:com.scaleoutsoftware.soss.hserver.JobScheduler.java

/**
 * Returns the SOSS host IPs which correspond to the locations for the given split.
 *
 * @param split                    split to locate
 * @param sossHostAdresses         list of available SOSS hosts
 * @param additionalSplitLocations additional locations for that split, can be null
 * @return list of split locations, or empty list if none found
 */
private List<InetAddress> getSossLocations(Object split, List<InetAddress> sossHostAdresses,
        String[] additionalSplitLocations) {
    List<InetAddress> splitLocations = new ArrayList<InetAddress>();

    try {
        //If GridSplit, just find and return its home location
        //(short-circuits the name-resolution path entirely)
        if (split instanceof GridSplit) {
            InetAddress location = ((GridSplit) split).getLocation();
            if (location != null && sossHostAdresses.contains(location)) {
                splitLocations.add(location);
                return splitLocations;
            }
        }

        //Parse locations in the split object; each name may resolve to
        //several addresses, all of which are candidates.
        String[] locations = getSplitLocations(split);
        if (locations != null) {
            for (String location : locations) {
                try {
                    splitLocations.addAll(Arrays.asList(InetAddress.getAllByName(location)));
                } catch (UnknownHostException e) {
                    //Do nothing, must be a bad location
                }
            }
        }

        //Add additional locations, passed separate from the split
        if (additionalSplitLocations != null) {
            for (String location : additionalSplitLocations) {
                try {
                    splitLocations.addAll(Arrays.asList(InetAddress.getAllByName(location)));
                } catch (UnknownHostException e) {
                    //Do nothing, must be a bad location
                }
            }
        }

        //Remove locations which are not SOSS locations
        //(Iterator.remove is required while iterating)
        Iterator<InetAddress> iterator = splitLocations.iterator();
        while (iterator.hasNext()) {
            if (!sossHostAdresses.contains(iterator.next())) {
                iterator.remove();
            }
        }

    } catch (InterruptedException e) {
        //Do nothing, split will be assigned to the random location
    } catch (IOException e) {
        //Do nothing, split will be assigned to the random location
    }

    return splitLocations;
}

From source file:org.apache.hadoop.hbase.TestIPv6NIOServerSocketChannel.java

/**
 * Verifies that every address "localhost" resolves to is IPv4, i.e. that
 * the JVM is effectively running with java.net.preferIPv4Stack=true.
 */
public void ensurePreferIPv4() throws IOException {
    for (InetAddress resolved : InetAddress.getAllByName("localhost")) {
        LOG.info("resolved localhost as:" + resolved);
        // An IPv4 address is exactly 4 bytes; an IPv6 address would be 16.
        Assert.assertEquals(4, resolved.getAddress().length);
    }
}

From source file:org.opennms.core.test.kafka.JUnitKafkaServer.java

/**
 * Resolves a non-link-local address for "localhost", falling back to the
 * literal name "localhost" when resolution fails or only link-local
 * addresses are available.
 */
private static String getLocalhost() {
    // Workaround for OS X Lion: connecting to a link-local address can take
    // ~5 seconds to establish, and plain "localhost" may resolve to one at
    // random. Resolve manually and return the first non-link-local address;
    // otherwise fall back to the default of just using 'localhost'.
    try {
        for (InetAddress candidate : InetAddress.getAllByName("localhost")) {
            if (!candidate.isLinkLocalAddress()) {
                return candidate.getHostAddress();
            }
        }
    } catch (UnknownHostException e) {
        // Lookup failed entirely; use the default name below.
    }
    return "localhost";
}

From source file:com.aerospike.hadoop.mapreduce.AerospikeInputFormat.java

/**
 * Resolves every IP address of the given host and returns one Host alias
 * per address, preserving the original TLS name and port.
 *
 * @param host the host to resolve
 * @return one alias per resolved address
 */
private List<Host> getAliases(Host host) {
    InetAddress[] resolved;
    try {
        resolved = InetAddress.getAllByName(host.name);
    } catch (UnknownHostException uhe) {
        throw new AerospikeException.Connection("Invalid host: " + host);
    }

    if (resolved.length == 0) {
        throw new AerospikeException.Connection("Failed to find addresses for " + host);
    }

    // Capacity leaves headroom for an IPV6 address and the hostname, which
    // callers may append to this list later.
    List<Host> aliases = new ArrayList<Host>(resolved.length + 2);
    for (InetAddress address : resolved) {
        aliases.add(new Host(address.getHostAddress(), host.tlsName, host.port));
    }

    return aliases;
}

From source file:org.apache.hadoop.hbase.TestIPv6NIOServerSocketChannel.java

/**
 * Tests whether every InetAddress we obtain by resolving can open a
 * ServerSocketChannel./*from  w  w  w .  j  a v  a2  s  .c  o  m*/
 */
@Test
public void testServerSocketFromLocalhostResolution() throws IOException {
    InetAddress[] addrs = InetAddress.getAllByName("localhost");
    for (InetAddress addr : addrs) {
        LOG.info("resolved localhost as:" + addr);
        bindServerSocket(addr);
        bindNIOServerSocket(addr);
    }
}

From source file:net.grinder.util.NetworkUtil.java

/**
 * Get the IP addresses for the given host name.
 *
 * @param host
 *            host name to resolve
 * @return {@link InetAddress} array; empty (never null) when the host
 *         cannot be resolved
 */
public static InetAddress[] getIpsFromHost(String host) {
    try {
        return InetAddress.getAllByName(host);
    } catch (UnknownHostException e) {
        // Fixed log wording: the failure concerns the supplied host name,
        // not the localhost name as the original message implied.
        LOGGER.error("Error while resolving IP addresses for host {}", host, e);
        return new InetAddress[] {};
    }
}