Example usage for java.net InetAddress getHostName

List of usage examples for java.net InetAddress getHostName

Introduction

This page collects example usages of the java.net.InetAddress.getHostName() method, drawn from open-source projects.

Prototype

public String getHostName() 

Document

Gets the host name for this IP address.
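Per the Javadoc, if the InetAddress was created from a host name, that name is remembered and returned without a lookup; otherwise a reverse name lookup is performed, and the textual IP address is returned if the lookup fails. Below is a minimal, self-contained sketch of that behavior (the host name and address are illustrative; 192.0.2.1 is an RFC 5737 documentation address that normally has no reverse mapping):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class GetHostNameDemo {
    public static void main(String[] args) throws UnknownHostException {
        // Created from a host name: the name was remembered at creation,
        // so getHostName() returns it without a reverse lookup.
        InetAddress byName = InetAddress.getByName("example.com");
        System.out.println(byName.getHostName());    // "example.com"
        System.out.println(byName.getHostAddress()); // the resolved IP

        // Created from a literal address: getHostName() triggers a reverse
        // DNS lookup; if it fails, the textual IP is returned instead.
        InetAddress byAddr = InetAddress.getByName("192.0.2.1");
        System.out.println(byAddr.getHostName());
    }
}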

Usage

From source file:org.gcaldaemon.core.http.HTTPListener.java

private final void checkAccess(Socket socket) throws IOException, Exception {
    if (hosts != null || addresses != null) {
        InetAddress inetAddress = socket.getInetAddress();
        if (hosts != null) {
            String host = inetAddress.getHostName();
            if (host == null || host.length() == 0 || host.equals("127.0.0.1")) {
                host = "localhost";
            } else {
                host = host.toLowerCase();
                if (host.equals("localhost.localdomain")) {
                    host = "localhost";
                }
            }
            if (!isHostMatch(host)) {
                log.warn("Connection refused (" + host + " is a forbidden hostname)!");
                throw new Exception("forbidden hostname (" + host + ')');
            }
        }
        if (addresses != null) {
            String address = inetAddress.getHostAddress();
            if (address == null || address.length() == 0) {
                address = "127.0.0.1";
            }
            if (!isAddressMatch(address)) {
                log.warn("Connection refused (" + address + " is a forbidden address)!");
                throw new Exception("forbidden IP-address (" + address + ')');
            }
        }
    }
}

From source file:com.predic8.membrane.core.transport.ssl.SSLContext.java

public Socket createSocket(InetAddress host, int port, InetAddress addr, int localPort, int connectTimeout)
        throws IOException {
    Socket s = new Socket();
    s.bind(new InetSocketAddress(addr, localPort));
    s.connect(new InetSocketAddress(host, port), connectTimeout);
    SSLSocketFactory sslsf = sslc.getSocketFactory();
    SSLSocket ssls = (SSLSocket) sslsf.createSocket(s, host.getHostName(), port, true);
    applyCiphers(ssls);
    if (protocols != null) {
        ssls.setEnabledProtocols(protocols);
    } else {
        String[] protocols = ssls.getEnabledProtocols();
        Set<String> set = new HashSet<String>();
        for (String protocol : protocols) {
            if (protocol.equals("SSLv3") || protocol.equals("SSLv2Hello")) {
                continue;
            }
            set.add(protocol);
        }
        ssls.setEnabledProtocols(set.toArray(new String[0]));
    }
    return ssls;
}

From source file:com.buaa.cfs.mount.RpcProgramMountd.java

@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
    if (hostsMatcher == null) {
        return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null);
    }
    AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
    if (accessPrivilege == AccessPrivilege.NONE) {
        return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null);
    }

    String path = xdr.readString();
    if (LOG.isDebugEnabled()) {
        LOG.debug("MOUNT MNT path: " + path + " client: " + client);
    }

    String host = client.getHostName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Got host: " + host + " path: " + path);
    }
    if (!exports.contains(path)) {
        LOG.info("Path " + path + " is not shared.");
        MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
        return out;
    }

    FileHandle handle = null;
    //        try {
    //            HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
    //
    //            handle = new FileHandle(exFileStatus.getFileId());
    //        } catch (IOException e) {
    //            LOG.error("Can't get handle for export:" + path, e);
    //            MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    //            return out;
    //        }

    assert (handle != null);
    LOG.info("Giving handle (fileId:" + handle.getFileId() + ") to client for export " + path);
    mounts.add(new MountEntry(host, path));

    MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid, handle.getContent());
    return out;
}

From source file:org.apache.hadoop.hive.llap.LlapBaseInputFormat.java

private ServiceInstance getServiceInstanceForHost(LlapRegistryService registryService, String host)
        throws IOException {
    InetAddress address = InetAddress.getByName(host);
    ServiceInstanceSet instanceSet = registryService.getInstances();
    ServiceInstance serviceInstance = null;

    // The name used in the service registry may not match the host name we're using.
    // Try hostname/canonical hostname/host address

    String name = address.getHostName();
    LOG.info("Searching service instance by hostname " + name);
    serviceInstance = selectServiceInstance(instanceSet.getByHost(name));
    if (serviceInstance != null) {
        return serviceInstance;
    }

    name = address.getCanonicalHostName();
    LOG.info("Searching service instance by canonical hostname " + name);
    serviceInstance = selectServiceInstance(instanceSet.getByHost(name));
    if (serviceInstance != null) {
        return serviceInstance;
    }

    name = address.getHostAddress();
    LOG.info("Searching service instance by address " + name);
    serviceInstance = selectServiceInstance(instanceSet.getByHost(name));
    if (serviceInstance != null) {
        return serviceInstance;
    }

    return serviceInstance;
}
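The fallback order above can be distilled into a small standalone sketch. The registry types (ServiceInstanceSet, selectServiceInstance) are specific to LLAP; the helper below is hypothetical and only shows the three candidate names tried in order:

import java.net.InetAddress;
import java.util.LinkedHashSet;
import java.util.Set;

public class HostNameCandidates {
    // Hypothetical helper: the distinct names a registry entry might be
    // keyed by, in the same order the LLAP example tries them.
    static Set<String> candidateNames(String host) throws Exception {
        InetAddress address = InetAddress.getByName(host);
        Set<String> names = new LinkedHashSet<>();
        names.add(address.getHostName());          // short / remembered name
        names.add(address.getCanonicalHostName()); // fully-qualified name
        names.add(address.getHostAddress());       // raw IP address
        return names;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(candidateNames("localhost"));
    }
}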

From source file:org.apache.synapse.transport.nhttp.ServerWorker.java

/**
 * Process the incoming request
 */
@SuppressWarnings({ "unchecked" })
public void run() {

    CustomLogSetter.getInstance().clearThreadLocalContent();
    conn.getContext().setAttribute(NhttpConstants.SERVER_WORKER_START_TIME, System.currentTimeMillis());
    conn.getContext().setAttribute(NhttpConstants.SERVER_WORKER_THREAD_ID, Thread.currentThread().getId());
    String method = request.getRequestLine().getMethod().toUpperCase();
    msgContext.setProperty(Constants.Configuration.HTTP_METHOD, request.getRequestLine().getMethod());

    if (NHttpConfiguration.getInstance().isHttpMethodDisabled(method)) {
        handleException("Unsupported method : " + method, null);
    }

    //String uri = request.getRequestLine().getUri();
    String oriUri = request.getRequestLine().getUri();
    String restUrlPostfix = NhttpUtil.getRestUrlPostfix(oriUri, cfgCtx.getServicePath());

    msgContext.setProperty(NhttpConstants.REST_URL_POSTFIX, restUrlPostfix);
    String servicePrefix = oriUri.substring(0, oriUri.indexOf(restUrlPostfix));
    if (servicePrefix.indexOf("://") == -1) {
        HttpInetConnection inetConn = (HttpInetConnection) conn;
        InetAddress localAddr = inetConn.getLocalAddress();
        if (localAddr != null) {
            servicePrefix = schemeName + "://" + localAddr.getHostName() + ":" + inetConn.getLocalPort()
                    + servicePrefix;
        }
    }
    msgContext.setProperty(NhttpConstants.SERVICE_PREFIX, servicePrefix);

    if ("GET".equals(method)) {
        httpGetRequestProcessor.process(request, response, msgContext, conn, os, isRestDispatching);
    } else if ("POST".equals(method)) {
        processEntityEnclosingMethod();
    } else if ("PUT".equals(method)) {
        processEntityEnclosingMethod();
    } else if ("HEAD".equals(method)) {
        processNonEntityEnclosingMethod();
    } else if ("OPTIONS".equals(method)) {
        processNonEntityEnclosingMethod();
    } else if ("DELETE".equals(method)) {
        processGetAndDelete("DELETE");
    } else if ("TRACE".equals(method)) {
        processNonEntityEnclosingMethod();
    } else if ("PATCH".equals(method)) {
        processEntityEnclosingMethod();
    } else {
        handleException("Unsupported method : " + method, null);
    }

    // here the RequestResponseTransport plays an important role when it comes to
    // dual channel invocation. This is because we need to ACK the request once it
    // is received by Synapse. Otherwise we will not be able to support the single channel
    // invocation within the actual service and synapse for a dual channel request from the
    // client.
    if (isAckRequired()) {
        String respWritten = "";
        if (msgContext.getOperationContext() != null) {
            respWritten = (String) msgContext.getOperationContext().getProperty(Constants.RESPONSE_WRITTEN);
        }
        boolean respWillFollow = !Constants.VALUE_TRUE.equals(respWritten) && !"SKIP".equals(respWritten);
        boolean acked = (((RequestResponseTransport) msgContext
                .getProperty(RequestResponseTransport.TRANSPORT_CONTROL))
                        .getStatus() == RequestResponseTransport.RequestResponseTransportStatus.ACKED);
        boolean forced = msgContext.isPropertyTrue(NhttpConstants.FORCE_SC_ACCEPTED);
        boolean nioAck = msgContext.isPropertyTrue("NIO-ACK-Requested", false);

        if (respWillFollow || acked || forced || nioAck) {

            if (!nioAck) {
                if (log.isDebugEnabled()) {
                    log.debug("Sending 202 Accepted response for MessageID : " + msgContext.getMessageID()
                            + " response written : " + respWritten + " response will follow : " + respWillFollow
                            + " acked : " + acked + " forced ack : " + forced);
                }
                response.setStatusCode(HttpStatus.SC_ACCEPTED);
            } else {
                if (log.isDebugEnabled()) {
                    log.debug(
                            "Sending ACK response with status " + msgContext.getProperty(NhttpConstants.HTTP_SC)
                                    + ", for MessageID : " + msgContext.getMessageID());
                }
                response.setStatusCode(
                        Integer.parseInt(msgContext.getProperty(NhttpConstants.HTTP_SC).toString()));
                Map<String, String> responseHeaders = (Map<String, String>) msgContext
                        .getProperty(MessageContext.TRANSPORT_HEADERS);
                if (responseHeaders != null) {
                    for (String headerName : responseHeaders.keySet()) {
                        response.addHeader(headerName, responseHeaders.get(headerName));

                        String excessProp = NhttpConstants.EXCESS_TRANSPORT_HEADERS;

                        Map map = (Map) msgContext.getProperty(excessProp);
                        if (map != null) {
                            log.debug("Number of excess values for " + headerName + " header is : "
                                    + ((Collection) (map.get(headerName))).size());

                            for (Iterator iterator = map.keySet().iterator(); iterator.hasNext();) {
                                String key = (String) iterator.next();

                                for (String excessVal : (Collection<String>) map.get(key)) {
                                    response.addHeader(headerName, (String) excessVal);
                                }

                            }
                        }
                    }

                }
            }

            if (metrics != null) {
                metrics.incrementMessagesSent();
            }

            try {

                /* 
                  * Remove Content-Length and Transfer-Encoding headers, if already present.
                  * */
                response.removeHeaders(HTTP.TRANSFER_ENCODING);
                response.removeHeaders(HTTP.CONTENT_LEN);

                serverHandler.commitResponse(conn, response);

            } catch (HttpException e) {
                if (metrics != null) {
                    metrics.incrementFaultsSending();
                }
                handleException("Unexpected HTTP protocol error : " + e.getMessage(), e);
            } catch (ConnectionClosedException e) {
                if (metrics != null) {
                    metrics.incrementFaultsSending();
                }
                log.warn("Connection closed by client (Connection closed)");
            } catch (IllegalStateException e) {
                if (metrics != null) {
                    metrics.incrementFaultsSending();
                }
                log.warn("Connection closed by client (Buffer closed)");
            } catch (IOException e) {
                if (metrics != null) {
                    metrics.incrementFaultsSending();
                }
                handleException("IO Error sending response message", e);
            } catch (Exception e) {
                if (metrics != null) {
                    metrics.incrementFaultsSending();
                }
                handleException("General Error sending response message", e);
            }

            if (is != null) {
                try {
                    is.close();
                } catch (IOException ignore) {
                }
            }

            // make sure that the output stream is flushed and closed properly
            try {
                os.flush();
                os.close();
            } catch (IOException ignore) {
            }
        }
    }
}

From source file:org.apache.nifi.web.api.DataTransferResource.java

private Peer constructPeer(final HttpServletRequest req, final InputStream inputStream,
        final OutputStream outputStream, final String portId, final String transactionId) {
    String clientHostName = req.getRemoteHost();
    try {
        // req.getRemoteHost returns IP address, try to resolve hostname to be consistent with RAW protocol.
        final InetAddress clientAddress = InetAddress.getByName(clientHostName);
        clientHostName = clientAddress.getHostName();
    } catch (UnknownHostException e) {
        logger.info("Failed to resolve client hostname {}, due to {}", clientHostName, e.getMessage());
    }
    final int clientPort = req.getRemotePort();

    final PeerDescription peerDescription = new PeerDescription(clientHostName, clientPort, req.isSecure());

    final NiFiUser user = NiFiUserUtils.getNiFiUser();
    final String userDn = user == null ? null : user.getIdentity();
    final HttpServerCommunicationsSession commSession = new HttpServerCommunicationsSession(inputStream,
            outputStream, transactionId, userDn);

    boolean useCompression = false;
    final String useCompressionStr = req.getHeader(HANDSHAKE_PROPERTY_USE_COMPRESSION);
    if (!isEmpty(useCompressionStr) && Boolean.valueOf(useCompressionStr)) {
        useCompression = true;
    }

    final String requestExpiration = req.getHeader(HANDSHAKE_PROPERTY_REQUEST_EXPIRATION);
    final String batchCount = req.getHeader(HANDSHAKE_PROPERTY_BATCH_COUNT);
    final String batchSize = req.getHeader(HANDSHAKE_PROPERTY_BATCH_SIZE);
    final String batchDuration = req.getHeader(HANDSHAKE_PROPERTY_BATCH_DURATION);

    commSession.putHandshakeParam(HandshakeProperty.PORT_IDENTIFIER, portId);
    commSession.putHandshakeParam(HandshakeProperty.GZIP, String.valueOf(useCompression));

    if (!isEmpty(requestExpiration)) {
        commSession.putHandshakeParam(REQUEST_EXPIRATION_MILLIS, requestExpiration);
    }
    if (!isEmpty(batchCount)) {
        commSession.putHandshakeParam(BATCH_COUNT, batchCount);
    }
    if (!isEmpty(batchSize)) {
        commSession.putHandshakeParam(BATCH_SIZE, batchSize);
    }
    if (!isEmpty(batchDuration)) {
        commSession.putHandshakeParam(BATCH_DURATION, batchDuration);
    }

    if (peerDescription.isSecure()) {
        final NiFiUser nifiUser = NiFiUserUtils.getNiFiUser();
        logger.debug("initiating peer, nifiUser={}", nifiUser);
        commSession.setUserDn(nifiUser.getIdentity());
    }

    // TODO: Followed how SocketRemoteSiteListener define peerUrl and clusterUrl, but it can be more meaningful values, especially for clusterUrl.
    final String peerUrl = "nifi://" + clientHostName + ":" + clientPort;
    final String clusterUrl = "nifi://localhost:" + req.getLocalPort();

    return new Peer(peerDescription, commSession, peerUrl, clusterUrl);
}

From source file:org.apache.whirr.service.hbase.HBaseRegionServerClusterActionHandler.java

@Override
protected void beforeConfigure(ClusterActionEvent event) throws IOException, InterruptedException {
    ClusterSpec clusterSpec = event.getClusterSpec();
    Cluster cluster = event.getCluster();
    Configuration conf = getConfiguration(clusterSpec);

    Instance instance = cluster.getInstanceMatching(role(HBaseMasterClusterActionHandler.ROLE));
    InetAddress masterPublicAddress = instance.getPublicAddress();

    event.getFirewallManager()
            .addRules(Rule.create().destination(instance).ports(REGIONSERVER_WEB_UI_PORT, REGIONSERVER_PORT));

    try {
        event.getStatementBuilder().addStatements(buildHBaseSite("/tmp/hbase-site.xml", clusterSpec, cluster),
                buildHBaseEnv("/tmp/hbase-env.sh", clusterSpec, cluster),
                TemplateUtils.createFileFromTemplate("/tmp/hbase-hadoop-metrics.properties",
                        event.getTemplateEngine(), getMetricsTemplate(event, clusterSpec, cluster), clusterSpec,
                        cluster));
    } catch (ConfigurationException e) {
        throw new IOException(e);
    }

    String master = masterPublicAddress.getHostName();
    String quorum = ZooKeeperCluster.getHosts(cluster);

    String tarurl = prepareRemoteFileUrl(event, conf.getString(HBaseConstants.KEY_TARBALL_URL));

    addStatement(event, call(getConfigureFunction(conf), ROLE, HBaseConstants.PARAM_MASTER, master,
            HBaseConstants.PARAM_QUORUM, quorum, HBaseConstants.PARAM_TARBALL_URL, tarurl));
}

From source file:org.apache.hadoop.mapreduce.JobSubmitter.java

/**
 * Internal method for submitting jobs to the system.
 *
 * <p>The job submission process involves:
 * <ol>
 *   <li>
 *   Checking the input and output specifications of the job.
 *   </li>
 *   <li>
 *   Computing the {@link InputSplit}s for the job.
 *   </li>
 *   <li>
 *   Setup the requisite accounting information for the 
 *   {@link DistributedCache} of the job, if necessary.
 *   </li>
 *   <li>
 *   Copying the job's jar and configuration to the map-reduce system
 *   directory on the distributed file-system. 
 *   </li>
 *   <li>
 *   Submitting the job to the <code>JobTracker</code> and optionally
 *   monitoring its status.
 *   </li>
 * </ol></p>
 * @param job the configuration to submit
 * @param cluster the handle to the Cluster
 * @throws ClassNotFoundException
 * @throws InterruptedException
 * @throws IOException
 */
JobStatus submitJobInternal(Job job, Cluster cluster)
        throws ClassNotFoundException, InterruptedException, IOException {

    //validate the jobs output specs 
    checkSpecs(job);

    Configuration conf = job.getConfiguration();
    addMRFrameworkToDistributedCache(conf);

    Path jobStagingArea = JobSubmissionFiles.getStagingDir(cluster, conf);
    //configure the command line options correctly on the submitting dfs
    InetAddress ip = InetAddress.getLocalHost();
    if (ip != null) {
        submitHostAddress = ip.getHostAddress();
        submitHostName = ip.getHostName();
        conf.set(MRJobConfig.JOB_SUBMITHOST, submitHostName);
        conf.set(MRJobConfig.JOB_SUBMITHOSTADDR, submitHostAddress);
    }
    JobID jobId = submitClient.getNewJobID();
    job.setJobID(jobId);
    Path submitJobDir = new Path(jobStagingArea, jobId.toString());
    JobStatus status = null;
    try {
        conf.set(MRJobConfig.USER_NAME, UserGroupInformation.getCurrentUser().getShortUserName());
        conf.set("hadoop.http.filter.initializers",
                "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");
        conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, submitJobDir.toString());
        LOG.debug("Configuring job " + jobId + " with " + submitJobDir + " as the submit dir");
        // get delegation token for the dir
        TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { submitJobDir }, conf);

        populateTokenCache(conf, job.getCredentials());

        // generate a secret to authenticate shuffle transfers
        if (TokenCache.getShuffleSecretKey(job.getCredentials()) == null) {
            KeyGenerator keyGen;
            try {

                int keyLen = CryptoUtils.isShuffleEncrypted(conf)
                        ? conf.getInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS,
                                MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS)
                        : SHUFFLE_KEY_LENGTH;
                keyGen = KeyGenerator.getInstance(SHUFFLE_KEYGEN_ALGORITHM);
                keyGen.init(keyLen);
            } catch (NoSuchAlgorithmException e) {
                throw new IOException("Error generating shuffle secret key", e);
            }
            SecretKey shuffleKey = keyGen.generateKey();
            TokenCache.setShuffleSecretKey(shuffleKey.getEncoded(), job.getCredentials());
        }

        copyAndConfigureFiles(job, submitJobDir);

        Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);

        // Create the splits for the job
        LOG.debug("Creating splits at " + jtFs.makeQualified(submitJobDir));
        int maps = writeSplits(job, submitJobDir);
        conf.setInt(MRJobConfig.NUM_MAPS, maps);
        LOG.info("number of splits:" + maps);

        // write "queue admins of the queue to which job is being submitted"
        // to job file.
        String queue = conf.get(MRJobConfig.QUEUE_NAME, JobConf.DEFAULT_QUEUE_NAME);
        AccessControlList acl = submitClient.getQueueAdmins(queue);
        conf.set(toFullPropertyName(queue, QueueACL.ADMINISTER_JOBS.getAclName()), acl.getAclString());

        // removing jobtoken referrals before copying the jobconf to HDFS
        // as the tasks don't need this setting, actually they may break
        // because of it if present as the referral will point to a
        // different job.
        TokenCache.cleanUpTokenReferral(conf);

        if (conf.getBoolean(MRJobConfig.JOB_TOKEN_TRACKING_IDS_ENABLED,
                MRJobConfig.DEFAULT_JOB_TOKEN_TRACKING_IDS_ENABLED)) {
            // Add HDFS tracking ids
            ArrayList<String> trackingIds = new ArrayList<String>();
            for (Token<? extends TokenIdentifier> t : job.getCredentials().getAllTokens()) {
                trackingIds.add(t.decodeIdentifier().getTrackingId());
            }
            conf.setStrings(MRJobConfig.JOB_TOKEN_TRACKING_IDS,
                    trackingIds.toArray(new String[trackingIds.size()]));
        }

        // Set reservation info if it exists
        ReservationId reservationId = job.getReservationId();
        if (reservationId != null) {
            conf.set(MRJobConfig.RESERVATION_ID, reservationId.toString());
        }

        // Write job file to submit dir
        writeConf(conf, submitJobFile);
        Limits.reset(conf);

        //
        // Now, actually submit the job (using the submit name)
        //
        printTokens(jobId, job.getCredentials());
        status = submitClient.submitJob(jobId, submitJobDir.toString(), job.getCredentials());
        if (status != null) {
            return status;
        } else {
            throw new IOException("Could not launch job");
        }
    } finally {
        if (status == null) {
            LOG.info("Cleaning up the staging area " + submitJobDir);
            if (jtFs != null && submitJobDir != null)
                jtFs.delete(submitJobDir, true);

        }
    }
}

From source file:org.apache.whirr.service.hbase.HBaseMasterClusterActionHandler.java

@Override
protected void beforeConfigure(ClusterActionEvent event) throws IOException, InterruptedException {
    ClusterSpec clusterSpec = event.getClusterSpec();
    Cluster cluster = event.getCluster();
    Configuration conf = getConfiguration(clusterSpec);

    LOG.info("Authorizing firewall");
    Instance instance = cluster.getInstanceMatching(role(ROLE));
    InetAddress masterPublicAddress = instance.getPublicAddress();

    event.getFirewallManager()
            .addRules(Rule.create().destination(instance).ports(MASTER_WEB_UI_PORT, MASTER_PORT));

    try {
        event.getStatementBuilder().addStatements(buildHBaseSite("/tmp/hbase-site.xml", clusterSpec, cluster),
                buildHBaseEnv("/tmp/hbase-env.sh", clusterSpec, cluster),
                TemplateUtils.createFileFromTemplate("/tmp/hbase-hadoop-metrics.properties",
                        event.getTemplateEngine(), getMetricsTemplate(event, clusterSpec, cluster), clusterSpec,
                        cluster));

    } catch (ConfigurationException e) {
        throw new IOException(e);
    }

    String master = masterPublicAddress.getHostName();
    String quorum = ZooKeeperCluster.getHosts(cluster);

    String tarurl = prepareRemoteFileUrl(event, conf.getString(HBaseConstants.KEY_TARBALL_URL));

    addStatement(event, call(getConfigureFunction(conf), ROLE, HBaseConstants.PARAM_MASTER, master,
            HBaseConstants.PARAM_QUORUM, quorum, HBaseConstants.PARAM_TARBALL_URL, tarurl));
}

From source file:edu.ku.brc.specify.toycode.RegPivot.java

/**
 * @param tblName
 * @param keyName
 */
public void fillCountryCity(final String tblName, final String keyName) {
    Statement stmt = null;
    try {
        stmt = connection.createStatement();

        BasicSQLUtils.setDBConnection(connection);

        HTTPGetter httpGetter = new HTTPGetter();

        String sql = String.format("SELECT %s, IP, Lookup, Country, City FROM %s WHERE Country IS NULL",
                keyName, tblName);
        PreparedStatement pStmt = connection.prepareStatement(
                String.format("UPDATE %s SET lookup=?, Country=?, City=? WHERE %s = ?", tblName, keyName));

        HashMap<String, String> ipHash = new HashMap<String, String>();
        HashMap<String, Pair<String, String>> ccHash = new HashMap<String, Pair<String, String>>();
        ResultSet rs = stmt.executeQuery(sql);
        while (rs.next()) {
            int regId = rs.getInt(1);
            String ip = rs.getString(2);
            String lookup = rs.getString(3);
            String country = rs.getString(4);
            String city = rs.getString(5);

            boolean allEmpty = StringUtils.isEmpty(lookup) && StringUtils.isEmpty(country)
                    && StringUtils.isEmpty(city);

            String hostName = ipHash.get(ip);

            if (allEmpty && hostName == null) {
                String rvStr = new String(
                        httpGetter.doHTTPRequest("http://api.hostip.info/get_html.php?ip=" + ip));
                country = parse(rvStr, "Country:");
                city = parse(rvStr, "City:");
                System.out.println(rvStr + "[" + country + "][" + city + "]");

                try {
                    InetAddress addr = InetAddress.getByName(ip);
                    hostName = addr.getHostName();
                    ipHash.put(ip, hostName);
                    ccHash.put(ip, new Pair<String, String>(country, city));

                } catch (UnknownHostException e) {
                    e.printStackTrace();
                }
            } else {
                Pair<String, String> p = ccHash.get(ip);
                if (p != null) {
                    country = p.first;
                    city = p.second;
                }
            }

            pStmt.setString(1, hostName);
            pStmt.setString(2, country);
            pStmt.setString(3, city);
            pStmt.setInt(4, regId);
            pStmt.executeUpdate();
        }
        pStmt.close();

        stmt.close();
        colDBConn.close();

    } catch (Exception ex) {
        ex.printStackTrace();
    }

    System.out.println("Done.");
}