List of usage examples for java.net InetAddress toString
public String toString()
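InetAddress.toString() converts the address to a String of the form hostname / literal IP address; when the host name is not known, the part before the slash is empty. Several examples below rely on this format, splitting the result on "/". A minimal sketch of the format, not taken from any of the source files below (the exact literal IP printed depends on the local resolver):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class InetAddressToStringDemo {
    public static void main(String[] args) throws UnknownHostException {
        // Resolve a name; toString() yields "hostname/literal-IP",
        // e.g. "localhost/127.0.0.1" (the literal IP depends on the resolver).
        InetAddress byName = InetAddress.getByName("localhost");
        System.out.println(byName.toString());

        // When constructed from a raw address with no host name,
        // the part before the '/' is empty, e.g. "/192.0.2.1".
        InetAddress byAddr = InetAddress.getByAddress(new byte[] { (byte) 192, 0, 2, 1 });
        System.out.println(byAddr.toString());
    }
}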
From source file:orca.registry.DatabaseOperations.java
/**
 * insert version for inserting the actors and their properties
 * @param act_name
 * @param act_type
 * @param act_guid
 * @param act_desc
 * @param act_soapaxis2url
 * @param act_class
 * @param act_mapper_class
 * @param act_pubkey
 * @param act_cert64
 */
public String insert(String act_name, String act_type, String act_guid, String act_desc,
        String act_soapaxis2url, String act_class, String act_mapper_class, String act_pubkey,
        String act_cert64) {
    if ((act_name == null) || (act_type == null) || (act_guid == null) || (act_soapaxis2url == null)
            || (act_class == null) || (act_mapper_class == null) || (act_pubkey == null)
            || (act_cert64 == null))
        return "STATUS: ERROR; invalid insert parameters";
    if (act_desc == null) {
        act_desc = "No description";
    }
    log.debug("Inside DatabaseOperations: insert() - inserting actors and their properties for guid " + act_guid);
    String status = STATUS_SUCCESS;
    Connection conn = null;
    // check for name duplicate
    if (checkNameDuplicate(act_name, act_guid)) {
        log.error("This registration is invalid, actor " + act_name + "/" + act_guid
                + " will not be allowed to register");
        return "STATUS: ERROR; duplicate actor name detected";
    }
    try {
        // Query the Actors table to find out if act_guid already present
        // If act_guid already present, check if the ip address of the client
        // matches the IP address returned by InetAddress.getByName(act_soapaxis2url - the extracted portion of soapaxis2url)
        // If it matches, execute an 'Update' command for that row, OR, delete that row and insert this new row
        // Set new timestamp for that row
        String clientIP = RegistryServlet.getClientIpAddress();
        //System.out.println("clientIP = " + clientIP);
        log.debug("DatabaseOperations: insert() - clientIP = " + clientIP);
        if (clientIP == null) {
            //System.out.println("Can't get IP address of client; Insert failed");
            log.error("DatabaseOperations: insert() - Can't get IP address of client; Insert failed");
            return "STATUS: ERROR; Can't get IP address of client; Insert failed";
        }
        String[] splitSoapUrl = act_soapaxis2url.split("//");
        String noHttp = splitSoapUrl[1];
        String[] splitNoHttp = noHttp.split(":");
        String ipSoapUrl = splitNoHttp[0];
        //System.out.println("ip in SoapUrl = " + ipSoapUrl);
        log.debug("DatabaseOperations: insert() - ip in SoapUrl = " + ipSoapUrl);
        String humanReadableIP = null;
        String numericIP = null;
        try {
            InetAddress address = InetAddress.getByName(ipSoapUrl);
            //System.out.println("humanreadable IP/numeric IP = " + address.toString());
            //log.debug("humanreadable IP/numeric IP = " + address.toString());
            String[] splitResultGetByName = address.toString().split("/");
            humanReadableIP = splitResultGetByName[0];
            numericIP = splitResultGetByName[1];
        } catch (UnknownHostException ex) {
            log.error("Error converting IP address: " + ex.toString());
            return "STATUS: ERROR; Exception encountered";
        }
        boolean insertEntry = false;
        String act_production_deployment = FALSE_STRING;
        if (clientIP.equalsIgnoreCase(numericIP)) {
            insertEntry = true;
            act_production_deployment = TRUE_STRING;
            if (ipSoapUrl.equalsIgnoreCase("localhost") || ipSoapUrl.equalsIgnoreCase("127.0.0.1")) {
                // Special check: if the soapaxis url is localhost (implying test deployment) set production deployment as false
                // REMINDER: set to FALSE_STRING before deploying otherwise localhost actors will be considered valid
                act_production_deployment = FALSE_STRING;
            }
        } else {
            log.error("Can't verify the identity of the client; client IP doesn't match with IP in SOAP-Axis URL of the Actor; It is also not a test deployment. INSERT Failed !!!");
            return "STATUS: ERROR; Can't verify the identity of the client; client IP doesn't match with IP in SOAP-Axis URL of the Actor;";
        }
        boolean actorExists = checkExistingGuid(act_guid);
        //System.out.println("Trying to get a new instance");
        log.debug("Inside DatabaseOperations: insert() - Trying to get a new instance");
        Class.forName("com.mysql.jdbc.Driver").newInstance();
        //System.out.println("Trying to get a database connection");
        log.debug("Inside DatabaseOperations: insert() - Trying to get a database connection");
        conn = DriverManager.getConnection(url, userName, password);
        //System.out.println("Database connection established");
        log.debug("Inside DatabaseOperations: insert() - Database connection established");
        Calendar cal = Calendar.getInstance();
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        String act_last_update = sdf.format(cal.getTime());
        if (insertEntry) {
            // valid client trying to insert new entry or trying to update an existing entry
            if (!actorExists) {
                // New actor
                PreparedStatement pStat = conn.prepareStatement(
                        "INSERT into `Actors` ( `act_name` , `act_guid` , `act_type`, `act_desc`, `act_soapaxis2url`, `act_class`, `act_mapper_class`, `act_pubkey`, `act_cert64`, `act_production_deployment`, `act_last_update`, `act_verified`) values "
                                + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
                pStat.setString(1, act_name);
                pStat.setString(2, act_guid);
                pStat.setString(3, act_type);
                pStat.setString(4, act_desc);
                pStat.setString(5, act_soapaxis2url);
                pStat.setString(6, act_class);
                pStat.setString(7, act_mapper_class);
                pStat.setString(8, act_pubkey);
                pStat.setString(9, act_cert64);
                pStat.setString(10, act_production_deployment);
                pStat.setString(11, act_last_update);
                pStat.setString(12, FALSE_STRING);
                pStat.execute();
                pStat.close();
            } else {
                // Existing actor
                // get ALL known entries
                Map<String, String> res = queryMapForGuid(act_guid, false);
                // update if necessary: only location and description can be updated
                boolean needUpdate = false;
                if (!res.get(ActorLocation).equals(act_soapaxis2url))
                    needUpdate = true;
                if (res.get(ActorDesc) == null) {
                    if (act_desc != null)
                        needUpdate = true;
                } else if (!res.get(ActorDesc).equals(act_desc))
                    needUpdate = true;
                if (needUpdate) {
                    log.debug("Updating description or location for actor " + act_guid);
                    PreparedStatement pStat = conn.prepareStatement(
                            "UPDATE Actors SET act_soapaxis2url = ?, act_desc = ?, act_last_update = ? WHERE act_guid = ?");
                    pStat.setString(1, act_soapaxis2url);
                    pStat.setString(2, act_desc);
                    pStat.setString(3, act_last_update);
                    pStat.setString(4, act_guid);
                    pStat.execute();
                    pStat.close();
                } else {
                    // if any other mismatch - return error
                    if (!res.get(ActorName).equals(act_name) || !res.get(ActorClazz).equals(act_class)
                            || !res.get(ActorMapperclass).equals(act_mapper_class)
                            || !res.get(ActorPubkey).equals(act_pubkey)
                            || !res.get(ActorCert64).equals(act_cert64)) {
                        status = "STATUS: ERROR; Mismatch to previous registration for this guid. Please change the guid and generate new certificate;";
                        log.error("Error inserting information for actor " + act_name + ":" + act_guid
                                + " due to mismatch to previous registration");
                    } else {
                        // otherwise simply insert heartbeat for this guid
                        insertHeartbeat(act_guid);
                    }
                }
            }
        }
    } catch (Exception e) {
        //System.err.println("Error inserting into Actors table");
        log.error("DatabaseOperations: insert() - Error inserting into Actors table: " + e.toString());
        status = "STATUS: ERROR; Exception encountered during insert";
    } finally {
        if (conn != null) {
            try {
                conn.close();
                //System.out.println("Database connection terminated");
                log.debug("Database connection terminated");
            } catch (Exception e) {
                /* ignore close errors */
            }
        }
    }
    return status;
}
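The insert() method above recovers the host name and numeric IP by splitting address.toString() on "/". A minimal sketch, not from the ORCA source, showing that split alongside the getHostName()/getHostAddress() accessors, which return the same pieces without string parsing (getHostName() may trigger a reverse lookup when the name is not already known):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class SplitVersusAccessors {
    public static void main(String[] args) throws UnknownHostException {
        InetAddress address = InetAddress.getByName("localhost");

        // Parsing the toString() form, as the insert() method above does.
        String[] parts = address.toString().split("/");
        String humanReadableIP = parts[0];
        String numericIP = parts[1];
        System.out.println(humanReadableIP + " -> " + numericIP);

        // Equivalent without string parsing: getHostName() returns the name
        // (possibly via a reverse lookup), getHostAddress() the literal IP.
        System.out.println(address.getHostName() + " -> " + address.getHostAddress());
    }
}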
From source file:com.pivotal.gemfire.tools.pulse.internal.data.JMXDataUpdater.java
private JMXConnector getJMXConnection() {
    JMXConnector connection = null;
    // Reference to repository
    Repository repository = Repository.get();
    try {
        String jmxSerURL = "";

        if (LOGGER.infoEnabled()) {
            LOGGER.info(resourceBundle.getString("LOG_MSG_USE_LOCATOR_VALUE") + ":"
                    + repository.getJmxUseLocator());
        }

        if (repository.getJmxUseLocator()) {
            String locatorHost = repository.getJmxHost();
            int locatorPort = Integer.parseInt(repository.getJmxPort());

            if (LOGGER.infoEnabled()) {
                LOGGER.info(resourceBundle.getString("LOG_MSG_HOST") + " : " + locatorHost + " & "
                        + resourceBundle.getString("LOG_MSG_PORT") + " : " + locatorPort);
            }

            InetAddress inetAddr = InetAddress.getByName(locatorHost);
            if ((inetAddr instanceof Inet4Address) || (inetAddr instanceof Inet6Address)) {
                if (inetAddr instanceof Inet4Address) {
                    // Locator has IPv4 Address
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_IPV4_ADDRESS") + " - "
                                + inetAddr.toString());
                    }
                } else {
                    // Locator has IPv6 Address
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_IPV6_ADDRESS") + " - "
                                + inetAddr.toString());
                    }
                }

                JmxManagerInfo jmxManagerInfo = JmxManagerFinder.askLocatorForJmxManager(inetAddr, locatorPort,
                        15000, repository.isUseSSLLocator());

                if (jmxManagerInfo.port == 0) {
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_COULD_NOT_FIND_MANAGER"));
                    }
                } else {
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_FOUND_MANAGER") + " : "
                                + resourceBundle.getString("LOG_MSG_HOST") + " : " + jmxManagerInfo.host + " & "
                                + resourceBundle.getString("LOG_MSG_PORT") + " : " + jmxManagerInfo.port
                                + (jmxManagerInfo.ssl ? resourceBundle.getString("LOG_MSG_WITH_SSL")
                                        : resourceBundle.getString("LOG_MSG_WITHOUT_SSL")));
                    }
                    jmxSerURL = formJMXServiceURLString(jmxManagerInfo.host, String.valueOf(jmxManagerInfo.port));
                }
            }
            /*
             * else if (inetAddr instanceof Inet6Address) { // Locator has IPv6
             * Address if(LOGGER.infoEnabled()){
             * LOGGER.info(resourceBundle.getString
             * ("LOG_MSG_LOCATOR_IPV6_ADDRESS")); } // update message to display
             * on UI cluster.setConnectionErrorMsg(resourceBundle.getString(
             * "LOG_MSG_JMX_CONNECTION_IPv6_ADDRESS"));
             *
             * }
             */ else {
                // Locator has Invalid locator Address
                if (LOGGER.infoEnabled()) {
                    LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_BAD_ADDRESS"));
                }
                // update message to display on UI
                cluster.setConnectionErrorMsg(resourceBundle.getString("LOG_MSG_JMX_CONNECTION_BAD_ADDRESS"));
            }
        } else {
            if (LOGGER.infoEnabled()) {
                LOGGER.info(resourceBundle.getString("LOG_MSG_HOST") + " : " + this.serverName + " & "
                        + resourceBundle.getString("LOG_MSG_PORT") + " : " + this.port);
            }
            jmxSerURL = formJMXServiceURLString(this.serverName, this.port);
        }

        if (StringUtils.isNotNullNotEmptyNotWhiteSpace(jmxSerURL)) {
            JMXServiceURL url = new JMXServiceURL(jmxSerURL);

            // String[] creds = {"controlRole", "R&D"};
            String[] creds = { this.userName, this.userPassword };

            Map<String, Object> env = new HashMap<String, Object>();
            env.put(JMXConnector.CREDENTIALS, creds);
            if (repository.isUseSSLManager()) {
                // use ssl to connect
                env.put("com.sun.jndi.rmi.factory.socket", new SslRMIClientSocketFactory());
            }
            connection = JMXConnectorFactory.connect(url, env);

            // Register Pulse URL if not already present in the JMX Manager
            registerPulseUrlToManager(connection);
        }
    } catch (Exception e) {
        if (e instanceof UnknownHostException) {
            // update message to display on UI
            cluster.setConnectionErrorMsg(resourceBundle.getString("LOG_MSG_JMX_CONNECTION_UNKNOWN_HOST"));
        }

        // write errors
        StringWriter swBuffer = new StringWriter();
        PrintWriter prtWriter = new PrintWriter(swBuffer);
        e.printStackTrace(prtWriter);
        LOGGER.severe("Exception Details : " + swBuffer.toString() + "\n");

        if (this.conn != null) {
            try {
                this.conn.close();
            } catch (Exception e1) {
                LOGGER.severe("Error closing JMX connection " + swBuffer.toString() + "\n");
            }
            this.conn = null;
        }
    }
    return connection;
}
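getJMXConnection() above logs a different message depending on whether the locator address is IPv4 or IPv6, using instanceof checks against Inet4Address and Inet6Address. A minimal standalone sketch of that check (hypothetical host literals, not part of the Pulse source):

import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.UnknownHostException;

public class AddressFamilyCheck {
    public static void main(String[] args) throws UnknownHostException {
        // "::1" is the IPv6 loopback; getByName() accepts literal addresses as well as names.
        for (String host : new String[] { "127.0.0.1", "::1" }) {
            InetAddress addr = InetAddress.getByName(host);
            if (addr instanceof Inet4Address) {
                System.out.println(addr.toString() + " is IPv4");
            } else if (addr instanceof Inet6Address) {
                System.out.println(addr.toString() + " is IPv6");
            }
        }
    }
}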
From source file:org.commoncrawl.service.crawler.CrawlerEngine.java
public void logDNSQuery(String hostName, InetAddress address, long ttl, String opCName) {
    synchronized (_DNSSuccessLog) {
        _DNSSuccessLog.info(hostName + "," + address.toString() + "," + ttl + "," + opCName);
    }
}
From source file:org.epics.archiverappliance.config.DefaultConfigService.java
@Override
public void postStartup() throws ConfigException {
    if (this.startupState != STARTUP_SEQUENCE.READY_TO_JOIN_APPLIANCE) {
        configlogger.info("Webapp is not in correct state for postStartup " + this.getWarFile().toString()
                + ". It is in " + this.startupState.toString());
        return;
    }

    this.startupState = STARTUP_SEQUENCE.POST_STARTUP_RUNNING;
    configlogger.info("Post startup for " + this.getWarFile().toString());

    // Inherit logging from log4j configuration.
    try {
        PlatformLoggingMXBean logging = ManagementFactory.getPlatformMXBean(PlatformLoggingMXBean.class);
        if (logging != null) {
            java.util.logging.Logger.getLogger("com.hazelcast");
            if (clusterLogger.isDebugEnabled()) {
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.FINE.toString());
            } else if (clusterLogger.isInfoEnabled()) {
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.INFO.toString());
            } else {
                logger.info("Setting clustering logging based on log levels for cluster." + getClass().getName());
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.SEVERE.toString());
            }
        }

        Logger hzMain = Logger.getLogger("com.hazelcast");
        if (clusterLogger.isDebugEnabled()) {
            hzMain.setLevel(Level.DEBUG);
        } else if (clusterLogger.isInfoEnabled()) {
            hzMain.setLevel(Level.INFO);
        } else {
            logger.info("Setting clustering logging based on log levels for cluster." + getClass().getName());
            hzMain.setLevel(Level.FATAL);
        }
    } catch (Exception ex) {
        logger.error("Exception setting logging JVM levels ", ex);
    }

    // Add this to the system props before doing anything with Hz
    System.getProperties().put("hazelcast.logging.type", "log4j");
    HazelcastInstance hzinstance = null;

    // Set the thread count to control how many threads this library spawns.
    Properties hzThreadCounts = new Properties();
    if (System.getenv().containsKey("ARCHAPPL_ALL_APPS_ON_ONE_JVM")) {
        logger.info("Reducing the generic clustering thread counts.");
        hzThreadCounts.put("hazelcast.clientengine.thread.count", "2");
        hzThreadCounts.put("hazelcast.operation.generic.thread.count", "2");
        hzThreadCounts.put("hazelcast.operation.thread.count", "2");
    }

    if (this.warFile == WAR_FILE.MGMT) {
        // The management webapps are the head honchos in the cluster. We set them up differently
        configlogger.debug("Initializing the MGMT webapp's clustering");
        // If we have a hazelcast.xml in the servlet classpath, the XmlConfigBuilder picks that up.
        // If not we use the default config found in hazelcast.jar
        // We then alter this config to suit our purposes.
        Config config = new XmlConfigBuilder().build();
        try {
            if (this.getClass().getResource("hazelcast.xml") == null) {
                logger.info("We override the default cluster config by disabling multicast discovery etc.");
                // We do not use multicast as it is not supported on all networks.
                config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
                // We use TCPIP to discover the members in the cluster.
                // This is part of the config that comes from appliance.xml
                config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
                // Clear any tcpip config that comes from the default config
                // This gets rid of the localhost in the default that prevents clusters from forming..
                // If we need localhost, we'll add it back later.
                config.getNetworkConfig().getJoin().getTcpIpConfig().clear();
                // Enable interfaces; we seem to need this after 2.4 for clients to work correctly in a multi-homed environment.
                // We'll add the actual interface later below
                config.getNetworkConfig().getInterfaces().setEnabled(true);
                config.getNetworkConfig().getInterfaces().clear();
                // We don't really use the authentication provided by the tool; however, we set it to some default
                config.getGroupConfig().setName("archappl");
                config.getGroupConfig().setPassword("archappl");
                // Backup count is 1 by default; we set it explicitly however...
                config.getMapConfig("default").setBackupCount(1);
                config.setProperty("hazelcast.logging.type", "log4j");
            } else {
                logger.debug("There is a hazelcast.xml in the classpath; skipping default configuration in the code.");
            }
        } catch (Exception ex) {
            throw new ConfigException("Exception configuring cluster", ex);
        }

        config.setInstanceName(myIdentity);
        if (!hzThreadCounts.isEmpty()) {
            logger.info("Reducing the generic clustering thread counts.");
            config.getProperties().putAll(hzThreadCounts);
        }

        try {
            String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":");
            String myHostName = myAddrParts[0];
            InetAddress myInetAddr = InetAddress.getByName(myHostName);
            if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) {
                logger.info("Address for this appliance -- " + myInetAddr.toString()
                        + " is a loopback address. Changing this to 127.0.0.1 to keep clustering happy");
                myInetAddr = InetAddress.getByName("127.0.0.1");
            }
            int myClusterPort = Integer.parseInt(myAddrParts[1]);

            logger.debug("We do not let the port auto increment for the MGMT webapp");
            config.getNetworkConfig().setPortAutoIncrement(false);

            config.getNetworkConfig().setPort(myClusterPort);
            config.getNetworkConfig().getInterfaces().addInterface(myInetAddr.getHostAddress());
            configlogger.info("Setting my cluster port base to " + myClusterPort + " and using interface "
                    + myInetAddr.getHostAddress());

            for (ApplianceInfo applInfo : appliances.values()) {
                if (applInfo.getIdentity().equals(myIdentity) && this.warFile == WAR_FILE.MGMT) {
                    logger.debug("Not adding myself to the discovery process when I am the mgmt webapp");
                } else {
                    String[] addressparts = applInfo.getClusterInetPort().split(":");
                    String inetaddrpart = addressparts[0];
                    try {
                        InetAddress inetaddr = InetAddress.getByName(inetaddrpart);
                        if (!inetaddrpart.equals("localhost") && inetaddr.isLoopbackAddress()) {
                            logger.info("Address for appliance " + applInfo.getIdentity() + " - "
                                    + inetaddr.toString()
                                    + " is a loopback address. Changing this to 127.0.0.1 to keep clustering happy");
                            inetaddr = InetAddress.getByName("127.0.0.1");
                        }
                        int clusterPort = Integer.parseInt(addressparts[1]);
                        logger.info("Adding " + applInfo.getIdentity()
                                + " from appliances.xml to the cluster discovery using cluster inetport "
                                + inetaddr.toString() + ":" + clusterPort);
                        config.getNetworkConfig().getJoin().getTcpIpConfig()
                                .addMember(inetaddr.getHostAddress() + ":" + clusterPort);
                    } catch (UnknownHostException ex) {
                        configlogger.info("Cannot resolve the IP address for appliance " + inetaddrpart
                                + ". Skipping adding this appliance to the cluster.");
                    }
                }
            }
            hzinstance = Hazelcast.newHazelcastInstance(config);
        } catch (Exception ex) {
            throw new ConfigException("Exception adding member to cluster", ex);
        }
    } else {
        // All other webapps are "native" clients.
        try {
            configlogger.debug("Initializing a non-mgmt webapp's clustering");
            ClientConfig clientConfig = new ClientConfig();
            clientConfig.getGroupConfig().setName("archappl");
            clientConfig.getGroupConfig().setPassword("archappl");
            clientConfig.setExecutorPoolSize(4);
            // Non mgmt client can only connect to their MGMT webapp.
            String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":");
            String myHostName = myAddrParts[0];
            InetAddress myInetAddr = InetAddress.getByName(myHostName);
            if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) {
                logger.info("Address for this appliance -- " + myInetAddr.toString()
                        + " is a loopback address. Changing this to 127.0.0.1 to keep clustering happy");
                myInetAddr = InetAddress.getByName("127.0.0.1");
            }
            int myClusterPort = Integer.parseInt(myAddrParts[1]);

            configlogger.debug(this.warFile + " connecting as a native client to " + myInetAddr.getHostAddress()
                    + ":" + myClusterPort);
            clientConfig.getNetworkConfig().addAddress(myInetAddr.getHostAddress() + ":" + myClusterPort);
            clientConfig.setProperty("hazelcast.logging.type", "log4j");

            if (!hzThreadCounts.isEmpty()) {
                logger.info("Reducing the generic clustering thread counts.");
                clientConfig.getProperties().putAll(hzThreadCounts);
            }

            if (!clusterLogger.isDebugEnabled()) {
                // The client code logs some SEVERE exceptions on shutdown when deploying on the same Tomcat container.
                // These exceptions are confusing; ideally, we would not have to set the log levels like so.
                Logger.getLogger("com.hazelcast.client.spi.impl.ClusterListenerThread").setLevel(Level.OFF);
                Logger.getLogger("com.hazelcast.client.spi.ClientPartitionService").setLevel(Level.OFF);
            }
            hzinstance = HazelcastClient.newHazelcastClient(clientConfig);
        } catch (Exception ex) {
            throw new ConfigException("Exception adding client to cluster", ex);
        }
    }

    pv2appliancemapping = hzinstance.getMap("pv2appliancemapping");
    namedFlags = hzinstance.getMap("namedflags");
    typeInfos = hzinstance.getMap("typeinfo");
    archivePVRequests = hzinstance.getMap("archivePVRequests");
    channelArchiverDataServers = hzinstance.getMap("channelArchiverDataServers");
    clusterInet2ApplianceIdentity = hzinstance.getMap("clusterInet2ApplianceIdentity");
    aliasNamesToRealNames = hzinstance.getMap("aliasNamesToRealNames");
    pv2ChannelArchiverDataServer = hzinstance.getMap("pv2ChannelArchiverDataServer");
    pubSub = hzinstance.getTopic("pubSub");

    final HazelcastInstance shutdownHzInstance = hzinstance;
    shutdownHooks.add(0, new Runnable() {
        @Override
        public void run() {
            logger.debug("Shutting down clustering instance in webapp " + warFile.toString());
            shutdownHzInstance.shutdown();
        }
    });

    if (this.warFile == WAR_FILE.MGMT) {
        Cluster cluster = hzinstance.getCluster();
        String localInetPort = getMemberKey(cluster.getLocalMember());
        clusterInet2ApplianceIdentity.put(localInetPort, myIdentity);
        logger.debug("Adding myself " + myIdentity + " as having inetport " + localInetPort);

        hzinstance.getMap("clusterInet2ApplianceIdentity")
                .addEntryListener(new EntryAddedListener<Object, Object>() {
                    @Override
                    public void entryAdded(EntryEvent<Object, Object> event) {
                        String appliden = (String) event.getValue();
                        appliancesInCluster.add(appliden);
                        logger.info("Adding appliance " + appliden
                                + " to the list of active appliances as inetport " + ((String) event.getKey()));
                    }
                }, true);

        hzinstance.getMap("clusterInet2ApplianceIdentity")
                .addEntryListener(new EntryRemovedListener<Object, Object>() {
                    @Override
                    public void entryRemoved(EntryEvent<Object, Object> event) {
                        String appliden = (String) event.getValue();
                        appliancesInCluster.remove(appliden);
                        logger.info("Removing appliance " + appliden
                                + " from the list of active appliances as inetport " + ((String) event.getKey()));
                    }
                }, true);

        logger.debug("Establishing a cluster membership listener to detect when appliances drop off the cluster");
        cluster.addMembershipListener(new MembershipListener() {
            public void memberAdded(MembershipEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                if (clusterInet2ApplianceIdentity.containsKey(inetPort)) {
                    String appliden = clusterInet2ApplianceIdentity.get(inetPort);
                    appliancesInCluster.add(appliden);
                    configlogger.info("Adding newly started appliance " + appliden
                            + " to the list of active appliances for inetport " + inetPort);
                } else {
                    logger.debug("Skipping adding appliance using inetport " + inetPort
                            + " to the list of active instances as we do not have a mapping to its identity");
                }
            }

            public void memberRemoved(MembershipEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                if (clusterInet2ApplianceIdentity.containsKey(inetPort)) {
                    String appliden = clusterInet2ApplianceIdentity.get(inetPort);
                    appliancesInCluster.remove(appliden);
                    configlogger.info("Removing appliance " + appliden + " from the list of active appliances");
                } else {
                    configlogger.debug("Received member removed event for " + inetPort);
                }
            }

            @Override
            public void memberAttributeChanged(MemberAttributeEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                configlogger.debug("Received membership attribute changed event for " + inetPort);
            }
        });

        logger.debug("Adding the current members in the cluster after establishing the cluster membership listener");
        for (Member member : cluster.getMembers()) {
            String mbrInetPort = getMemberKey(member);
            logger.debug("Found member " + mbrInetPort);
            if (clusterInet2ApplianceIdentity.containsKey(mbrInetPort)) {
                String appliden = clusterInet2ApplianceIdentity.get(mbrInetPort);
                appliancesInCluster.add(appliden);
                logger.info("Adding appliance " + appliden + " to the list of active appliances for inetport "
                        + mbrInetPort);
            } else {
                logger.debug("Skipping adding appliance using inetport " + mbrInetPort
                        + " to the list of active instances as we do not have a mapping to its identity");
            }
        }
        logger.info("Established subscription(s) for appliance availability");

        if (this.getInstallationProperties().containsKey(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY)) {
            String namedFlagsFileName = (String) this.getInstallationProperties()
                    .get(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY);
            configlogger.info("Loading named flags from file " + namedFlagsFileName);
            File namedFlagsFile = new File(namedFlagsFileName);
            if (!namedFlagsFile.exists()) {
                configlogger.error("File containing named flags " + namedFlagsFileName + " specified but not present");
            } else {
                Properties namedFlagsFromFile = new Properties();
                try (FileInputStream is = new FileInputStream(namedFlagsFile)) {
                    namedFlagsFromFile.load(is);
                    for (Object namedFlagFromFile : namedFlagsFromFile.keySet()) {
                        try {
                            String namedFlagFromFileStr = (String) namedFlagFromFile;
                            Boolean namedFlagFromFileValue = Boolean
                                    .parseBoolean((String) namedFlagsFromFile.get(namedFlagFromFileStr));
                            logger.debug("Setting named flag " + namedFlagFromFileStr + " to " + namedFlagFromFileValue);
                            this.namedFlags.put(namedFlagFromFileStr, namedFlagFromFileValue);
                        } catch (Exception ex) {
                            logger.error("Exception loading named flag from file" + namedFlagsFileName, ex);
                        }
                    }
                } catch (Exception ex) {
                    configlogger.error("Exception loading named flags from " + namedFlagsFileName, ex);
                }
            }
        }
    }

    if (this.warFile == WAR_FILE.ENGINE) {
        // It can take a while for the engine to start up.
        // We probably want to do this in the background so that the appliance as a whole starts up quickly and we get retrieval up and running quickly.
        this.startupExecutor.schedule(new Runnable() {
            @Override
            public void run() {
                try {
                    logger.debug("Starting up the engine's channels on startup.");
                    archivePVSonStartup();
                    logger.debug("Done starting up the engine's channels in startup.");
                } catch (Throwable t) {
                    configlogger.fatal("Exception starting up the engine channels on startup", t);
                }
            }
        }, 1, TimeUnit.SECONDS);
    } else if (this.warFile == WAR_FILE.ETL) {
        this.etlPVLookup.postStartup();
    } else if (this.warFile == WAR_FILE.MGMT) {
        pvsForThisAppliance = new ConcurrentSkipListSet<String>();
        pausedPVsForThisAppliance = new ConcurrentSkipListSet<String>();
        initializePersistenceLayer();
        loadTypeInfosFromPersistence();
        loadAliasesFromPersistence();
        loadArchiveRequestsFromPersistence();
        loadExternalServersFromPersistence();
        registerForNewExternalServers(hzinstance.getMap("channelArchiverDataServers"));

        // Cache the aggregate of all the PVs that are registered to this appliance.
        logger.debug("Building a local aggregate of PV infos that are registered to this appliance");
        for (String pvName : getPVsForThisAppliance()) {
            if (!pvsForThisAppliance.contains(pvName)) {
                applianceAggregateInfo.addInfoForPV(pvName, this.getTypeInfoForPV(pvName), this);
            }
        }
    }

    // Register for changes to the typeinfo map.
    logger.info("Registering for changes to typeinfos");
    hzinstance.getMap("typeinfo").addEntryListener(new EntryAddedListener<Object, Object>() {
        @Override
        public void entryAdded(EntryEvent<Object, Object> entryEvent) {
            logger.debug("Received entryAdded for pvTypeInfo");
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue();
            String pvName = typeInfo.getPvName();
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_ADDED));
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.putTypeInfo(pvName, typeInfo);
                } catch (Exception ex) {
                    logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);

    hzinstance.getMap("typeinfo").addEntryListener(new EntryRemovedListener<Object, Object>() {
        @Override
        public void entryRemoved(EntryEvent<Object, Object> entryEvent) {
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getOldValue();
            String pvName = typeInfo.getPvName();
            logger.info("Received entryRemoved for pvTypeInfo " + pvName);
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_DELETED));
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.deleteTypeInfo(pvName);
                } catch (Exception ex) {
                    logger.error("Exception deleting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);

    hzinstance.getMap("typeinfo").addEntryListener(new EntryUpdatedListener<Object, Object>() {
        @Override
        public void entryUpdated(EntryEvent<Object, Object> entryEvent) {
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue();
            String pvName = typeInfo.getPvName();
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_MODIFIED));
            logger.debug("Received entryUpdated for pvTypeInfo");
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.putTypeInfo(pvName, typeInfo);
                } catch (Exception ex) {
                    logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);

    eventBus.register(this);

    pubSub.addMessageListener(new MessageListener<PubSubEvent>() {
        @Override
        public void onMessage(Message<PubSubEvent> pubSubEventMsg) {
            PubSubEvent pubSubEvent = pubSubEventMsg.getMessageObject();
            if (pubSubEvent.getDestination() != null) {
                if (pubSubEvent.getDestination().equals("ALL")
                        || (pubSubEvent.getDestination().startsWith(myIdentity) && pubSubEvent.getDestination()
                                .endsWith(DefaultConfigService.this.warFile.toString()))) {
                    // We publish messages from hazelcast into this VM only if the intended WAR file is us.
                    logger.debug("Publishing event into this JVM " + pubSubEvent.generateEventDescription());
                    // In this case, we set the source as being the cluster to prevent republishing back into the cluster.
                    pubSubEvent.markSourceAsCluster();
                    eventBus.post(pubSubEvent);
                } else {
                    logger.debug("Skipping publishing event into this JVM " + pubSubEvent.generateEventDescription()
                            + " as destination is not me " + DefaultConfigService.this.warFile.toString());
                }
            } else {
                logger.debug("Skipping publishing event with null destination");
            }
        }
    });
    logger.info("Done registering for changes to typeinfos");

    this.startupState = STARTUP_SEQUENCE.STARTUP_COMPLETE;
    configlogger.info("Start complete for webapp " + this.warFile);
}
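postStartup() above repeatedly applies one pattern before configuring Hazelcast: if a host name other than "localhost" resolves to a loopback address, it substitutes 127.0.0.1. A minimal sketch of that normalization in isolation (hypothetical helper, not part of the archiver appliance source):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class LoopbackNormalization {
    // Mirrors the pattern above: if a non-"localhost" name resolves to a loopback
    // address, substitute 127.0.0.1 so cluster members agree on the interface.
    static InetAddress normalize(String hostName) throws UnknownHostException {
        InetAddress addr = InetAddress.getByName(hostName);
        if (!hostName.equals("localhost") && addr.isLoopbackAddress()) {
            System.out.println("Address " + addr.toString() + " is a loopback address; using 127.0.0.1");
            addr = InetAddress.getByName("127.0.0.1");
        }
        return addr;
    }

    public static void main(String[] args) throws UnknownHostException {
        System.out.println(normalize("localhost"));
        System.out.println(normalize("127.0.0.1"));
    }
}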
From source file:edu.nwpu.gemfire.monitor.data.JMXDataUpdater.java
private JMXConnector getJMXConnection() {
    JMXConnector connection = null;
    // Reference to repository
    Repository repository = Repository.get();
    try {
        String jmxSerURL = "";

        if (LOGGER.infoEnabled()) {
            LOGGER.info(resourceBundle.getString("LOG_MSG_USE_LOCATOR_VALUE") + ":"
                    + repository.getJmxUseLocator());
        }

        if (repository.getJmxUseLocator()) {
            String locatorHost = repository.getJmxHost();
            int locatorPort = Integer.parseInt(repository.getJmxPort());

            if (LOGGER.infoEnabled()) {
                LOGGER.info(resourceBundle.getString("LOG_MSG_HOST") + " : " + locatorHost + " & "
                        + resourceBundle.getString("LOG_MSG_PORT") + " : " + locatorPort);
            }

            InetAddress inetAddr = InetAddress.getByName(locatorHost);
            if ((inetAddr instanceof Inet4Address) || (inetAddr instanceof Inet6Address)) {
                if (inetAddr instanceof Inet4Address) {
                    // Locator has IPv4 Address
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_IPV4_ADDRESS") + " - "
                                + inetAddr.toString());
                    }
                } else {
                    // Locator has IPv6 Address
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_IPV6_ADDRESS") + " - "
                                + inetAddr.toString());
                    }
                }

                JmxManagerInfo jmxManagerInfo = JmxManagerFinder.askLocatorForJmxManager(inetAddr, locatorPort,
                        15000, repository.isUseSSLLocator());

                if (jmxManagerInfo.port == 0) {
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_COULD_NOT_FIND_MANAGER"));
                    }
                } else {
                    if (LOGGER.infoEnabled()) {
                        LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_FOUND_MANAGER") + " : "
                                + resourceBundle.getString("LOG_MSG_HOST") + " : " + jmxManagerInfo.host + " & "
                                + resourceBundle.getString("LOG_MSG_PORT") + " : " + jmxManagerInfo.port
                                + (jmxManagerInfo.ssl ? resourceBundle.getString("LOG_MSG_WITH_SSL")
                                        : resourceBundle.getString("LOG_MSG_WITHOUT_SSL")));
                    }
                    jmxSerURL = formJMXServiceURLString(jmxManagerInfo.host, String.valueOf(jmxManagerInfo.port));
                }
            }
            /*
             * else if (inetAddr instanceof Inet6Address) { // Locator has IPv6
             * Address if(LOGGER.infoEnabled()){
             * LOGGER.info(resourceBundle.getString
             * ("LOG_MSG_LOCATOR_IPV6_ADDRESS")); } // update message to display
             * on UI cluster.setConnectionErrorMsg(resourceBundle.getString(
             * "LOG_MSG_JMX_CONNECTION_IPv6_ADDRESS"));
             *
             * }
             */ else {
                // Locator has Invalid locator Address
                if (LOGGER.infoEnabled()) {
                    LOGGER.info(resourceBundle.getString("LOG_MSG_LOCATOR_BAD_ADDRESS"));
                }
                // update message to display on UI
                cluster.setConnectionErrorMsg(resourceBundle.getString("LOG_MSG_JMX_CONNECTION_BAD_ADDRESS"));
            }
        } else {
            if (LOGGER.infoEnabled()) {
                LOGGER.info(resourceBundle.getString("LOG_MSG_HOST") + " : " + this.serverName + " & "
                        + resourceBundle.getString("LOG_MSG_PORT") + " : " + this.port);
            }
            jmxSerURL = formJMXServiceURLString(this.serverName, this.port);
        }

        if (StringUtils.isNotNullNotEmptyNotWhiteSpace(jmxSerURL)) {
            JMXServiceURL url = new JMXServiceURL(jmxSerURL);

            // String[] creds = {"controlRole", "R&D"};
            String[] creds = { this.userName, this.userPassword };

            Map<String, Object> env = new HashMap<String, Object>();
            env.put(JMXConnector.CREDENTIALS, creds);
            if (repository.isUseSSLManager()) {
                // use ssl to connect
                env.put("com.sun.jndi.rmi.factory.socket", new SslRMIClientSocketFactory());
            }
            connection = JMXConnectorFactory.connect(url, env);

            // Register Pulse URL if not already present in the JMX Manager
            registerPulseUrlToManager(connection);
        }
    } catch (Exception e) {
        if (e instanceof UnknownHostException) {
            // update message to display on UI
            cluster.setConnectionErrorMsg(resourceBundle.getString("LOG_MSG_JMX_CONNECTION_UNKNOWN_HOST"));
        }

        // write errors
        StringWriter swBuffer = new StringWriter();
        PrintWriter prtWriter = new PrintWriter(swBuffer);
        e.printStackTrace(prtWriter);
        LOGGER.severe("Exception Details : " + swBuffer.toString() + "\n");

        if (this.conn != null) {
            try {
                this.conn.close();
            } catch (Exception e1) {
                LOGGER.severe("Error closing JMX connection " + swBuffer.toString() + "\n");
            }
            this.conn = null;
        }
    }
    return connection;
}
From source file:org.commoncrawl.io.internal.NIODNSLocalResolver.java
public ReverseDNSQueryResult doReverseDNSQuery(InetAddress address, boolean useTCP, int timeoutValue) {
    // Ask dnsjava for the inetaddress. Should be in its cache.
    Message response = null;
    Exception resolverException = null;

    // check cache first ...
    ReverseDNSQueryResult result = new ReverseDNSQueryResult(address);

    if (true) {
        try {
            // allocate a simple resolver object ...
            NIODNSSimpleResolverImpl resolver = new NIODNSSimpleResolverImpl(this, _dnsServerAddress);
            // use tcp if requested ...
            if (useTCP)
                resolver.setTCP(true);
            // set the timeout ...
            resolver.setTimeout(timeoutValue);
            // create appropriate data structures ...
            Name name = ReverseMap.fromAddress(address);
            Record rec = Record.newRecord(name, Type.PTR, DClass.IN);
            Message query = Message.newQuery(rec);

            // send it off ...
            try {
                response = resolver.send(query);
            } catch (Exception e) {
                LOG.error("Reverse DNS Resolution for:" + address + " threw IOException:"
                        + StringUtils.stringifyException(e));
                resolverException = e;
            }

            if (response != null && response.getRcode() == Rcode.NOERROR) {
                // get answer
                Record records[] = response.getSectionArray(Section.ANSWER);
                if (records != null) {
                    // walk records ...
                    for (Record record : records) {
                        // store CName for later use ...
                        if (record.getType() == Type.PTR) {
                            result.getHostNames().add(((PTRRecord) record).getTarget().toString());
                        }
                    }
                }
            }
        } catch (UnknownHostException e) {
            resolverException = e;
        }

        if (response == null) {
            result.setStatus(Status.RESOLVER_FAILURE);
            LOG.error("Critical Reverse DNS Failure for host:" + address.toString());
            if (resolverException != null) {
                LOG.error(CCStringUtils.stringifyException(resolverException));
                result.setErrorDesc(resolverException.toString());
            }
        } else if (response.getRcode() != Rcode.NOERROR) {
            result.setStatus(Status.SERVER_FAILURE);
            result.setErrorDesc(Rcode.string(response.getRcode()));
        } else if (response.getRcode() == Rcode.NOERROR) {
            if (result.getHostNames().size() != 0) {
                result.setStatus(Status.SUCCESS);
            } else {
                result.setStatus(Status.SERVER_FAILURE);
                result.setErrorDesc("NO PTR RECORDS FOUND");
            }
        }
    }
    // return result ... will be added to completion queue (to be retrieved via
    // poll method)...
    return result;
}
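doReverseDNSQuery() above performs the reverse lookup with dnsjava (ReverseMap and PTR records) so it can control timeouts and TCP fallback. For comparison, a minimal JDK-only sketch, not taken from the CommonCrawl source; getCanonicalHostName() attempts the reverse lookup and simply returns the literal IP when no PTR record is found:

import java.net.InetAddress;
import java.net.UnknownHostException;

public class JdkReverseLookup {
    public static void main(String[] args) throws UnknownHostException {
        // Hypothetical address (TEST-NET-1); with no PTR record the literal IP comes back unchanged.
        InetAddress address = InetAddress.getByName("192.0.2.1");
        System.out.println("toString(): " + address.toString());
        System.out.println("reverse lookup: " + address.getCanonicalHostName());
    }
}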
From source file:org.commoncrawl.io.NIODNSLocalResolver.java
public ReverseDNSQueryResult doReverseDNSQuery(InetAddress address, boolean useTCP, int timeoutValue) {
    // Ask dnsjava for the inetaddress. Should be in its cache.
    Message response = null;
    Exception resolverException = null;

    // check cache first ...
    ReverseDNSQueryResult result = new ReverseDNSQueryResult(address);

    if (true) {
        try {
            // allocate a simple resolver object ...
            NIODNSSimpleResolverImpl resolver = new NIODNSSimpleResolverImpl(this, _dnsServerAddress);
            // use tcp if requested ...
            if (useTCP)
                resolver.setTCP(true);
            // set the timeout ...
            resolver.setTimeout(timeoutValue);
            // create appropriate data structures ...
            Name name = ReverseMap.fromAddress(address);
            Record rec = Record.newRecord(name, Type.PTR, DClass.IN);
            Message query = Message.newQuery(rec);

            // send it off ...
            try {
                response = resolver.send(query);
            } catch (Exception e) {
                LOG.error("Reverse DNS Resolution for:" + address + " threw IOException:"
                        + StringUtils.stringifyException(e));
                resolverException = e;
            }

            if (response != null && response.getRcode() == Rcode.NOERROR) {
                // get answer
                Record records[] = response.getSectionArray(Section.ANSWER);
                if (records != null) {
                    // walk records ...
                    for (Record record : records) {
                        // store CName for later use ...
                        if (record.getType() == Type.PTR) {
                            result.getHostNames().add(((PTRRecord) record).getTarget().toString());
                        }
                    }
                }
            }
        } catch (UnknownHostException e) {
            resolverException = e;
        }

        if (response == null) {
            result.setStatus(Status.RESOLVER_FAILURE);
            LOG.error("Critical Reverse DNS Failure for host:" + address.toString());
            if (resolverException != null) {
                LOG.error(StringUtils.stringifyException(resolverException));
                result.setErrorDesc(resolverException.toString());
            }
        } else if (response.getRcode() != Rcode.NOERROR) {
            result.setStatus(Status.SERVER_FAILURE);
            result.setErrorDesc(Rcode.string(response.getRcode()));
        } else if (response.getRcode() == Rcode.NOERROR) {
            if (result.getHostNames().size() != 0) {
                result.setStatus(Status.SUCCESS);
            } else {
                result.setStatus(Status.SERVER_FAILURE);
                result.setErrorDesc("NO PTR RECORDS FOUND");
            }
        }
    }
    // return result ... will be added to completion queue (to be retrieved via
    // poll method)...
    return result;
}
From source file:org.mobicents.servlet.restcomm.telephony.Call.java
private SipURI getInitialIpAddressPort(SipServletMessage message) throws ServletParseException, UnknownHostException {
    // Issue #268 - https://bitbucket.org/telestax/telscale-restcomm/issue/268
    // First get the Initial Remote Address (real address that the request came from)
    // Then check the following:
    // 1. If contact header address is private network address
    // 2. If there are no "Record-Route" headers (there is no proxy in the call)
    // 3. If contact header address != real ip address
    // Finally, if all of the above are true, create a SIP URI using the realIP address and the SIP port
    // and store it to the sip session to be used as request uri later
    SipURI uri = null;
    try {
        String realIP = message.getInitialRemoteAddr();
        Integer realPort = message.getInitialRemotePort();
        if (realPort == null || realPort == -1) {
            realPort = 5060;
        }

        if (realPort == 0) {
            realPort = message.getRemotePort();
        }

        final ListIterator<String> recordRouteHeaders = message.getHeaders("Record-Route");
        final Address contactAddr = factory.createAddress(message.getHeader("Contact"));

        InetAddress contactInetAddress = InetAddress.getByName(((SipURI) contactAddr.getURI()).getHost());
        InetAddress inetAddress = InetAddress.getByName(realIP);

        int remotePort = message.getRemotePort();
        int contactPort = ((SipURI) contactAddr.getURI()).getPort();
        String remoteAddress = message.getRemoteAddr();

        // Issue #332: https://telestax.atlassian.net/browse/RESTCOMM-332
        final String initialIpBeforeLB = message.getHeader("X-Sip-Balancer-InitialRemoteAddr");
        String initialPortBeforeLB = message.getHeader("X-Sip-Balancer-InitialRemotePort");
        String contactAddress = ((SipURI) contactAddr.getURI()).getHost();

        if (initialIpBeforeLB != null) {
            if (initialPortBeforeLB == null)
                initialPortBeforeLB = "5060";
            if (logger.isInfoEnabled()) {
                logger.info("We are behind load balancer, storing Initial Remote Address " + initialIpBeforeLB
                        + ":" + initialPortBeforeLB + " to the session for later use");
            }
            realIP = initialIpBeforeLB + ":" + initialPortBeforeLB;
            uri = factory.createSipURI(null, realIP);
        } else if (contactInetAddress.isSiteLocalAddress() && !recordRouteHeaders.hasNext()
                && !contactInetAddress.toString().equalsIgnoreCase(inetAddress.toString())) {
            if (logger.isInfoEnabled()) {
                logger.info("Contact header address " + contactAddr.toString()
                        + " is a private network ip address, storing Initial Remote Address " + realIP + ":"
                        + realPort + " to the session for later use");
            }
            realIP = realIP + ":" + realPort;
            uri = factory.createSipURI(null, realIP);
        }
    } catch (Exception e) {
        logger.warning("Exception while trying to get the Initial IP Address and Port: " + e);
    }
    return uri;
}
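getInitialIpAddressPort() above compares the contact address and the real remote address via their toString() output. A minimal sketch, not from the Restcomm source and assuming "localhost" resolves to 127.0.0.1 on the machine running it, showing that InetAddress.equals() compares only the underlying IP bytes, while toString() also carries the host-name portion:

import java.net.InetAddress;
import java.net.UnknownHostException;

public class AddressComparison {
    public static void main(String[] args) throws UnknownHostException {
        InetAddress byLiteral = InetAddress.getByName("127.0.0.1");
        InetAddress byName = InetAddress.getByName("localhost");

        // toString() includes the host-name part, so the two strings can differ
        // even when both objects hold the same IP address.
        System.out.println(byLiteral.toString() + " vs " + byName.toString());

        // equals() compares only the IP address bytes; on a host where
        // "localhost" resolves to 127.0.0.1 this prints true.
        System.out.println("equals: " + byLiteral.equals(byName));
    }
}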