Usage examples for java.net.InetAddress.isLoopbackAddress(), collected from open-source projects.

Method signature:

public boolean isLoopbackAddress()

The method returns true when this InetAddress is a loopback address: any address in 127.0.0.0/8 for IPv4, or ::1 for IPv6.
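A minimal, self-contained sketch of the call itself, before the longer framework examples below (the host names are illustrative only):

import java.net.InetAddress;
import java.net.UnknownHostException;

public class LoopbackCheck {
    public static void main(String[] args) throws UnknownHostException {
        // The IPv4 loopback range is 127.0.0.0/8; the IPv6 loopback address is ::1.
        System.out.println(InetAddress.getByName("127.0.0.1").isLoopbackAddress()); // true
        System.out.println(InetAddress.getByName("::1").isLoopbackAddress());       // true

        // A non-loopback address (example.org is just an illustration) returns false.
        System.out.println(InetAddress.getByName("example.org").isLoopbackAddress()); // false
    }
}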
From source file:org.mobicents.servlet.restcomm.telephony.Call.java
private void onSipServletResponse(SipServletResponse message, ActorRef self, ActorRef sender) throws Exception { this.lastResponse = message; final int code = message.getStatus(); switch (code) { case SipServletResponse.SC_CALL_BEING_FORWARDED: { forwarding(message);// w ww. java2 s .com break; } case SipServletResponse.SC_RINGING: case SipServletResponse.SC_SESSION_PROGRESS: { if (!is(ringing)) { if (logger.isInfoEnabled()) { logger.info("Got 180 Ringing for Call: " + self().path() + " To: " + to + " sender: " + sender.path() + " observers size: " + observers.size()); } fsm.transition(message, ringing); } break; } case SipServletResponse.SC_BUSY_HERE: case SipServletResponse.SC_BUSY_EVERYWHERE: case SipServletResponse.SC_DECLINE: { sendCallInfoToObservers(); //Important. If state is DIALING, then do nothing about the BUSY. If not DIALING state move to failingBusy // // Notify the observers. // external = CallStateChanged.State.BUSY; // final CallStateChanged event = new CallStateChanged(external); // for (final ActorRef observer : observers) { // observer.tell(event, self); // } // XXX shouldnt it move to failingBusy IF dialing ???? // if (is(dialing)) { // break; // } else { // fsm.transition(message, failingBusy); // } fsm.transition(message, failingBusy); break; } case SipServletResponse.SC_UNAUTHORIZED: case SipServletResponse.SC_PROXY_AUTHENTICATION_REQUIRED: { // Handles Auth for https://bitbucket.org/telestax/telscale-restcomm/issue/132/implement-twilio-sip-out if (this.username == null || this.password == null) { sendCallInfoToObservers(); fsm.transition(message, failed); } else { AuthInfo authInfo = this.factory.createAuthInfo(); String authHeader = message.getHeader("Proxy-Authenticate"); if (authHeader == null) { authHeader = message.getHeader("WWW-Authenticate"); } String tempRealm = authHeader.substring(authHeader.indexOf("realm=\"") + "realm=\"".length()); String realm = tempRealm.substring(0, tempRealm.indexOf("\"")); authInfo.addAuthInfo(message.getStatus(), realm, this.username, this.password); SipServletRequest challengeRequest = message.getSession() .createRequest(message.getRequest().getMethod()); challengeRequest.addAuthHeader(message, authInfo); challengeRequest.setContent(this.invite.getContent(), this.invite.getContentType()); this.invite = challengeRequest; // https://github.com/Mobicents/RestComm/issues/147 Make sure we send the SDP again this.invite.setContent(message.getRequest().getContent(), "application/sdp"); challengeRequest.send(); } break; } // https://github.com/Mobicents/RestComm/issues/148 // Session in Progress Response should trigger MMS to start the Media Session // case SipServletResponse.SC_SESSION_PROGRESS: case SipServletResponse.SC_OK: { if (is(dialing) || (is(ringing) && !"inbound".equals(direction))) { fsm.transition(message, updatingMediaSession); } break; } default: { if (code >= 400 && code != 487) { if (code == 487 && isOutbound()) { String initialIpBeforeLB = null; String initialPortBeforeLB = null; try { initialIpBeforeLB = message.getHeader("X-Sip-Balancer-InitialRemoteAddr"); initialPortBeforeLB = message.getHeader("X-Sip-Balancer-InitialRemotePort"); } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug("Exception during check of LB custom headers for IP address and port"); } } final SipServletRequest ack = message.createAck(); addCustomHeaders(ack); SipSession session = message.getSession(); if (initialIpBeforeLB != null) { if (initialPortBeforeLB == null) initialPortBeforeLB = "5060"; if (logger.isInfoEnabled()) { 
logger.info("We are behind load balancer, will use: " + initialIpBeforeLB + ":" + initialPortBeforeLB + " for ACK message, "); } String realIP = initialIpBeforeLB + ":" + initialPortBeforeLB; SipURI uri = factory.createSipURI(null, realIP); ack.setRequestURI(uri); } else if (!ack.getHeaders("Route").hasNext()) { final SipServletRequest originalInvite = message.getRequest(); final SipURI realInetUri = (SipURI) originalInvite.getRequestURI(); if ((SipURI) session.getAttribute("realInetUri") == null) { session.setAttribute("realInetUri", realInetUri); } final InetAddress ackRURI = InetAddress.getByName(((SipURI) ack.getRequestURI()).getHost()); final int ackRURIPort = ((SipURI) ack.getRequestURI()).getPort(); if (realInetUri != null && (ackRURI.isSiteLocalAddress() || ackRURI.isAnyLocalAddress() || ackRURI.isLoopbackAddress()) && (ackRURIPort != realInetUri.getPort())) { if (logger.isInfoEnabled()) { logger.info("Using the real ip address and port of the sip client " + realInetUri.toString() + " as a request uri of the ACK"); } ack.setRequestURI(realInetUri); } } ack.send(); if (logger.isInfoEnabled()) { logger.info("Just sent out ACK : " + ack.toString()); } } this.fail = true; sendCallInfoToObservers(); fsm.transition(message, stopping); } } } }
From source file:org.mobicents.servlet.restcomm.telephony.CallManager.java
private void info(final SipServletRequest request) throws IOException {
    final ActorRef self = self();
    final SipApplicationSession application = request.getApplicationSession();
    // if this response is coming from a client that is in a p2p session with another registered client
    // we will just proxy the response
    SipSession linkedB2BUASession = B2BUAHelper.getLinkedSession(request);
    if (linkedB2BUASession != null) {
        if (logger.isInfoEnabled()) {
            logger.info(String.format("B2BUA: Got INFO request: \n %s", request));
        }
        request.getSession().setAttribute(B2BUAHelper.B2BUA_LAST_REQUEST, request);
        SipServletRequest clonedInfo = linkedB2BUASession.createRequest("INFO");
        linkedB2BUASession.setAttribute(B2BUAHelper.B2BUA_LAST_REQUEST, clonedInfo);
        // Issue #307: https://telestax.atlassian.net/browse/RESTCOMM-307
        SipURI toInetUri = (SipURI) request.getSession().getAttribute("toInetUri");
        SipURI fromInetUri = (SipURI) request.getSession().getAttribute("fromInetUri");
        InetAddress infoRURI = null;
        try {
            infoRURI = InetAddress.getByName(((SipURI) clonedInfo.getRequestURI()).getHost());
        } catch (UnknownHostException e) {
        }
        if (patchForNatB2BUASessions) {
            if (toInetUri != null && infoRURI == null) {
                if (logger.isInfoEnabled()) {
                    logger.info("Using the real ip address of the sip client " + toInetUri.toString()
                            + " as a request uri of the CloneBye request");
                }
                clonedInfo.setRequestURI(toInetUri);
            } else if (toInetUri != null && (infoRURI.isSiteLocalAddress() || infoRURI.isAnyLocalAddress()
                    || infoRURI.isLoopbackAddress())) {
                if (logger.isInfoEnabled()) {
                    logger.info("Using the real ip address of the sip client " + toInetUri.toString()
                            + " as a request uri of the CloneInfo request");
                }
                clonedInfo.setRequestURI(toInetUri);
            } else if (fromInetUri != null && (infoRURI.isSiteLocalAddress() || infoRURI.isAnyLocalAddress()
                    || infoRURI.isLoopbackAddress())) {
                if (logger.isInfoEnabled()) {
                    logger.info("Using the real ip address of the sip client " + fromInetUri.toString()
                            + " as a request uri of the CloneInfo request");
                }
                clonedInfo.setRequestURI(fromInetUri);
            }
        }
        clonedInfo.send();
    } else {
        final ActorRef call = (ActorRef) application.getAttribute(Call.class.getName());
        call.tell(request, self);
    }
}
From source file:org.mobicents.servlet.restcomm.telephony.CallManager.java
private void ack(SipServletRequest request) throws IOException { SipServletResponse response = B2BUAHelper.getLinkedResponse(request); // if this is an ACK that belongs to a B2BUA session, then we proxy it to the other client if (response != null) { SipServletRequest ack = response.createAck(); // if (!ack.getHeaders("Route").hasNext() && patchForNatB2BUASessions) { if (patchForNatB2BUASessions) { InetAddress ackRURI = null; try { ackRURI = InetAddress.getByName(((SipURI) ack.getRequestURI()).getHost()); } catch (UnknownHostException e) { }/*w w w . j a va 2s. co m*/ // Issue #307: https://telestax.atlassian.net/browse/RESTCOMM-307 SipURI toInetUri = (SipURI) request.getSession().getAttribute("toInetUri"); if (toInetUri != null && ackRURI == null) { if (logger.isInfoEnabled()) { logger.info("Using the real ip address of the sip client " + toInetUri.toString() + " as a request uri of the ACK request"); } ack.setRequestURI(toInetUri); } else if (toInetUri != null && (ackRURI.isSiteLocalAddress() || ackRURI.isAnyLocalAddress() || ackRURI.isLoopbackAddress())) { if (logger.isInfoEnabled()) { logger.info("Using the real ip address of the sip client " + toInetUri.toString() + " as a request uri of the ACK request"); } ack.setRequestURI(toInetUri); } else if (toInetUri == null && (ackRURI.isSiteLocalAddress() || ackRURI.isAnyLocalAddress() || ackRURI.isLoopbackAddress())) { if (logger.isInfoEnabled()) { logger.info( "Public IP toInetUri from SipSession is null, will check LB headers from last Response"); } final String initialIpBeforeLB = response.getHeader("X-Sip-Balancer-InitialRemoteAddr"); String initialPortBeforeLB = response.getHeader("X-Sip-Balancer-InitialRemotePort"); if (initialIpBeforeLB != null) { if (initialPortBeforeLB == null) initialPortBeforeLB = "5060"; if (logger.isInfoEnabled()) { logger.info("We are behind load balancer, will use Initial Remote Address " + initialIpBeforeLB + ":" + initialPortBeforeLB + " for the ACK request"); } String realIP = initialIpBeforeLB + ":" + initialPortBeforeLB; SipURI uri = sipFactory.createSipURI(null, realIP); ack.setRequestURI(uri); } else { if (logger.isInfoEnabled()) { logger.info("LB Headers are also null"); } } } } ack.send(); SipApplicationSession sipApplicationSession = request.getApplicationSession(); // Defaulting the sip application session to 1h sipApplicationSession.setExpires(60); } else { if (logger.isInfoEnabled()) { logger.info("Linked Response couldn't be found for ACK request"); } final ActorRef call = (ActorRef) request.getApplicationSession().getAttribute(Call.class.getName()); if (call != null) { if (logger.isInfoEnabled()) { logger.info("Will send ACK to call actor: " + call.path()); } call.tell(request, self()); } } // else { // SipSession sipSession = request.getSession(); // SipApplicationSession sipAppSession = request.getApplicationSession(); // if(sipSession.getInvalidateWhenReady()){ // logger.info("Invalidating sipSession: "+sipSession.getId()); // sipSession.invalidate(); // } // if(sipAppSession.getInvalidateWhenReady()){ // logger.info("Invalidating sipAppSession: "+sipAppSession.getId()); // sipAppSession.invalidate(); // } // } }
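The B2BUA patching code in the Call and CallManager examples above repeats the same three-part test (isSiteLocalAddress, isAnyLocalAddress, isLoopbackAddress) before substituting the "real" client URI. A small helper such as the following captures that condition; the class and method names are hypothetical and are not part of the RestComm code base:

import java.net.InetAddress;

final class AddressChecks {
    private AddressChecks() {
    }

    // True when the resolved request URI host is only meaningful locally
    // (site-local, wildcard/any-local, or loopback), i.e. the cases in which
    // the examples above rewrite the request URI to the real client address.
    static boolean isPrivateOrLocal(InetAddress addr) {
        return addr != null
                && (addr.isSiteLocalAddress() || addr.isAnyLocalAddress() || addr.isLoopbackAddress());
    }
}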
From source file:org.epics.archiverappliance.config.DefaultConfigService.java
@Override public void postStartup() throws ConfigException { if (this.startupState != STARTUP_SEQUENCE.READY_TO_JOIN_APPLIANCE) { configlogger.info("Webapp is not in correct state for postStartup " + this.getWarFile().toString() + ". It is in " + this.startupState.toString()); return;/*from w w w . j ava2 s . c o m*/ } this.startupState = STARTUP_SEQUENCE.POST_STARTUP_RUNNING; configlogger.info("Post startup for " + this.getWarFile().toString()); // Inherit logging from log4j configuration. try { PlatformLoggingMXBean logging = ManagementFactory.getPlatformMXBean(PlatformLoggingMXBean.class); if (logging != null) { java.util.logging.Logger.getLogger("com.hazelcast"); if (clusterLogger.isDebugEnabled()) { logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.FINE.toString()); } else if (clusterLogger.isInfoEnabled()) { logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.INFO.toString()); } else { logger.info( "Setting clustering logging based on log levels for cluster." + getClass().getName()); logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.SEVERE.toString()); } } Logger hzMain = Logger.getLogger("com.hazelcast"); if (clusterLogger.isDebugEnabled()) { hzMain.setLevel(Level.DEBUG); } else if (clusterLogger.isInfoEnabled()) { hzMain.setLevel(Level.INFO); } else { logger.info("Setting clustering logging based on log levels for cluster." + getClass().getName()); hzMain.setLevel(Level.FATAL); } } catch (Exception ex) { logger.error("Exception setting logging JVM levels ", ex); } // Add this to the system props before doing anything with Hz System.getProperties().put("hazelcast.logging.type", "log4j"); HazelcastInstance hzinstance = null; // Set the thread count to control how may threads this library spawns. Properties hzThreadCounts = new Properties(); if (System.getenv().containsKey("ARCHAPPL_ALL_APPS_ON_ONE_JVM")) { logger.info("Reducing the generic clustering thread counts."); hzThreadCounts.put("hazelcast.clientengine.thread.count", "2"); hzThreadCounts.put("hazelcast.operation.generic.thread.count", "2"); hzThreadCounts.put("hazelcast.operation.thread.count", "2"); } if (this.warFile == WAR_FILE.MGMT) { // The management webapps are the head honchos in the cluster. We set them up differently configlogger.debug("Initializing the MGMT webapp's clustering"); // If we have a hazelcast.xml in the servlet classpath, the XmlConfigBuilder picks that up. // If not we use the default config found in hazelcast.jar // We then alter this config to suit our purposes. Config config = new XmlConfigBuilder().build(); try { if (this.getClass().getResource("hazelcast.xml") == null) { logger.info("We override the default cluster config by disabling multicast discovery etc."); // We do not use multicast as it is not supported on all networks. config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false); // We use TCPIP to discover the members in the cluster. // This is part of the config that comes from appliance.xml config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true); // Clear any tcpip config that comes from the default config // This gets rid of the localhost in the default that prevents clusters from forming.. // If we need localhost, we'll add it back later. config.getNetworkConfig().getJoin().getTcpIpConfig().clear(); // Enable interfaces; we seem to need this after 2.4 for clients to work correctly in a multi-homed environment. 
// We'll add the actual interface later below config.getNetworkConfig().getInterfaces().setEnabled(true); config.getNetworkConfig().getInterfaces().clear(); // We don't really use the authentication provided by the tool; however, we set it to some default config.getGroupConfig().setName("archappl"); config.getGroupConfig().setPassword("archappl"); // Backup count is 1 by default; we set it explicitly however... config.getMapConfig("default").setBackupCount(1); config.setProperty("hazelcast.logging.type", "log4j"); } else { logger.debug( "There is a hazelcast.xml in the classpath; skipping default configuration in the code."); } } catch (Exception ex) { throw new ConfigException("Exception configuring cluster", ex); } config.setInstanceName(myIdentity); if (!hzThreadCounts.isEmpty()) { logger.info("Reducing the generic clustering thread counts."); config.getProperties().putAll(hzThreadCounts); } try { String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":"); String myHostName = myAddrParts[0]; InetAddress myInetAddr = InetAddress.getByName(myHostName); if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) { logger.info("Address for this appliance -- " + myInetAddr.toString() + " is a loopback address. Changing this to 127.0.0.1 to clustering happy"); myInetAddr = InetAddress.getByName("127.0.0.1"); } int myClusterPort = Integer.parseInt(myAddrParts[1]); logger.debug("We do not let the port auto increment for the MGMT webap"); config.getNetworkConfig().setPortAutoIncrement(false); config.getNetworkConfig().setPort(myClusterPort); config.getNetworkConfig().getInterfaces().addInterface(myInetAddr.getHostAddress()); configlogger.info("Setting my cluster port base to " + myClusterPort + " and using interface " + myInetAddr.getHostAddress()); for (ApplianceInfo applInfo : appliances.values()) { if (applInfo.getIdentity().equals(myIdentity) && this.warFile == WAR_FILE.MGMT) { logger.debug("Not adding myself to the discovery process when I am the mgmt webapp"); } else { String[] addressparts = applInfo.getClusterInetPort().split(":"); String inetaddrpart = addressparts[0]; try { InetAddress inetaddr = InetAddress.getByName(inetaddrpart); if (!inetaddrpart.equals("localhost") && inetaddr.isLoopbackAddress()) { logger.info("Address for appliance " + applInfo.getIdentity() + " - " + inetaddr.toString() + " is a loopback address. Changing this to 127.0.0.1 to clustering happy"); inetaddr = InetAddress.getByName("127.0.0.1"); } int clusterPort = Integer.parseInt(addressparts[1]); logger.info("Adding " + applInfo.getIdentity() + " from appliances.xml to the cluster discovery using cluster inetport " + inetaddr.toString() + ":" + clusterPort); config.getNetworkConfig().getJoin().getTcpIpConfig() .addMember(inetaddr.getHostAddress() + ":" + clusterPort); } catch (UnknownHostException ex) { configlogger.info("Cannnot resolve the IP address for appliance " + inetaddrpart + ". Skipping adding this appliance to the cliuster."); } } } hzinstance = Hazelcast.newHazelcastInstance(config); } catch (Exception ex) { throw new ConfigException("Exception adding member to cluster", ex); } } else { // All other webapps are "native" clients. try { configlogger.debug("Initializing a non-mgmt webapp's clustering"); ClientConfig clientConfig = new ClientConfig(); clientConfig.getGroupConfig().setName("archappl"); clientConfig.getGroupConfig().setPassword("archappl"); clientConfig.setExecutorPoolSize(4); // Non mgmt client can only connect to their MGMT webapp. 
String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":"); String myHostName = myAddrParts[0]; InetAddress myInetAddr = InetAddress.getByName(myHostName); if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) { logger.info("Address for this appliance -- " + myInetAddr.toString() + " is a loopback address. Changing this to 127.0.0.1 to clustering happy"); myInetAddr = InetAddress.getByName("127.0.0.1"); } int myClusterPort = Integer.parseInt(myAddrParts[1]); configlogger.debug(this.warFile + " connecting as a native client to " + myInetAddr.getHostAddress() + ":" + myClusterPort); clientConfig.getNetworkConfig().addAddress(myInetAddr.getHostAddress() + ":" + myClusterPort); clientConfig.setProperty("hazelcast.logging.type", "log4j"); if (!hzThreadCounts.isEmpty()) { logger.info("Reducing the generic clustering thread counts."); clientConfig.getProperties().putAll(hzThreadCounts); } if (!clusterLogger.isDebugEnabled()) { // The client code logs some SEVERE exceptions on shutdown when deploying on the same Tomcat container. // These exceptions are confusing; ideally, we would not have to set the log levels like so. Logger.getLogger("com.hazelcast.client.spi.impl.ClusterListenerThread").setLevel(Level.OFF); Logger.getLogger("com.hazelcast.client.spi.ClientPartitionService").setLevel(Level.OFF); } hzinstance = HazelcastClient.newHazelcastClient(clientConfig); } catch (Exception ex) { throw new ConfigException("Exception adding client to cluster", ex); } } pv2appliancemapping = hzinstance.getMap("pv2appliancemapping"); namedFlags = hzinstance.getMap("namedflags"); typeInfos = hzinstance.getMap("typeinfo"); archivePVRequests = hzinstance.getMap("archivePVRequests"); channelArchiverDataServers = hzinstance.getMap("channelArchiverDataServers"); clusterInet2ApplianceIdentity = hzinstance.getMap("clusterInet2ApplianceIdentity"); aliasNamesToRealNames = hzinstance.getMap("aliasNamesToRealNames"); pv2ChannelArchiverDataServer = hzinstance.getMap("pv2ChannelArchiverDataServer"); pubSub = hzinstance.getTopic("pubSub"); final HazelcastInstance shutdownHzInstance = hzinstance; shutdownHooks.add(0, new Runnable() { @Override public void run() { logger.debug("Shutting down clustering instance in webapp " + warFile.toString()); shutdownHzInstance.shutdown(); } }); if (this.warFile == WAR_FILE.MGMT) { Cluster cluster = hzinstance.getCluster(); String localInetPort = getMemberKey(cluster.getLocalMember()); clusterInet2ApplianceIdentity.put(localInetPort, myIdentity); logger.debug("Adding myself " + myIdentity + " as having inetport " + localInetPort); hzinstance.getMap("clusterInet2ApplianceIdentity") .addEntryListener(new EntryAddedListener<Object, Object>() { @Override public void entryAdded(EntryEvent<Object, Object> event) { String appliden = (String) event.getValue(); appliancesInCluster.add(appliden); logger.info("Adding appliance " + appliden + " to the list of active appliances as inetport " + ((String) event.getKey())); } }, true); hzinstance.getMap("clusterInet2ApplianceIdentity") .addEntryListener(new EntryRemovedListener<Object, Object>() { @Override public void entryRemoved(EntryEvent<Object, Object> event) { String appliden = (String) event.getValue(); appliancesInCluster.remove(appliden); logger.info("Removing appliance " + appliden + " from the list of active appliancesas inetport " + ((String) event.getKey())); } }, true); logger.debug( "Establishing a cluster membership listener to detect when appliances drop off the cluster"); cluster.addMembershipListener(new 
MembershipListener() { public void memberAdded(MembershipEvent membersipEvent) { Member member = membersipEvent.getMember(); String inetPort = getMemberKey(member); if (clusterInet2ApplianceIdentity.containsKey(inetPort)) { String appliden = clusterInet2ApplianceIdentity.get(inetPort); appliancesInCluster.add(appliden); configlogger.info("Adding newly started appliance " + appliden + " to the list of active appliances for inetport " + inetPort); } else { logger.debug("Skipping adding appliance using inetport " + inetPort + " to the list of active instances as we do not have a mapping to its identity"); } } public void memberRemoved(MembershipEvent membersipEvent) { Member member = membersipEvent.getMember(); String inetPort = getMemberKey(member); if (clusterInet2ApplianceIdentity.containsKey(inetPort)) { String appliden = clusterInet2ApplianceIdentity.get(inetPort); appliancesInCluster.remove(appliden); configlogger.info("Removing appliance " + appliden + " from the list of active appliances"); } else { configlogger.debug("Received member removed event for " + inetPort); } } @Override public void memberAttributeChanged(MemberAttributeEvent membersipEvent) { Member member = membersipEvent.getMember(); String inetPort = getMemberKey(member); configlogger.debug("Received membership attribute changed event for " + inetPort); } }); logger.debug( "Adding the current members in the cluster after establishing the cluster membership listener"); for (Member member : cluster.getMembers()) { String mbrInetPort = getMemberKey(member); logger.debug("Found member " + mbrInetPort); if (clusterInet2ApplianceIdentity.containsKey(mbrInetPort)) { String appliden = clusterInet2ApplianceIdentity.get(mbrInetPort); appliancesInCluster.add(appliden); logger.info("Adding appliance " + appliden + " to the list of active appliances for inetport " + mbrInetPort); } else { logger.debug("Skipping adding appliance using inetport " + mbrInetPort + " to the list of active instances as we do not have a mapping to its identity"); } } logger.info("Established subscription(s) for appliance availability"); if (this.getInstallationProperties().containsKey(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY)) { String namedFlagsFileName = (String) this.getInstallationProperties() .get(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY); configlogger.info("Loading named flags from file " + namedFlagsFileName); File namedFlagsFile = new File(namedFlagsFileName); if (!namedFlagsFile.exists()) { configlogger.error( "File containing named flags " + namedFlagsFileName + " specified but not present"); } else { Properties namedFlagsFromFile = new Properties(); try (FileInputStream is = new FileInputStream(namedFlagsFile)) { namedFlagsFromFile.load(is); for (Object namedFlagFromFile : namedFlagsFromFile.keySet()) { try { String namedFlagFromFileStr = (String) namedFlagFromFile; Boolean namedFlagFromFileValue = Boolean .parseBoolean((String) namedFlagsFromFile.get(namedFlagFromFileStr)); logger.debug("Setting named flag " + namedFlagFromFileStr + " to " + namedFlagFromFileValue); this.namedFlags.put(namedFlagFromFileStr, namedFlagFromFileValue); } catch (Exception ex) { logger.error("Exception loading named flag from file" + namedFlagsFileName, ex); } } } catch (Exception ex) { configlogger.error("Exception loading named flags from " + namedFlagsFileName, ex); } } } } if (this.warFile == WAR_FILE.ENGINE) { // It can take a while for the engine to start up. 
// We probably want to do this in the background so that the appliance as a whole starts up quickly and we get retrieval up and running quickly. this.startupExecutor.schedule(new Runnable() { @Override public void run() { try { logger.debug("Starting up the engine's channels on startup."); archivePVSonStartup(); logger.debug("Done starting up the engine's channels in startup."); } catch (Throwable t) { configlogger.fatal("Exception starting up the engine channels on startup", t); } } }, 1, TimeUnit.SECONDS); } else if (this.warFile == WAR_FILE.ETL) { this.etlPVLookup.postStartup(); } else if (this.warFile == WAR_FILE.MGMT) { pvsForThisAppliance = new ConcurrentSkipListSet<String>(); pausedPVsForThisAppliance = new ConcurrentSkipListSet<String>(); initializePersistenceLayer(); loadTypeInfosFromPersistence(); loadAliasesFromPersistence(); loadArchiveRequestsFromPersistence(); loadExternalServersFromPersistence(); registerForNewExternalServers(hzinstance.getMap("channelArchiverDataServers")); // Cache the aggregate of all the PVs that are registered to this appliance. logger.debug("Building a local aggregate of PV infos that are registered to this appliance"); for (String pvName : getPVsForThisAppliance()) { if (!pvsForThisAppliance.contains(pvName)) { applianceAggregateInfo.addInfoForPV(pvName, this.getTypeInfoForPV(pvName), this); } } } // Register for changes to the typeinfo map. logger.info("Registering for changes to typeinfos"); hzinstance.getMap("typeinfo").addEntryListener(new EntryAddedListener<Object, Object>() { @Override public void entryAdded(EntryEvent<Object, Object> entryEvent) { logger.debug("Received entryAdded for pvTypeInfo"); PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue(); String pvName = typeInfo.getPvName(); eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_ADDED)); if (persistanceLayer != null) { try { persistanceLayer.putTypeInfo(pvName, typeInfo); } catch (Exception ex) { logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex); } } } }, true); hzinstance.getMap("typeinfo").addEntryListener(new EntryRemovedListener<Object, Object>() { @Override public void entryRemoved(EntryEvent<Object, Object> entryEvent) { PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getOldValue(); String pvName = typeInfo.getPvName(); logger.info("Received entryRemoved for pvTypeInfo " + pvName); eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_DELETED)); if (persistanceLayer != null) { try { persistanceLayer.deleteTypeInfo(pvName); } catch (Exception ex) { logger.error("Exception deleting pvTypeInfo for pv " + pvName, ex); } } } }, true); hzinstance.getMap("typeinfo").addEntryListener(new EntryUpdatedListener<Object, Object>() { @Override public void entryUpdated(EntryEvent<Object, Object> entryEvent) { PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue(); String pvName = typeInfo.getPvName(); eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_MODIFIED)); logger.debug("Received entryUpdated for pvTypeInfo"); if (persistanceLayer != null) { try { persistanceLayer.putTypeInfo(pvName, typeInfo); } catch (Exception ex) { logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex); } } } }, true); eventBus.register(this); pubSub.addMessageListener(new MessageListener<PubSubEvent>() { @Override public void onMessage(Message<PubSubEvent> pubSubEventMsg) { PubSubEvent pubSubEvent = pubSubEventMsg.getMessageObject(); if (pubSubEvent.getDestination() != null) { if 
(pubSubEvent.getDestination().equals("ALL") || (pubSubEvent.getDestination().startsWith(myIdentity) && pubSubEvent.getDestination() .endsWith(DefaultConfigService.this.warFile.toString()))) { // We publish messages from hazelcast into this VM only if the intened WAR file is us. logger.debug("Publishing event into this JVM " + pubSubEvent.generateEventDescription()); // In this case, we set the source as being the cluster to prevent republishing back into the cluster. pubSubEvent.markSourceAsCluster(); eventBus.post(pubSubEvent); } else { logger.debug("Skipping publishing event into this JVM " + pubSubEvent.generateEventDescription() + " as destination is not me " + DefaultConfigService.this.warFile.toString()); } } else { logger.debug("Skipping publishing event with null destination"); } } }); logger.info("Done registering for changes to typeinfos"); this.startupState = STARTUP_SEQUENCE.STARTUP_COMPLETE; configlogger.info("Start complete for webapp " + this.warFile); }
From source file:com.max2idea.android.limbo.main.LimboActivity.java
// Returns the first non-loopback IPv4 address found on any network interface, or null if none
// is found. The contains(".") check keeps only IPv4-formatted addresses.
public static String getLocalIpAddress() {
    try {
        for (Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces(); en.hasMoreElements();) {
            NetworkInterface intf = en.nextElement();
            for (Enumeration<InetAddress> enumIpAddr = intf.getInetAddresses(); enumIpAddr.hasMoreElements();) {
                InetAddress inetAddress = enumIpAddr.nextElement();
                if (!inetAddress.isLoopbackAddress() && inetAddress.getHostAddress().contains(".")) {
                    Log.v("Internal ip", inetAddress.getHostAddress());
                    return inetAddress.getHostAddress();
                }
            }
        }
    } catch (SocketException ex) {
        Log.e("Internal IP", ex.toString());
    }
    return null;
}
From source file:org.apache.hadoop.hdfs.DFSClient.java
// Checks whether targetAddr refers to this machine: first against a cache of previously
// resolved local addresses, then by testing for the wildcard/loopback address, and finally
// by asking whether the address is bound to a local network interface.
private static boolean isLocalAddress(InetSocketAddress targetAddr) {
    InetAddress addr = targetAddr.getAddress();
    if (localIpAddresses.contains(addr.getHostAddress())) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Address " + targetAddr + " is local");
        }
        return true;
    }

    // Check if the address is any-local or loopback
    boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();

    // Check if the address is defined on any interface
    if (!local) {
        try {
            local = NetworkInterface.getByInetAddress(addr) != null;
        } catch (SocketException e) {
            local = false;
        }
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("Address " + targetAddr + " is " + (local ? "local" : "not local"));
    }
    if (local) {
        localIpAddresses.add(addr.getHostAddress());
    }
    return local;
}
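Stripped of the DFSClient-specific address cache and logging, the same "is this address local to this machine?" check can be sketched on its own; this is a simplified illustration under that assumption, not the Hadoop implementation:

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;

final class LocalAddressCheck {
    private LocalAddressCheck() {
    }

    // An address counts as local if it is the wildcard (any-local) address, a loopback
    // address, or bound to one of this machine's network interfaces.
    static boolean isLocal(InetAddress addr) {
        if (addr.isAnyLocalAddress() || addr.isLoopbackAddress()) {
            return true;
        }
        try {
            return NetworkInterface.getByInetAddress(addr) != null;
        } catch (SocketException e) {
            return false;
        }
    }
}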
From source file:org.restcomm.connect.telephony.Call.java
private void onSipServletResponse(SipServletResponse message, ActorRef self, ActorRef sender) throws Exception { this.lastResponse = message; final int code = message.getStatus(); switch (code) { case SipServletResponse.SC_CALL_BEING_FORWARDED: { forwarding(message);//from w w w. j a v a2s . co m break; } case SipServletResponse.SC_RINGING: case SipServletResponse.SC_SESSION_PROGRESS: { if (!is(ringing)) { if (logger.isInfoEnabled()) { logger.info("Got 180 Ringing for Call: " + self().path() + " To: " + to + " sender: " + sender.path() + " observers size: " + observers.size()); } fsm.transition(message, ringing); } break; } case SipServletResponse.SC_BUSY_HERE: case SipServletResponse.SC_BUSY_EVERYWHERE: case SipServletResponse.SC_DECLINE: { sendCallInfoToObservers(); //Important. If state is DIALING, then do nothing about the BUSY. If not DIALING state move to failingBusy // // Notify the observers. // external = CallStateChanged.State.BUSY; // final CallStateChanged event = new CallStateChanged(external); // for (final ActorRef observer : observers) { // observer.tell(event, self); // } // XXX shouldnt it move to failingBusy IF dialing ???? // if (is(dialing)) { // break; // } else { // fsm.transition(message, failingBusy); // } if (!is(failingNoAnswer)) fsm.transition(message, failingBusy); break; } case SipServletResponse.SC_UNAUTHORIZED: case SipServletResponse.SC_PROXY_AUTHENTICATION_REQUIRED: { // Handles Auth for https://bitbucket.org/telestax/telscale-restcomm/issue/132/implement-twilio-sip-out if ((this.username != null || this.username.isEmpty()) && (this.password != null && this.password.isEmpty())) { sendCallInfoToObservers(); fsm.transition(message, failed); } else { AuthInfo authInfo = this.factory.createAuthInfo(); String authHeader = message.getHeader("Proxy-Authenticate"); if (authHeader == null) { authHeader = message.getHeader("WWW-Authenticate"); } String tempRealm = authHeader.substring(authHeader.indexOf("realm=\"") + "realm=\"".length()); String realm = tempRealm.substring(0, tempRealm.indexOf("\"")); authInfo.addAuthInfo(message.getStatus(), realm, this.username, this.password); SipServletRequest challengeRequest = message.getSession() .createRequest(message.getRequest().getMethod()); challengeRequest.addAuthHeader(message, authInfo); challengeRequest.setContent(this.invite.getContent(), this.invite.getContentType()); this.invite = challengeRequest; // https://github.com/Mobicents/RestComm/issues/147 Make sure we send the SDP again this.invite.setContent(message.getRequest().getContent(), "application/sdp"); if (outboundToIms) { final SipURI uri = factory.createSipURI(null, imsProxyAddress); uri.setPort(imsProxyPort); uri.setLrParam(true); challengeRequest.pushRoute(uri); } challengeRequest.send(); } break; } // https://github.com/Mobicents/RestComm/issues/148 // Session in Progress Response should trigger MMS to start the Media Session // case SipServletResponse.SC_SESSION_PROGRESS: case SipServletResponse.SC_OK: { if (is(dialing) || (is(ringing) && !"inbound".equals(direction))) { fsm.transition(message, updatingMediaSession); } break; } default: { if (code >= 400 && code != 487) { if (code == 487 && isOutbound()) { String initialIpBeforeLB = null; String initialPortBeforeLB = null; try { initialIpBeforeLB = message.getHeader("X-Sip-Balancer-InitialRemoteAddr"); initialPortBeforeLB = message.getHeader("X-Sip-Balancer-InitialRemotePort"); } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug("Exception during check of LB custom headers for IP address 
and port"); } } final SipServletRequest ack = message.createAck(); addCustomHeaders(ack); SipSession session = message.getSession(); if (initialIpBeforeLB != null) { if (initialPortBeforeLB == null) initialPortBeforeLB = "5060"; if (logger.isInfoEnabled()) { logger.info("We are behind load balancer, will use: " + initialIpBeforeLB + ":" + initialPortBeforeLB + " for ACK message, "); } String realIP = initialIpBeforeLB + ":" + initialPortBeforeLB; SipURI uri = factory.createSipURI(((SipURI) ack.getRequestURI()).getUser(), realIP); ack.setRequestURI(uri); } else if (!ack.getHeaders("Route").hasNext()) { final SipServletRequest originalInvite = message.getRequest(); final SipURI realInetUri = (SipURI) originalInvite.getRequestURI(); if ((SipURI) session.getAttribute("realInetUri") == null) { session.setAttribute("realInetUri", realInetUri); } final InetAddress ackRURI = InetAddress.getByName(((SipURI) ack.getRequestURI()).getHost()); final int ackRURIPort = ((SipURI) ack.getRequestURI()).getPort(); if (realInetUri != null && (ackRURI.isSiteLocalAddress() || ackRURI.isAnyLocalAddress() || ackRURI.isLoopbackAddress()) && (ackRURIPort != realInetUri.getPort())) { if (logger.isInfoEnabled()) { logger.info("Using the real ip address and port of the sip client " + realInetUri.toString() + " as a request uri of the ACK"); } realInetUri.setUser(((SipURI) ack.getRequestURI()).getUser()); ack.setRequestURI(realInetUri); } } ack.send(); if (logger.isInfoEnabled()) { logger.info("Just sent out ACK : " + ack.toString()); } } this.fail = true; fsm.transition(message, stopping); } } } }
From source file:org.restcomm.connect.telephony.CallManager.java
private void info(final SipServletRequest request) throws IOException { final ActorRef self = self(); final SipApplicationSession application = request.getApplicationSession(); // if this response is coming from a client that is in a p2p session with another registered client // we will just proxy the response SipSession linkedB2BUASession = B2BUAHelper.getLinkedSession(request); if (linkedB2BUASession != null) { if (logger.isInfoEnabled()) { logger.info(String.format("B2BUA: Got INFO request: \n %s", request)); }/*from w w w . ja v a 2s . com*/ request.getSession().setAttribute(B2BUAHelper.B2BUA_LAST_REQUEST, request); SipServletRequest clonedInfo = linkedB2BUASession.createRequest("INFO"); linkedB2BUASession.setAttribute(B2BUAHelper.B2BUA_LAST_REQUEST, clonedInfo); // Issue #307: https://telestax.atlassian.net/browse/RESTCOMM-307 SipURI toInetUri = (SipURI) request.getSession().getAttribute(B2BUAHelper.TO_INET_URI); SipURI fromInetUri = (SipURI) request.getSession().getAttribute(B2BUAHelper.FROM_INET_URI); InetAddress infoRURI = null; try { infoRURI = InetAddress.getByName(((SipURI) clonedInfo.getRequestURI()).getHost()); } catch (UnknownHostException e) { } if (patchForNatB2BUASessions) { if (toInetUri != null && infoRURI == null) { if (logger.isInfoEnabled()) { logger.info("Using the real ip address of the sip client " + toInetUri.toString() + " as a request uri of the CloneBye request"); } clonedInfo.setRequestURI(toInetUri); } else if (toInetUri != null && (infoRURI.isSiteLocalAddress() || infoRURI.isAnyLocalAddress() || infoRURI.isLoopbackAddress())) { if (logger.isInfoEnabled()) { logger.info("Using the real ip address of the sip client " + toInetUri.toString() + " as a request uri of the CloneInfo request"); } clonedInfo.setRequestURI(toInetUri); } else if (fromInetUri != null && (infoRURI.isSiteLocalAddress() || infoRURI.isAnyLocalAddress() || infoRURI.isLoopbackAddress())) { if (logger.isInfoEnabled()) { logger.info("Using the real ip address of the sip client " + fromInetUri.toString() + " as a request uri of the CloneInfo request"); } clonedInfo.setRequestURI(fromInetUri); } } clonedInfo.send(); } else { final ActorRef call = (ActorRef) application.getAttribute(Call.class.getName()); call.tell(request, self); } }
From source file:cgeo.geocaching.cgBase.java
// Returns the host address of the first non-loopback address found on any network
// interface, or null if none is found or the interfaces cannot be enumerated.
public static String getLocalIpAddress() {
    try {
        for (Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces(); en.hasMoreElements();) {
            NetworkInterface intf = en.nextElement();
            for (Enumeration<InetAddress> enumIpAddr = intf.getInetAddresses(); enumIpAddr.hasMoreElements();) {
                InetAddress inetAddress = enumIpAddr.nextElement();
                if (!inetAddress.isLoopbackAddress()) {
                    return inetAddress.getHostAddress();
                }
            }
        }
    } catch (SocketException e) {
        // ignore and fall through to return null
    }
    return null;
}
From source file:com.cloud.server.ManagementServerImpl.java
@Override @ActionEvent(eventType = EventTypes.EVENT_VOLUME_EXTRACT, eventDescription = "extracting volume", async = true) public Long extractVolume(ExtractVolumeCmd cmd) throws URISyntaxException { Long volumeId = cmd.getId();/*from w ww .j a v a 2 s . co m*/ String url = cmd.getUrl(); Long zoneId = cmd.getZoneId(); AsyncJobVO job = null; // FIXME: cmd.getJob(); String mode = cmd.getMode(); Account account = UserContext.current().getCaller(); if (!_accountMgr.isRootAdmin(account.getType()) && ApiDBUtils.isExtractionDisabled()) { throw new PermissionDeniedException("Extraction has been disabled by admin"); } VolumeVO volume = _volumeDao.findById(volumeId); if (volume == null) { InvalidParameterValueException ex = new InvalidParameterValueException( "Unable to find volume with specified volumeId"); ex.addProxyObject(volume, volumeId, "volumeId"); throw ex; } // perform permission check _accountMgr.checkAccess(account, null, true, volume); if (_dcDao.findById(zoneId) == null) { throw new InvalidParameterValueException("Please specify a valid zone."); } if (volume.getPoolId() == null) { throw new InvalidParameterValueException( "The volume doesnt belong to a storage pool so cant extract it"); } // Extract activity only for detached volumes or for volumes whose instance is stopped if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) { s_logger.debug("Invalid state of the volume with ID: " + volumeId + ". It should be either detached or the VM should be in stopped state."); PermissionDeniedException ex = new PermissionDeniedException( "Invalid state of the volume with specified ID. It should be either detached or the VM should be in stopped state."); ex.addProxyObject(volume, volumeId, "volumeId"); throw ex; } if (volume.getVolumeType() != Volume.Type.DATADISK) { // Datadisk dont have any template dependence. VMTemplateVO template = ApiDBUtils.findTemplateById(volume.getTemplateId()); if (template != null) { // For ISO based volumes template = null and we allow extraction of all ISO based volumes boolean isExtractable = template.isExtractable() && template.getTemplateType() != Storage.TemplateType.SYSTEM; if (!isExtractable && account != null && account.getType() != Account.ACCOUNT_TYPE_ADMIN) { // Global // admins are always allowed to extract PermissionDeniedException ex = new PermissionDeniedException( "The volume with specified volumeId is not allowed to be extracted"); ex.addProxyObject(volume, volumeId, "volumeId"); throw ex; } } } Upload.Mode extractMode; if (mode == null || (!mode.equals(Upload.Mode.FTP_UPLOAD.toString()) && !mode.equals(Upload.Mode.HTTP_DOWNLOAD.toString()))) { throw new InvalidParameterValueException("Please specify a valid extract Mode "); } else { extractMode = mode.equals(Upload.Mode.FTP_UPLOAD.toString()) ? Upload.Mode.FTP_UPLOAD : Upload.Mode.HTTP_DOWNLOAD; } // If mode is upload perform extra checks on url and also see if there is an ongoing upload on the same. 
if (extractMode == Upload.Mode.FTP_UPLOAD) { URI uri = new URI(url); if ((uri.getScheme() == null) || (!uri.getScheme().equalsIgnoreCase("ftp"))) { throw new IllegalArgumentException("Unsupported scheme for url: " + url); } String host = uri.getHost(); try { InetAddress hostAddr = InetAddress.getByName(host); if (hostAddr.isAnyLocalAddress() || hostAddr.isLinkLocalAddress() || hostAddr.isLoopbackAddress() || hostAddr.isMulticastAddress()) { throw new IllegalArgumentException("Illegal host specified in url"); } if (hostAddr instanceof Inet6Address) { throw new IllegalArgumentException( "IPV6 addresses not supported (" + hostAddr.getHostAddress() + ")"); } } catch (UnknownHostException uhe) { throw new IllegalArgumentException("Unable to resolve " + host); } if (_uploadMonitor.isTypeUploadInProgress(volumeId, Upload.Type.VOLUME)) { throw new IllegalArgumentException(volume.getName() + " upload is in progress. Please wait for some time to schedule another upload for the same"); } } long accountId = volume.getAccountId(); StoragePoolVO srcPool = _poolDao.findById(volume.getPoolId()); HostVO sserver = _storageMgr.getSecondaryStorageHost(zoneId); String secondaryStorageURL = sserver.getStorageUrl(); List<UploadVO> extractURLList = _uploadDao.listByTypeUploadStatus(volumeId, Upload.Type.VOLUME, UploadVO.Status.DOWNLOAD_URL_CREATED); if (extractMode == Upload.Mode.HTTP_DOWNLOAD && extractURLList.size() > 0) { return extractURLList.get(0).getId(); // If download url already exists then return } else { UploadVO uploadJob = _uploadMonitor.createNewUploadEntry(sserver.getId(), volumeId, UploadVO.Status.COPY_IN_PROGRESS, Upload.Type.VOLUME, url, extractMode); s_logger.debug("Extract Mode - " + uploadJob.getMode()); uploadJob = _uploadDao.createForUpdate(uploadJob.getId()); // Update the async Job ExtractResponse resultObj = new ExtractResponse(volumeId, volume.getName(), accountId, UploadVO.Status.COPY_IN_PROGRESS.toString(), uploadJob.getId()); resultObj.setResponseName(cmd.getCommandName()); AsyncJobExecutor asyncExecutor = BaseAsyncJobExecutor.getCurrentExecutor(); if (asyncExecutor != null) { job = asyncExecutor.getJob(); _asyncMgr.updateAsyncJobAttachment(job.getId(), Upload.Type.VOLUME.toString(), volumeId); _asyncMgr.updateAsyncJobStatus(job.getId(), AsyncJobResult.STATUS_IN_PROGRESS, resultObj); } String value = _configs.get(Config.CopyVolumeWait.toString()); int copyvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); // Copy the volume from the source storage pool to secondary storage CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volume.getPath(), srcPool, secondaryStorageURL, true, copyvolumewait); CopyVolumeAnswer cvAnswer = null; try { cvAnswer = (CopyVolumeAnswer) _storageMgr.sendToPool(srcPool, cvCmd); } catch (StorageUnavailableException e) { s_logger.debug("Storage unavailable"); } // Check if you got a valid answer. if (cvAnswer == null || !cvAnswer.getResult()) { String errorString = "Failed to copy the volume from the source primary storage pool to secondary storage."; // Update the async job. 
resultObj.setResultString(errorString); resultObj.setUploadStatus(UploadVO.Status.COPY_ERROR.toString()); if (asyncExecutor != null) { _asyncMgr.completeAsyncJob(job.getId(), AsyncJobResult.STATUS_FAILED, 0, resultObj); } // Update the DB that volume couldn't be copied uploadJob.setUploadState(UploadVO.Status.COPY_ERROR); uploadJob.setErrorString(errorString); uploadJob.setLastUpdated(new Date()); _uploadDao.update(uploadJob.getId(), uploadJob); throw new CloudRuntimeException(errorString); } String volumeLocalPath = "volumes/" + volume.getId() + "/" + cvAnswer.getVolumePath() + "." + getFormatForPool(srcPool); // Update the DB that volume is copied and volumePath uploadJob.setUploadState(UploadVO.Status.COPY_COMPLETE); uploadJob.setLastUpdated(new Date()); uploadJob.setInstallPath(volumeLocalPath); _uploadDao.update(uploadJob.getId(), uploadJob); if (extractMode == Mode.FTP_UPLOAD) { // Now that the volume is copied perform the actual uploading _uploadMonitor.extractVolume(uploadJob, sserver, volume, url, zoneId, volumeLocalPath, cmd.getStartEventId(), job.getId(), _asyncMgr); return uploadJob.getId(); } else { // Volume is copied now make it visible under apache and create a URL. _uploadMonitor.createVolumeDownloadURL(volumeId, volumeLocalPath, Upload.Type.VOLUME, zoneId, uploadJob.getId()); return uploadJob.getId(); } } }