Usage examples for java.net.InetSocketAddress.getHostName()
public final String getHostName()
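Before the project examples below, a minimal self-contained sketch (not taken from any of the sources that follow; the class name is illustrative) of what getHostName() returns. Note that getHostName() may trigger a reverse DNS lookup when the address was created from an IP literal, whereas getHostString() (Java 7+) never does.

import java.net.InetSocketAddress;

public class GetHostNameDemo {
    public static void main(String[] args) {
        // Created from a host name: getHostName() returns it without any lookup.
        InetSocketAddress byName = new InetSocketAddress("localhost", 8080);
        System.out.println(byName.getHostName()); // "localhost"

        // Created unresolved: the host name is kept as-is and no DNS lookup happens.
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.com", 443);
        System.out.println(unresolved.getHostName());  // "example.com"
        System.out.println(unresolved.isUnresolved()); // true

        // getHostString() never triggers a reverse lookup, unlike getHostName().
        System.out.println(unresolved.getHostString()); // "example.com"
    }
}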
From source file:org.apache.hadoop.dfs.DataNode.java
/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *     if conf's CONFIG_PROPERTY_SIMULATED property is set
 *     then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs) throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);

    this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    this.socketTimeout = conf.getInt("dfs.socket.timeout", FSConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", FSConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);
    String address = NetUtils.getServerAddress(conf, "dfs.datanode.bindAddress", "dfs.datanode.port",
            "dfs.datanode.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);
    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            // Equivalent of following (can't do because Simulated is in test dir)
            // this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils
                    .newInstance(Class.forName("org.apache.hadoop.dfs.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    ss.setSoTimeout(conf.getInt("dfs.dataXceiver.timeoutInMS", 30000)); // 30s
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
    this.threadGroup = new ThreadGroup("dataXceiveServer");
    this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    this.balancingThrottler = new BlockBalanceThrottler(
            conf.getLong("dfs.balance.bandwidthPerSec", 1024L * 1024));

    // initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verification is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    // create a servlet to serve full-file content
    String infoAddr = NetUtils.getServerAddress(conf, "dfs.datanode.info.bindAddress",
            "dfs.datanode.info.port", "dfs.datanode.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new StatusHttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0);
    InetSocketAddress secInfoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(conf);
    sslConf.addResource(conf.get("https.keystore.info.rsrc", "sslinfo.xml"));
    String keyloc = sslConf.get("https.keystore.location");
    if (null != keyloc) {
        this.infoServer.addSslListener(secInfoSocAddr, keyloc, sslConf.get("https.keystore.password", ""),
                sslConf.get("https.keystore.keypassword", ""));
    }
    this.infoServer.addServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID());

    // init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}
From source file:com.datatorrent.stram.StreamingContainerManager.java
private BufferServerController getBufferServerClient(PTOperator operator) {
    BufferServerController bsc = new BufferServerController(operator.getLogicalId());
    bsc.setToken(operator.getContainer().getBufferServerToken());
    InetSocketAddress address = operator.getContainer().bufferServerAddress;
    StreamingContainer.eventloop.connect(
            address.isUnresolved() ? new InetSocketAddress(address.getHostName(), address.getPort())
                    : address,
            bsc);
    return bsc;
}
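The isUnresolved() check above forces name resolution before connecting. The same pattern in isolation, as a minimal sketch (the helper name is ours, not DataTorrent's):

import java.net.InetSocketAddress;

final class Addresses {
    // If the address is unresolved, rebuild it so the InetSocketAddress
    // constructor performs the DNS lookup eagerly; otherwise return it as-is.
    static InetSocketAddress ensureResolved(InetSocketAddress address) {
        return address.isUnresolved()
                ? new InetSocketAddress(address.getHostName(), address.getPort())
                : address;
    }
}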
From source file:de.dal33t.powerfolder.Controller.java
/**
 * Starts controller with a special config file, and creates and starts all
 * components of PowerFolder.
 *
 * @param filename
 *            The filename to use as config file (located in the
 *            "getConfigLocationBase()")
 */
public void startConfig(String filename) {
    if (started) {
        throw new IllegalStateException("Configuration already started, shutdown controller first");
    }

    additionalConnectionListeners = Collections.synchronizedList(new ArrayList<ConnectionListener>());
    started = false;
    shuttingDown = false;
    threadPool = new WrappedScheduledThreadPoolExecutor(Constants.CONTROLLER_THREADS_IN_THREADPOOL,
            new NamedThreadFactory("Controller-Thread-"));

    // Initialize resource bundle eagerly.
    // Check for a forced language file from the command line.
    if (commandLine != null && commandLine.hasOption("f")) {
        String langfilename = commandLine.getOptionValue("f");
        try {
            ResourceBundle resourceBundle = new ForcedLanguageFileResourceBundle(langfilename);
            Translation.setResourceBundle(resourceBundle);
            logInfo("Loading language bundle from file " + langfilename);
        } catch (FileNotFoundException fnfe) {
            logSevere("forced language file (" + langfilename + ") not found: " + fnfe.getMessage());
            logSevere("using setup language");
            Translation.resetResourceBundle();
        } catch (IOException ioe) {
            logSevere("forced language file io error: " + ioe.getMessage());
            logSevere("using setup language");
            Translation.resetResourceBundle();
        }
    } else {
        Translation.resetResourceBundle();
    }
    Translation.getResourceBundle();

    // loadConfigFile
    if (!loadConfigFile(filename)) {
        return;
    }

    boolean isDefaultConfig = Constants.DEFAULT_CONFIG_FILE.startsWith(getConfigName());
    if (isDefaultConfig) {
        // To keep compatible with previous versions
        preferences = Preferences.userNodeForPackage(PowerFolder.class);
    } else {
        preferences = Preferences.userNodeForPackage(PowerFolder.class).node(getConfigName());
    }

    // initialize logger
    // Enable verbose mode if set in config.
    // This logs to file for analysis.
    verbose = ConfigurationEntry.VERBOSE.getValueBoolean(this);
    initLogger();

    if (verbose) {
        ByteSerializer.BENCHMARK = true;
        scheduleAndRepeat(new Runnable() {
            @Override
            public void run() {
                ByteSerializer.printStats();
            }
        }, 600000L, 600000L);
        Profiling.setEnabled(false);
        Profiling.reset();
    }

    String arch = OSUtil.is64BitPlatform() ? "64bit" : "32bit";
    logFine("OS: " + System.getProperty("os.name") + " (" + arch + ')');
    logFine("Java: " + JavaVersion.systemVersion().toString() + " (" + System.getProperty("java.vendor")
            + ')');
    logFine("Current time: " + new Date());
    Runtime runtime = Runtime.getRuntime();
    long maxMemory = runtime.maxMemory();
    long totalMemory = runtime.totalMemory();
    logFine("Max Memory: " + Format.formatBytesShort(maxMemory) + ", Total Memory: "
            + Format.formatBytesShort(totalMemory));
    if (!Desktop.isDesktopSupported() && isUIEnabled()) {
        logWarning("Desktop utility not supported");
    }

    // If we have a new config, clear the preferences.
    clearPreferencesOnConfigSwitch();

    // Load and set http proxy settings
    HTTPProxySettings.loadFromConfig(this);

    // #2179: Load from server. How to handle timeouts?
    // Command line option -c http://are.de
    ConfigurationLoader.loadAndMergeCLI(this);
    // Config entry in file
    ConfigurationLoader.loadAndMergeConfigURL(this);
    // Read from installer temp file
    ConfigurationLoader.loadAndMergeFromInstaller(this);

    if (verbose != ConfigurationEntry.VERBOSE.getValueBoolean(this)) {
        verbose = ConfigurationEntry.VERBOSE.getValueBoolean(this);
        initLogger();
    }

    // Init paused only if user expects pause to be permanent or
    // "while I work"
    int pauseSecs = ConfigurationEntry.PAUSE_RESUME_SECONDS.getValueInt(getController());
    paused = PreferencesEntry.PAUSED.getValueBoolean(this)
            && (pauseSecs == Integer.MAX_VALUE || pauseSecs == 0);
    // Now set it, just in case it was paused in permanent mode.
    PreferencesEntry.PAUSED.setValue(this, paused);

    // Load and set http proxy settings again.
    HTTPProxySettings.loadFromConfig(this);

    // Initialize branding/preconfiguration of the client
    initDistribution();
    logFine("Build time: " + getBuildTime());
    logInfo("Program version " + PROGRAM_VERSION);

    if (getDistribution().getBinaryName().toLowerCase().contains("powerfolder")) {
        Debug.writeSystemProperties();
    }

    if (ConfigurationEntry.KILL_RUNNING_INSTANCE.getValueBoolean(this)) {
        killRunningInstance();
    }
    FolderList.removeMemberFiles(this);

    // Initialize plugins
    setupProPlugins();
    pluginManager = new PluginManager(this);
    pluginManager.init();

    // create node manager
    nodeManager = new NodeManager(this);

    // Only one task brother left...
    taskManager = new PersistentTaskManager(this);

    // Folder repository
    folderRepository = new FolderRepository(this);
    setLoadingCompletion(0, 10);

    // Create transfer manager
    // If this is a unit test it might have been set before.
    try {
        transferManager = transferManagerFactory.call();
    } catch (Exception e) {
        logSevere("Exception", e);
    }

    reconnectManager = new ReconnectManager(this);
    // Create os client
    osClient = new ServerClient(this);

    if (isUIEnabled()) {
        uiController = new UIController(this);
        if (ConfigurationEntry.USER_INTERFACE_LOCKED.getValueBoolean(this)) {
            // Don't let the user pass this step.
            new UIUnLockDialog(this).openAndWait();
        }
    }
    setLoadingCompletion(10, 20);

    // The io provider.
    ioProvider = new IOProvider(this);
    ioProvider.start();

    // Set hostname by CLI
    if (commandLine != null && commandLine.hasOption('d')) {
        String host = commandLine.getOptionValue("d");
        if (StringUtils.isNotBlank(host)) {
            InetSocketAddress addr = Util.parseConnectionString(host);
            if (addr != null) {
                ConfigurationEntry.HOSTNAME.setValue(this, addr.getHostName());
                ConfigurationEntry.NET_BIND_PORT.setValue(this, addr.getPort());
            }
        }
    }

    // initialize dyndns manager
    dyndnsManager = new DynDnsManager(this);
    setLoadingCompletion(20, 30);

    // initialize listener on local port
    if (!initializeListenerOnLocalPort()) {
        return;
    }
    if (!isUIEnabled()) {
        // Disable paused function
        paused = false;
    }
    setLoadingCompletion(30, 35);

    // Start the nodemanager
    nodeManager.init();
    if (!ProUtil.isRunningProVersion()) {
        // Nodemanager gets later (re)started by ProLoader.
        nodeManager.start();
    }
    setLoadingCompletion(35, 60);

    securityManager = new SecurityManagerClient(this, osClient);

    // init repo (read folders)
    folderRepository.init();
    logInfo("Dataitems: " + Debug.countDataitems(Controller.this));
    // init of folders takes rather long, so a big difference with the
    // last number to get a smooth bar... ;-)
    setLoadingCompletion(60, 65);

    // start repo maintenance thread
    folderRepository.start();
    setLoadingCompletion(65, 70);

    // Start the transfer manager thread
    transferManager.start();
    setLoadingCompletion(70, 75);

    // Initialize rcon manager
    startRConManager();
    setLoadingCompletion(75, 80);

    // Start all configured listeners if not in paused mode
    startConfiguredListener();
    setLoadingCompletion(80, 85);

    // open broadcast listener
    openBroadcastManager();
    setLoadingCompletion(85, 90);

    // Controller now started
    started = true;
    startTime = new Date();

    // Now taskmanager
    taskManager.start();

    logInfo("Controller started");

    // dyndns updater
    /*
     * boolean onStartUpdate = ConfigurationEntry.DYNDNS_AUTO_UPDATE
     *     .getValueBoolean(this).booleanValue();
     * if (onStartUpdate) { getDynDnsManager().onStartUpdate(); }
     */
    dyndnsManager.updateIfNessesary();
    setLoadingCompletion(90, 100);

    // Login to OS
    if (Feature.OS_CLIENT.isEnabled()) {
        try {
            osClient.loginWithLastKnown();
        } catch (Exception e) {
            logWarning("Unable to login with last known username. " + e);
            logFiner(e);
        }
    }

    // Start Plugins
    pluginManager.start();

    // open UI
    if (isConsoleMode()) {
        logFine("Running in console");
    } else {
        logFine("Opening UI");
        openUI();
    }

    // Load anything that was not handled last time.
    loadPersistentObjects();

    setLoadingCompletion(100, 100);
    if (!isConsoleMode()) {
        uiController.hideSplash();
    }

    if (ConfigurationEntry.AUTO_CONNECT.getValueBoolean(this)) {
        // Now start the connecting process
        reconnectManager.start();
    } else {
        logFine("Not starting reconnection process. " + "Config auto.connect set to false");
    }

    // Start connecting to OS client.
    if (Feature.OS_CLIENT.isEnabled() && ConfigurationEntry.SERVER_CONNECT.getValueBoolean(this)) {
        osClient.start();
    } else {
        logInfo("Not connecting to server (" + osClient.getServerString() + "): Disabled");
    }

    // Setup our background working tasks
    setupPeriodicalTasks();

    if (MacUtils.isSupported()) {
        if (isFirstStart()) {
            MacUtils.getInstance().setPFStartup(true, this);
        }
        MacUtils.getInstance().setAppReOpenedListener(this);
    }

    if (pauseSecs == 0) {
        // Activate adaptive logic
        setPaused(paused);
    }
}
From source file:org.apache.hadoop.mapred.CoronaJobTracker.java
private void startInfoServer() throws IOException {
    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(java.net.InetAddress.getLocalHost().getCanonicalHostName(), 0);
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    infoServer = new HttpServer("jt", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("job.tracker", this);
    infoServer.start();
    this.infoPort = this.infoServer.getPort();

    String hostname = java.net.InetAddress.getLocalHost().getCanonicalHostName();
    this.conf.set("mapred.job.tracker.http.address", hostname + ":" + this.infoPort);
    this.conf.setInt("mapred.job.tracker.info.port", this.infoPort);
    this.conf.set("mapred.job.tracker.info.bindAddress", hostname);

    LOG.info("JobTracker webserver: " + this.infoPort);
}
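The recurring Hadoop pattern here, and in the TaskTracker examples below, is to parse a host:port string into an InetSocketAddress and then read the bind host and port back via getHostName()/getPort(). A minimal JDK-only sketch of that pattern (the parse helper is a simplified stand-in for Hadoop's NetUtils.createSocketAddr and does not handle IPv6 literals or missing ports):

import java.net.InetSocketAddress;

final class BindAddress {
    // Simplified stand-in for NetUtils.createSocketAddr: splits "host:port".
    static InetSocketAddress parse(String hostPort) {
        int colon = hostPort.lastIndexOf(':');
        String host = hostPort.substring(0, colon);
        int port = Integer.parseInt(hostPort.substring(colon + 1));
        return new InetSocketAddress(host, port);
    }

    public static void main(String[] args) {
        InetSocketAddress addr = parse("0.0.0.0:50075");
        String bindHost = addr.getHostName(); // host component, as in the examples above
        int bindPort = addr.getPort();        // 0 would mean "pick any free port"
        System.out.println(bindHost + ":" + bindPort);
    }
}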
From source file:org.apache.hadoop.mapred.CoronaJobTracker.java
private void initializePJTClient() throws IOException {
    InetSocketAddress address = NetUtils
            .createSocketAddr(new CoronaConf(conf).getProxyJobTrackerThriftAddress());
    pjtTransport = new TFramedTransport(new TSocket(address.getHostName(), address.getPort()));
    pjtClient = new CoronaProxyJobTrackerService.Client(new TBinaryProtocol(pjtTransport));
    try {
        pjtTransport.open();
    } catch (TException e) {
        LOG.info("Transport Exception: ", e);
    }
}
From source file:org.apache.hadoop.mapred.CoronaJobTracker.java
@Override
public void restoreTaskLaunch(TaskLaunch launch) {
    TaskAttemptID attemptId = launch.getTaskId();
    TaskInProgress tip = job.getTaskInProgress(attemptId.getTaskID());
    String trackerName = launch.getTrackerName();
    InetSocketAddress trackerAddr = launch.getAddress();

    // Update trackerName -> trackerAddress mapping in ResourceTracker
    resourceTracker.updateTrackerAddr(trackerName,
            new InetAddress(trackerAddr.getHostName(), trackerAddr.getPort()));

    Task task = null;
    if (tip.isMapTask()) {
        task = job.forceNewMapTaskForTip(trackerName, trackerAddr.getHostName(), tip);
    } else {
        task = job.forceNewReduceTaskForTip(trackerName, trackerAddr.getHostName(), tip);
    }
    if (task != null && task.getTaskID().equals(attemptId)) {
        TaskAttemptID taskId = task.getTaskID();
        Integer grantId = launch.getGrantId();
        taskLookupTable.createTaskEntry(taskId, trackerName, tip, grantId);
        // Skip launching task, but add to expire logic
        expireTasks.addNewTask(task.getTaskID());
        trackerStats.recordTask(trackerName);
    } else {
        LOG.error("Failed to register restored task " + attemptId);
    }
}
From source file:com.kixeye.chassis.transport.shared.JettyConnectorRegistry.java
/**
 * Register to listen to HTTPS.
 *
 * @param server
 * @param address
 * @throws Exception
 */
public static void registerHttpsConnector(Server server, InetSocketAddress address, boolean selfSigned,
        boolean mutualSsl, String keyStorePath, String keyStoreData, String keyStorePassword,
        String keyManagerPassword, String trustStorePath, String trustStoreData, String trustStorePassword,
        String[] excludedCipherSuites) throws Exception {
    // SSL Context Factory
    SslContextFactory sslContextFactory = new SslContextFactory();

    if (selfSigned) {
        char[] passwordChars = UUID.randomUUID().toString().toCharArray();

        KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
        keyStore.load(null, passwordChars);

        KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
        keyPairGenerator.initialize(1024);
        KeyPair keyPair = keyPairGenerator.generateKeyPair();

        X509V3CertificateGenerator v3CertGen = new X509V3CertificateGenerator();
        v3CertGen.setSerialNumber(BigInteger.valueOf(new SecureRandom().nextInt()).abs());
        v3CertGen.setIssuerDN(new X509Principal("CN=" + "kixeye.com" + ", OU=None, O=None L=None, C=None"));
        v3CertGen.setNotBefore(new Date(System.currentTimeMillis() - 1000L * 60 * 60 * 24 * 30));
        v3CertGen.setNotAfter(new Date(System.currentTimeMillis() + (1000L * 60 * 60 * 24 * 365 * 10)));
        v3CertGen.setSubjectDN(new X509Principal("CN=" + "kixeye.com" + ", OU=None, O=None L=None, C=None"));

        v3CertGen.setPublicKey(keyPair.getPublic());
        v3CertGen.setSignatureAlgorithm("MD5WithRSAEncryption");

        X509Certificate privateKeyCertificate = v3CertGen.generateX509Certificate(keyPair.getPrivate());

        keyStore.setKeyEntry("selfSigned", keyPair.getPrivate(), passwordChars,
                new java.security.cert.Certificate[] { privateKeyCertificate });

        ByteArrayOutputStream keyStoreBaos = new ByteArrayOutputStream();
        keyStore.store(keyStoreBaos, passwordChars);

        keyStoreData = new String(Hex.encode(keyStoreBaos.toByteArray()), Charsets.UTF_8);
        keyStorePassword = new String(passwordChars);
        keyManagerPassword = keyStorePassword;

        sslContextFactory.setTrustAll(true);
    }

    KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
    if (StringUtils.isNotBlank(keyStoreData)) {
        keyStore.load(new ByteArrayInputStream(Hex.decode(keyStoreData)), keyStorePassword.toCharArray());
    } else if (StringUtils.isNotBlank(keyStorePath)) {
        try (InputStream inputStream = new DefaultResourceLoader().getResource(keyStorePath)
                .getInputStream()) {
            keyStore.load(inputStream, keyStorePassword.toCharArray());
        }
    }
    sslContextFactory.setKeyStore(keyStore);
    sslContextFactory.setKeyStorePassword(keyStorePassword);
    if (StringUtils.isBlank(keyManagerPassword)) {
        keyManagerPassword = keyStorePassword;
    }
    sslContextFactory.setKeyManagerPassword(keyManagerPassword);

    KeyStore trustStore = null;
    if (StringUtils.isNotBlank(trustStoreData)) {
        trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
        trustStore.load(new ByteArrayInputStream(Hex.decode(trustStoreData)),
                trustStorePassword.toCharArray());
    } else if (StringUtils.isNotBlank(trustStorePath)) {
        trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
        try (InputStream inputStream = new DefaultResourceLoader().getResource(trustStorePath)
                .getInputStream()) {
            trustStore.load(inputStream, trustStorePassword.toCharArray());
        }
    }
    if (trustStore != null) {
        sslContextFactory.setTrustStore(trustStore);
        sslContextFactory.setTrustStorePassword(trustStorePassword);
    }

    sslContextFactory.setNeedClientAuth(mutualSsl);
    sslContextFactory.setExcludeCipherSuites(excludedCipherSuites);

    // SSL Connector
    ServerConnector connector = new ServerConnector(server,
            new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.toString()),
            new HttpConnectionFactory());
    connector.setHost(address.getHostName());
    connector.setPort(address.getPort());

    server.addConnector(connector);
}
From source file:org.apache.hadoop.hdfs.server.namenode.FileSystemProvider.java
/**
 * Builds the contents view for one page of a file stored in HDFS.
 *
 * @param contentsMap {
 *     bestNode                 - datanode chosen to read from
 *     buttonType               - navigation button type
 *     chunkSizeToView          - chunk size to view per page
 *     clusterName              - cluster name
 *     currentContentsBlockSize - current contents block size
 *     currentPage              - current page
 *     dfsBlockSize             - DFS block size
 *     dfsBlockStartOffset      - DFS block start offset
 *     filePath                 - file path
 *     fileSize                 - file size
 *     lastDfsBlockSize         - last DFS block size
 *     startOffset              - start offset
 *     totalPage                - total number of pages
 * }
 * @return contentsMap
 */
public Map view(Map contentsMap) {
    try {
        String filePath = (String) contentsMap.get("filePath");
        FileSystem fs = FileSystem.get(Namenode2Agent.configuration);
        ContentSummary summary = fs.getContentSummary(new Path(filePath));
        long fileSize = summary.getLength();
        long dfsBlockSize = Long.parseLong(String.valueOf(contentsMap.get("dfsBlockSize")));
        long startOffset = Long.parseLong(String.valueOf(contentsMap.get("startOffset")));
        long dfsBlockStartOffset = Long.parseLong(String.valueOf(contentsMap.get("dfsBlockStartOffset")));
        int currentContentsBlockSize = Integer
                .parseInt(String.valueOf(contentsMap.get("currentContentsBlockSize")));
        int currentPage = (int) contentsMap.get("currentPage");
        int totalPage = Integer.parseInt(String.valueOf(contentsMap.get("totalPage")));
        String buttonType = (String) contentsMap.get("buttonType");
        long chunkSizeToView = contentsMap.containsKey("chunkSizeToView")
                ? Long.parseLong(String.valueOf(contentsMap.get("chunkSizeToView")))
                : DEFAULT_CHUNK_SIZE;
        long lastDfsBlockSize = 0;
        if (fileSize > dfsBlockSize) {
            if (contentsMap.containsKey("lastDfsBlockSize")) {
                lastDfsBlockSize = Long.parseLong(String.valueOf(contentsMap.get("lastDfsBlockSize")));
            }
        }
        DFSClient dfsClient = new DFSClient(fs.getUri(), Namenode2Agent.configuration);

        if (!FileUtils.pathValidator(filePath)) {
            throw new ServiceException("Invalid path. Please check the path.");
        }

        if (chunkSizeToView <= 0) {
            chunkSizeToView = DEFAULT_CHUNK_SIZE;
        }

        long lastPageChunkSizeToView = fileSize % chunkSizeToView;

        if (currentPage == 0) {
            if (fileSize > chunkSizeToView) {
                totalPage = (int) (fileSize / chunkSizeToView);
                if (lastPageChunkSizeToView > 0) {
                    totalPage++;
                }
            } else {
                totalPage = 1;
            }
            if (fileSize > dfsBlockSize) {
                long lastDfsBlockStartOffset = fileSize;
                LocatedBlocks locatedBlocks = dfsClient.getNamenode().getBlockLocations(filePath,
                        lastDfsBlockStartOffset, chunkSizeToView);
                lastDfsBlockSize = locatedBlocks.getLastLocatedBlock().getBlockSize();
                contentsMap.put("lastDfsBlockSize", lastDfsBlockSize);
            }
        }

        // Total pages for the chosen chunk size
        contentsMap.put("totalPage", totalPage);

        // Number of DFS blocks for this file in the block pool
        int dfsBlockCount = (int) (fileSize / dfsBlockSize);
        long dfsBlockResidue = fileSize / dfsBlockSize;
        if (dfsBlockResidue > 0) {
            dfsBlockCount++;
        }

        int moveToPage;
        long viewSize = chunkSizeToView;

        // File contents range to view for DFS Block in BlockPool
        /**
         * Navigation cases (the first button always resets currentPage to 0):
         *
         * Case 1. Next button
         * Case 2. Last button
         * Case 3. Previous button
         * Case 4. Custom page
         * Case 5. Default page
         */
        switch (buttonType) {
        case "nextButton":
            moveToPage = currentPage + 1;
            if (moveToPage < totalPage) {
                startOffset += chunkSizeToView;
            } else if (moveToPage == totalPage) {
                startOffset = fileSize - lastPageChunkSizeToView;
                viewSize = lastPageChunkSizeToView;
            }
            break;
        case "lastButton":
            moveToPage = totalPage;
            startOffset = fileSize - lastPageChunkSizeToView;
            viewSize = lastPageChunkSizeToView;
            break;
        case "prevButton":
            moveToPage = currentPage - 1;
            if (currentPage < totalPage) {
                startOffset -= chunkSizeToView;
            } else if (currentPage == totalPage) {
                if (moveToPage == 1) {
                    startOffset = 0;
                } else {
                    startOffset -= chunkSizeToView;
                }
            }
            break;
        case "customPage":
            moveToPage = currentPage;
            if (moveToPage == 1) {
                startOffset = (long) 0;
            } else if (moveToPage < totalPage) {
                startOffset = chunkSizeToView * moveToPage;
            } else if (moveToPage == totalPage) {
                startOffset = fileSize - lastPageChunkSizeToView;
                viewSize = lastPageChunkSizeToView;
            }
            break;
        default:
            moveToPage = 1;
            startOffset = (long) 0;
            // If the file is smaller than chunkSizeToView, view the whole file.
            if (fileSize < chunkSizeToView) {
                viewSize = fileSize;
            }
            break;
        }

        contentsMap.put("currentPage", moveToPage);
        contentsMap.put("startOffset", startOffset);

        /**
         * Fetch block metadata (fileSize, blockSize, blockCount, genStamp,
         * location...) for the requested range through the DFS client.
         */
        LocatedBlocks locatedBlocks = dfsClient.getNamenode().getBlockLocations(filePath, startOffset,
                viewSize);
        int nextContentsBlockSize = locatedBlocks.locatedBlockCount();

        // Number of page views per DFS block for the chosen chunk size
        long dfsBlockViewCount = dfsBlockSize / chunkSizeToView;
        long dfsBlockViewResidueSize = dfsBlockSize % chunkSizeToView;
        if (dfsBlockViewResidueSize > 0) {
            dfsBlockViewCount++;
        }

        List<Long> startOffsetPerDfsBlocks = new ArrayList<>();
        List<Long> accumulatedStartOffsetPerDfsBlocks = new ArrayList<>();
        List<Long> lastStartOffsetPerDfsBlocks = new ArrayList<>();
        List<Long> lastChunkSizePerDfsBlocks = new ArrayList<>();
        List<Long> pageCheckPoints = new ArrayList<>();

        /**
         * When a page spans a DFS block boundary, the view is split between the
         * current block's last chunk (currentBlockChunkSizeToView) and the next
         * block's first chunk (nextBlockChunkSizeToView); the per-block start
         * offsets computed below are used to merge the two ranges.
         *
         * DFS Block Size = 128 MB (134,217,728 B),
         * StartOffset Range Per DFS Block = 0 ~ 134217727, ChunkSizeToView : 10000
         * ex. moveToPage == 13421, locatedBlocks size == 2
         * First DFS Block's Last StartOffset : 134210000
         * Second DFS Block's First(Accumulated) Offset : 0 ~ 2271
         * Second DFS Block's Second StartOffset : 2272
         * Second DFS Block's Last StartOffset : 134212272
         * Third DFS Block's First(Accumulated) Offset : 0 ~ 4543
         * Third DFS Block's Second StartOffset : 4544
         */
        if (fileSize > dfsBlockSize) {
            long accumulatedStartOffset;
            long startOffsetForDfsBlock;
            long startOffsetForSecondDfsBlock = chunkSizeToView - dfsBlockViewResidueSize;
            long dfsBlockLastChunkSize = chunkSizeToView;
            for (int i = 0; i < dfsBlockCount; i++) {
                accumulatedStartOffset = startOffsetForSecondDfsBlock * i;
                accumulatedStartOffsetPerDfsBlocks.add(i, accumulatedStartOffset);
                if (dfsBlockLastChunkSize < startOffsetForSecondDfsBlock) {
                    dfsBlockLastChunkSize += chunkSizeToView;
                }
                long lastDfsBlockLastStartOffset = 0;
                if (i == dfsBlockCount - 1) {
                    long lastDfsBlockViewCount = lastDfsBlockSize / chunkSizeToView;
                    long lastDfsBlockResidue = lastDfsBlockSize % chunkSizeToView;
                    if (lastDfsBlockResidue < dfsBlockLastChunkSize) {
                        lastDfsBlockViewCount--;
                    }
                    lastDfsBlockLastStartOffset = (lastDfsBlockViewCount * chunkSizeToView)
                            + (chunkSizeToView - dfsBlockLastChunkSize); // 47841808
                    dfsBlockLastChunkSize = lastDfsBlockSize - lastDfsBlockLastStartOffset;
                } else {
                    dfsBlockLastChunkSize -= startOffsetForSecondDfsBlock;
                }
                lastChunkSizePerDfsBlocks.add(i, dfsBlockLastChunkSize);
                long dfsBlockLastStartOffset;
                if (i == dfsBlockCount - 1) {
                    dfsBlockLastStartOffset = lastDfsBlockLastStartOffset;
                } else {
                    dfsBlockLastStartOffset = dfsBlockSize - dfsBlockLastChunkSize;
                }
                lastStartOffsetPerDfsBlocks.add(i, dfsBlockLastStartOffset);
                startOffsetForDfsBlock = dfsBlockLastStartOffset % chunkSizeToView;
                startOffsetPerDfsBlocks.add(i, startOffsetForDfsBlock);
            }

            // Per-DFS-block offset bookkeeping
            contentsMap.put("accumulatedStartOffsetPerDfsBlocks", accumulatedStartOffsetPerDfsBlocks);
            contentsMap.put("lastStartOffsetPerDfsBlocks", lastStartOffsetPerDfsBlocks);
            contentsMap.put("lastChunkSizePerDfsBlocks", lastChunkSizePerDfsBlocks);
            contentsMap.put("startOffsetPerDfsBlocks", startOffsetPerDfsBlocks);

            long firstPageCheckPoint = dfsBlockSize / chunkSizeToView;
            long pageCheckPoint = 0;
            long pageCheckChunkSizeToView = chunkSizeToView;
            for (int i = 0; i < 15; i++) {
                pageCheckPoint += firstPageCheckPoint;
                int j = i;
                j++;
                if (j < accumulatedStartOffsetPerDfsBlocks.size()) {
                    if (accumulatedStartOffsetPerDfsBlocks.get(j) > pageCheckChunkSizeToView) {
                        pageCheckChunkSizeToView += chunkSizeToView;
                        pageCheckPoint -= 1;
                    }
                    pageCheckPoints.add(i, pageCheckPoint);
                    pageCheckPoint++;
                }
            }
            // Page checkpoints for customPage navigation across DFS blocks
            contentsMap.put("pageCheckPoints", pageCheckPoints);
        }

        /**
         * locatedBlocks split condition: moveToPage >= dfsBlockViewCount - 1
         *
         * ex.
         * offsetRange 0 >> moveToPage < dfsBlockViewCount - 1 : 13420 - (13422-1)
         * offsetRange 1 >> moveToPage == dfsBlockViewCount - 1 : 13421 - (13422-1)
         * offsetRange 2 >> moveToPage > dfsBlockViewCount - 1 : 13422 - (13422-1)
         */
        int offsetRange = (int) (moveToPage / (dfsBlockViewCount - 1));
        LocatedBlock locatedBlock;
        LocatedBlock nextLocatedBlock = null;
        long currentBlockLastStartOffset = 0;
        long currentBlockLastChunkSizeToView = 0;
        long nextBlockFirstStartOffset = 0;
        long nextBlockFirstChunkSizeToView = 0;
        boolean splitViewFlag = false;

        /**
         * A page that spans two DFS blocks is stitched together from the end of
         * the current block and the start of the next one.
         * Criteria : DFS Block Size(128MB) and ChunkSizeToView(10000B)
         *
         * currentBlockLastStartOffset ~ nextBlockAccumulatedStartOffset
         * ex. 134210000 ~ 2272
         */
        if (nextContentsBlockSize > 1) {
            splitViewFlag = true;
            locatedBlock = locatedBlocks.get(0);
            nextLocatedBlock = locatedBlocks.get(1);
            dfsBlockStartOffset = startOffsetPerDfsBlocks.get(offsetRange);
            contentsMap.put("dfsBlockStartOffset", dfsBlockStartOffset);
            // Last startOffset in the current block
            currentBlockLastStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange - 1);
            currentBlockLastChunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange - 1);
            nextBlockFirstStartOffset = 0;
            nextBlockFirstChunkSizeToView = chunkSizeToView - currentBlockLastChunkSizeToView;
        } else {
            locatedBlock = locatedBlocks.get(0);
        }

        if (offsetRange < pageCheckPoints.size()) {
            contentsMap.put("dfsBlockSize", dfsBlockSize);
        }

        boolean currentPageSplitViewFlag = false;
        if (currentContentsBlockSize > 1) {
            currentPageSplitViewFlag = true;
        }

        /**
         * Moving back from DFS block 1 to DFS block 0:
         * when currentPageSplitViewFlag is true, reset dfsBlockStartOffset.
         * ex. 13421 -> 13420
         */
        if (moveToPage < (dfsBlockViewCount - 1) && (moveToPage + 1) == (dfsBlockViewCount - 1)) {
            dfsBlockStartOffset = startOffset;
        }

        // StartOffset handling when paging within DFS blocks past the first one
        boolean dfsBlockStartOffsetRangeFlag = false;
        if (fileSize > dfsBlockSize && moveToPage >= dfsBlockViewCount && !splitViewFlag) {
            dfsBlockStartOffsetRangeFlag = true;
        }

        if (dfsBlockStartOffsetRangeFlag) {
            if (buttonType.equalsIgnoreCase("nextButton")) {
                if (moveToPage == totalPage) {
                    dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange);
                    chunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange);
                } else {
                    /**
                     * Each DFS block has its own first startOffset.
                     * ex) DFS Block Size : 128 MB
                     * Second DFS Block StartOffset : 2272
                     *
                     * moveToPage range per DFS block
                     * 0 ~ 13421 : First DFS Block
                     * 13422 ~ 26843
                     * 26844 ~ 53687
                     */
                    if (currentContentsBlockSize < 2) {
                        dfsBlockStartOffset += chunkSizeToView;
                    }
                }
            } else if (buttonType.equalsIgnoreCase("prevButton")) {
                // Moving back across a DFS block boundary
                if (currentPageSplitViewFlag) {
                    dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange - 1);
                    dfsBlockStartOffset -= chunkSizeToView;
                } else {
                    dfsBlockStartOffset -= chunkSizeToView;
                }
            } else if (buttonType.equalsIgnoreCase("customPage")) {
                // Split view across DFS blocks for a directly requested page
                if (moveToPage == totalPage) {
                    dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange);
                    chunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange);
                } else {
                    long dfsBlockAccumulatedStartOffset = startOffsetPerDfsBlocks.get(offsetRange);
                    long pageCheckPoint = pageCheckPoints.get(offsetRange - 1);
                    long currentPageCount = moveToPage - pageCheckPoint; // 50000-40265=9735
                    // First page within the target DFS block
                    if (currentPageCount == 1) {
                        dfsBlockStartOffset = dfsBlockAccumulatedStartOffset;
                    } else {
                        long pageRange = chunkSizeToView;
                        currentPageCount--;
                        if (currentPageCount > 0) {
                            pageRange *= currentPageCount; // 97340000, 134210000
                        }
                        dfsBlockStartOffset = pageRange + dfsBlockAccumulatedStartOffset; // 97346816
                    }
                }
            } else if (buttonType.equalsIgnoreCase("lastButton")) {
                dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange);
                chunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange);
            }
            contentsMap.put("dfsBlockStartOffset", dfsBlockStartOffset);
        }

        contentsMap.put("currentContentsBlockSize", nextContentsBlockSize);
        contentsMap.put("offsetRange", offsetRange);

        if (fileSize < dfsBlockSize) {
            if (moveToPage == totalPage) {
                chunkSizeToView = lastPageChunkSizeToView;
            }
        }

        /**
         * Case 1. Reuse the cached bestNode and its block URL.
         * Case 2. Choose a bestNode among the datanodes.
         */
        InetSocketAddress address;
        InetSocketAddress nextAddress = null;
        DatanodeInfo chosenNode;
        DatanodeInfo nextChosenNode;

        if (contentsMap.containsKey("bestNode") && !splitViewFlag && !currentPageSplitViewFlag
                && !dfsBlockStartOffsetRangeFlag && !buttonType.equalsIgnoreCase("customPage")) {
            String bestNode = (String) contentsMap.get("bestNode");
            address = NetUtils.createSocketAddr(bestNode);
            contentsMap.put("bestNode", bestNode);
        } else {
            chosenNode = bestNode(locatedBlock);
            address = NetUtils.createSocketAddr(chosenNode.getName());
            contentsMap.put("bestNode", chosenNode.getName());
            if (splitViewFlag) {
                nextChosenNode = bestNode(nextLocatedBlock);
                nextAddress = NetUtils.createSocketAddr(nextChosenNode.getName());
                contentsMap.put("bestNode", nextChosenNode.getName());
            }
        }

        /**
         * DFS File Block Size in HDFS
         *
         * A file smaller than a single DFS block has a locatedBlockCount of 1.
         *
         * Typical DFS block sizes:
         * 64 (MB) >> 67,108,864 (B)
         * 128 (MB) >> 134,217,728 (B)
         */
        String poolId = locatedBlock.getBlock().getBlockPoolId();
        long blockId = locatedBlock.getBlock().getBlockId();
        long genStamp = locatedBlock.getBlock().getGenerationStamp();
        Token<BlockTokenIdentifier> blockToken = locatedBlock.getBlockToken();
        DatanodeID datanodeID = new DatanodeID(address.getAddress().getHostAddress(), address.getHostName(),
                poolId, address.getPort(), 0, 0, 0);
        Peer peer = dfsClient.newConnectedPeer(address, blockToken, datanodeID);
        CachingStrategy cachingStrategy = dfsClient.getDefaultReadCachingStrategy();
        ExtendedBlock extendedBlock = new ExtendedBlock(poolId, blockId, fileSize, genStamp);
        String contents;

        if (splitViewFlag) {
            String currentBlockContents = streamBlockInAscii(address, blockToken, fileSize,
                    currentBlockLastStartOffset, currentBlockLastChunkSizeToView, fs.getConf(), filePath,
                    dfsClient.getClientName(), extendedBlock, false, peer, datanodeID, cachingStrategy);

            long nextBlockId = nextLocatedBlock.getBlock().getBlockId();
            long nextGenStamp = nextLocatedBlock.getBlock().getGenerationStamp();
            Token<BlockTokenIdentifier> nextBlockToken = nextLocatedBlock.getBlockToken();
            DatanodeID nextDatanodeID = new DatanodeID(nextAddress.getAddress().getHostAddress(),
                    nextAddress.getHostName(), poolId, nextAddress.getPort(), 0, 0, 0);
            Peer nextPeer = dfsClient.newConnectedPeer(nextAddress, nextBlockToken, nextDatanodeID);
            CachingStrategy nextCachingStrategy = dfsClient.getDefaultReadCachingStrategy();
            ExtendedBlock nextExtendedBlock = new ExtendedBlock(poolId, nextBlockId, fileSize, nextGenStamp);

            String nextBlockContents = streamBlockInAscii(nextAddress, nextBlockToken, fileSize,
                    nextBlockFirstStartOffset, nextBlockFirstChunkSizeToView, fs.getConf(), filePath,
                    dfsClient.getClientName(), nextExtendedBlock, false, nextPeer, nextDatanodeID,
                    nextCachingStrategy);

            // Merge the two blocks' contents
            contents = currentBlockContents + nextBlockContents;
            contentsMap.put("startOffset", startOffset);
        } else {
            startOffset = dfsBlockStartOffsetRangeFlag || currentPageSplitViewFlag ? dfsBlockStartOffset
                    : startOffset;
            contents = streamBlockInAscii(address, blockToken, fileSize, startOffset, chunkSizeToView,
                    fs.getConf(), filePath, dfsClient.getClientName(), extendedBlock, false, peer,
                    datanodeID, cachingStrategy);
        }

        contentsMap.put("chunkSizeToView", chunkSizeToView);
        contentsMap.put("lastPageChunkSizeToView", lastPageChunkSizeToView);
        contentsMap.put("contents", contents);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return contentsMap;
}
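The DatanodeID construction above pulls both the numeric IP (address.getAddress().getHostAddress()) and the host name (address.getHostName()) out of the same InetSocketAddress. That sub-pattern in isolation, as a minimal sketch (the class name is illustrative):

import java.net.InetSocketAddress;

public class AddressParts {
    public static void main(String[] args) {
        InetSocketAddress address = new InetSocketAddress("localhost", 50010);
        // Numeric IP of the resolved address (e.g. "127.0.0.1").
        // Note: getAddress() would be null for an unresolved address.
        String ip = address.getAddress().getHostAddress();
        // Host name; may consult DNS if the address was created from an IP literal.
        String host = address.getHostName();
        System.out.println(ip + " / " + host + ":" + address.getPort());
    }
}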
From source file:org.apache.hadoop.mapred.TaskTracker.java
/**
 * Start with the local machine name, and the default JobTracker
 */
public TaskTracker(JobConf conf) throws IOException, InterruptedException {
    originalConf = conf;
    FILE_CACHE_SIZE = conf.getInt("mapred.tasktracker.file.cache.size", 2000);
    maxMapSlots = conf.getInt("mapred.tasktracker.map.tasks.maximum", 2);
    maxReduceSlots = conf.getInt("mapred.tasktracker.reduce.tasks.maximum", 2);
    diskHealthCheckInterval = conf.getLong(DISK_HEALTH_CHECK_INTERVAL_PROPERTY,
            DEFAULT_DISK_HEALTH_CHECK_INTERVAL);
    UserGroupInformation.setConfiguration(originalConf);
    aclsManager = new ACLsManager(conf, new JobACLsManager(conf), null);
    this.jobTrackAddr = JobTracker.getAddress(conf);
    String infoAddr = NetUtils.getServerAddress(conf, "tasktracker.http.bindAddress",
            "tasktracker.http.port", "mapred.task.tracker.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String httpBindAddress = infoSocAddr.getHostName();
    int httpPort = infoSocAddr.getPort();
    this.server = new HttpServer("task", httpBindAddress, httpPort, httpPort == 0, conf,
            aclsManager.getAdminsAcl());
    workerThreads = conf.getInt("tasktracker.http.threads", 40);
    server.setThreads(1, workerThreads);
    // let the jsp pages get to the task tracker, config, and other relevant
    // objects
    FileSystem local = FileSystem.getLocal(conf);
    this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
    Class<? extends TaskController> taskControllerClass = conf.getClass(
            "mapred.task.tracker.task-controller", DefaultTaskController.class, TaskController.class);

    fConf = new JobConf(conf);
    localStorage = new LocalStorage(fConf.getLocalDirs());
    localStorage.checkDirs();
    taskController = (TaskController) ReflectionUtils.newInstance(taskControllerClass, fConf);
    taskController.setup(localDirAllocator, localStorage);
    lastNumFailures = localStorage.numFailures();

    // create user log manager
    setUserLogManager(new UserLogManager(conf, taskController));
    SecurityUtil.login(originalConf, TT_KEYTAB_FILE, TT_USER_NAME);

    initialize();

    this.shuffleServerMetrics = ShuffleServerInstrumentation.create(this);
    server.setAttribute("task.tracker", this);
    server.setAttribute("local.file.system", local);
    server.setAttribute("log", LOG);
    server.setAttribute("localDirAllocator", localDirAllocator);
    server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);

    String exceptionStackRegex = conf.get("mapreduce.reduce.shuffle.catch.exception.stack.regex");
    String exceptionMsgRegex = conf.get("mapreduce.reduce.shuffle.catch.exception.message.regex");
    // Percent of shuffle exceptions (out of sample size) seen before it's
    // fatal - acceptable values are from 0 to 1.0, 0 disables the check.
    // ie. 0.3 = 30% of the last X number of requests matched the exception,
    // so abort.
    float shuffleExceptionLimit = conf
            .getFloat("mapreduce.reduce.shuffle.catch.exception.percent.limit.fatal", 0);
    if ((shuffleExceptionLimit > 1) || (shuffleExceptionLimit < 0)) {
        throw new IllegalArgumentException(
                "mapreduce.reduce.shuffle.catch.exception.percent.limit.fatal "
                        + " must be between 0 and 1.0");
    }

    // The number of trailing requests we track, used for the fatal
    // limit calculation
    int shuffleExceptionSampleSize = conf.getInt("mapreduce.reduce.shuffle.catch.exception.sample.size",
            1000);
    if (shuffleExceptionSampleSize <= 0) {
        throw new IllegalArgumentException(
                "mapreduce.reduce.shuffle.catch.exception.sample.size " + " must be greater than 0");
    }
    shuffleExceptionTracking = new ShuffleExceptionTracker(shuffleExceptionSampleSize, exceptionStackRegex,
            exceptionMsgRegex, shuffleExceptionLimit);
    server.setAttribute("shuffleExceptionTracking", shuffleExceptionTracking);

    server.addInternalServlet("mapOutput", "/mapOutput", MapOutputServlet.class);
    server.addServlet("taskLog", "/tasklog", TaskLogServlet.class);
    server.start();
    this.httpPort = server.getPort();
    checkJettyPort(httpPort);
    LOG.info("FILE_CACHE_SIZE for mapOutputServlet set to : " + FILE_CACHE_SIZE);
    mapRetainSize = conf.getLong(TaskLogsTruncater.MAP_USERLOG_RETAIN_SIZE,
            TaskLogsTruncater.DEFAULT_RETAIN_SIZE);
    reduceRetainSize = conf.getLong(TaskLogsTruncater.REDUCE_USERLOG_RETAIN_SIZE,
            TaskLogsTruncater.DEFAULT_RETAIN_SIZE);
}
From source file:org.apache.hadoop.mapred.TaskTracker.java
/**
 * Do the real constructor work here. It's in a separate method
 * so we can call it again and "recycle" the object after calling
 * close().
 */
synchronized void initialize() throws IOException, InterruptedException {
    this.fConf = new JobConf(originalConf);

    LOG.info("Starting tasktracker with owner as " + getMROwner().getShortUserName());

    localFs = FileSystem.getLocal(fConf);
    if (fConf.get("slave.host.name") != null) {
        this.localHostname = fConf.get("slave.host.name");
    }
    if (localHostname == null) {
        this.localHostname = DNS.getDefaultHost(fConf.get("mapred.tasktracker.dns.interface", "default"),
                fConf.get("mapred.tasktracker.dns.nameserver", "default"));
    }

    final String dirs = localStorage.getDirsString();
    fConf.setStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY, dirs);
    LOG.info("Good mapred local directories are: " + dirs);
    taskController.setConf(fConf);
    // Setup task controller so that deletion of user dirs happens properly
    taskController.setup(localDirAllocator, localStorage);
    server.setAttribute("conf", fConf);

    deleteUserDirectories(fConf);

    // NB: deleteLocalFiles uses the configured local dirs, but does not
    // fail if a local directory has failed.
    fConf.deleteLocalFiles(SUBDIR);
    final FsPermission ttdir = FsPermission.createImmutable((short) 0755);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, SUBDIR), ttdir);
    }
    fConf.deleteLocalFiles(TT_PRIVATE_DIR);
    final FsPermission priv = FsPermission.createImmutable((short) 0700);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, TT_PRIVATE_DIR), priv);
    }
    fConf.deleteLocalFiles(TT_LOG_TMP_DIR);
    final FsPermission pub = FsPermission.createImmutable((short) 0755);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, TT_LOG_TMP_DIR), pub);
    }
    // Create userlogs directory under all good mapred-local-dirs
    for (String s : localStorage.getDirs()) {
        Path userLogsDir = new Path(s, TaskLog.USERLOGS_DIR_NAME);
        if (!localFs.exists(userLogsDir)) {
            localFs.mkdirs(userLogsDir, pub);
        }
    }

    // Clear out state tables
    this.tasks.clear();
    this.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
    this.runningJobs = new TreeMap<JobID, RunningJob>();
    this.mapTotal = 0;
    this.reduceTotal = 0;
    this.acceptNewTasks = true;
    this.status = null;

    this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L);
    this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L);
    // tweak the probe sample size (make it a function of numCopiers)
    probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500);

    createInstrumentation();

    // bind address
    String address = NetUtils.getServerAddress(fConf, "mapred.task.tracker.report.bindAddress",
            "mapred.task.tracker.report.port", "mapred.task.tracker.report.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    String bindAddress = socAddr.getHostName();
    int tmpPort = socAddr.getPort();

    this.jvmManager = new JvmManager(this);

    // Set service-level authorization security policy
    if (this.fConf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
                .newInstance(this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                        MapReducePolicyProvider.class, PolicyProvider.class), this.fConf));
        ServiceAuthorizationManager.refresh(fConf, policyProvider);
    }

    // RPC initialization
    int max = maxMapSlots > maxReduceSlots ? maxMapSlots : maxReduceSlots;
    // set the num handlers to max*2 since canCommit may wait for the duration
    // of a heartbeat RPC
    this.taskReportServer = RPC.getServer(this, bindAddress, tmpPort, 2 * max, false, this.fConf,
            this.jobTokenSecretManager);
    this.taskReportServer.start();

    // get the assigned address
    this.taskReportAddress = taskReportServer.getListenerAddress();
    this.fConf.set("mapred.task.tracker.report.address",
            taskReportAddress.getHostName() + ":" + taskReportAddress.getPort());
    LOG.info("TaskTracker up at: " + this.taskReportAddress);

    this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress;
    LOG.info("Starting tracker " + taskTrackerName);

    // Initialize DistributedCache
    this.distributedCacheManager = new TrackerDistributedCacheManager(this.fConf, taskController);
    this.distributedCacheManager.startCleanupThread();

    this.jobClient = (InterTrackerProtocol) UserGroupInformation.getLoginUser()
            .doAs(new PrivilegedExceptionAction<Object>() {
                public Object run() throws IOException {
                    return RPC.waitForProxy(InterTrackerProtocol.class, InterTrackerProtocol.versionID,
                            jobTrackAddr, fConf);
                }
            });
    this.justInited = true;
    this.running = true;

    // start the thread that will fetch map task completion events
    this.mapEventsFetcher = new MapEventsFetcherThread();
    mapEventsFetcher.setDaemon(true);
    mapEventsFetcher.setName("Map-events fetcher for all reduce tasks " + "on " + taskTrackerName);
    mapEventsFetcher.start();

    Class<? extends ResourceCalculatorPlugin> clazz = fConf.getClass(TT_RESOURCE_CALCULATOR_PLUGIN, null,
            ResourceCalculatorPlugin.class);
    resourceCalculatorPlugin = ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, fConf);
    LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculatorPlugin);
    initializeMemoryManagement();

    getUserLogManager().clearOldUserLogs(fConf);

    setIndexCache(new IndexCache(this.fConf));

    mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots);
    reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots);
    mapLauncher.start();
    reduceLauncher.start();

    // create a localizer instance
    setLocalizer(new Localizer(localFs, localStorage.getDirs()));

    // Start up node health checker service.
    if (shouldStartHealthMonitor(this.fConf)) {
        startHealthMonitor(this.fConf);
    }

    // Start thread to monitor jetty bugs
    startJettyBugMonitor();

    oobHeartbeatOnTaskCompletion = fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false);
    oobHeartbeatDamper = fConf.getInt(TT_OUTOFBAND_HEARTBEAT_DAMPER, DEFAULT_OOB_HEARTBEAT_DAMPER);
}