Example usage for java.net InetSocketAddress getPort

List of usage examples for java.net InetSocketAddress getPort

Introduction

On this page you can find example usage for java.net.InetSocketAddress.getPort().

Prototype

public final int getPort() 

Document

Gets the port number.
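
Before looking at the larger examples below, here is a minimal, self-contained sketch of the call itself (the host and port numbers are only illustrative):

import java.net.InetSocketAddress;

public class GetPortDemo {
    public static void main(String[] args) {
        // getPort() simply returns the port the address was created with.
        InetSocketAddress addr = new InetSocketAddress("127.0.0.1", 8080);
        System.out.println(addr.getPort()); // 8080

        // A wildcard address created with port 0 reports 0 until a socket is
        // actually bound and the OS assigns an ephemeral port.
        InetSocketAddress any = new InetSocketAddress(0);
        System.out.println(any.getPort()); // 0
    }
}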

Usage

From source file:edu.umass.cs.gigapaxos.PaxosManager.java

/**
 * @param myAddress
 * @param ssl
 * @return {@code this}
 */
private PaxosManager<NodeIDType> initClientMessenger(InetSocketAddress myAddress, boolean ssl,
        InterfaceNIOTransport<NodeIDType, ?> nioTransport) {
    Messenger<InetSocketAddress, JSONObject> cMsgr = null;
    SSLMessenger<NodeIDType, ?> msgr = (nioTransport instanceof Messenger
            ? (SSLMessenger<NodeIDType, ?>) nioTransport
            : null);

    try {
        int clientPortOffset = ssl ? Config.getGlobalInt(PC.CLIENT_PORT_SSL_OFFSET)
                : Config.getGlobalInt(PC.CLIENT_PORT_OFFSET);

        if (clientPortOffset > 0) {
            InetSocketAddress myAddressOffsetted = new InetSocketAddress(myAddress.getAddress(),
                    myAddress.getPort() + clientPortOffset);
            log.log(Level.INFO, "{0} creating client messenger at {1}; (offset={2}{3})",
                    new Object[] { this, myAddressOffsetted, clientPortOffset, ssl ? "/SSL" : "" });

            MessageNIOTransport<InetSocketAddress, JSONObject> createdNIOTransport = null;

            cMsgr = new JSONMessenger<InetSocketAddress>(
                    createdNIOTransport = new MessageNIOTransport<InetSocketAddress, JSONObject>(
                            myAddressOffsetted.getAddress(), myAddressOffsetted.getPort(),
                            /* Client facing demultiplexer is single
                             * threaded to keep clients from overwhelming
                             * the system with request load. */
                            (Config.getGlobalString(PC.JSON_LIBRARY).equals("org.json")
                                    ? new JSONDemultiplexer(0, true)
                                    : new FastDemultiplexer(
                                            Config.getGlobalInt(PC.CLIENT_DEMULTIPLEXER_THREADS), true)),
                            ssl ? SSLDataProcessingWorker.SSL_MODES
                                    .valueOf(Config.getGlobalString(PC.CLIENT_SSL_MODE)) : SSL_MODES.CLEAR));
            if (Config.getGlobalBoolean(PC.STRICT_ADDRESS_CHECKS)
                    && !createdNIOTransport.getListeningSocketAddress().equals(myAddressOffsetted))
                // Note: will throw false positive exception on EC2
                throw new IOException("Unable to listen on specified socket address at " + myAddressOffsetted
                        + " != " + createdNIOTransport.getListeningSocketAddress());
            assert (msgr != null);
            if (ssl)
                msgr.setSSLClientMessenger(cMsgr);
            else
                msgr.setClientMessenger(cMsgr);
        }
    } catch (IOException e) {
        e.printStackTrace();
        log.severe(e.getMessage());
        System.exit(1);
    }
    return this;
}
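
The getPort() call above derives a second, client-facing address by adding a configured offset to the node's own port. Reduced to its essentials, and with a hypothetical helper name and offset value (not part of gigapaxos), the pattern looks like this:

import java.net.InetSocketAddress;

public class PortOffsetSketch {
    // Hypothetical helper: client-facing address = server address + fixed port offset.
    static InetSocketAddress withOffset(InetSocketAddress serverAddress, int clientPortOffset) {
        return new InetSocketAddress(serverAddress.getAddress(),
                serverAddress.getPort() + clientPortOffset);
    }

    public static void main(String[] args) {
        InetSocketAddress server = new InetSocketAddress("127.0.0.1", 2000);
        System.out.println(withOffset(server, 100).getPort()); // 2100
    }
}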

From source file:org.apache.bookkeeper.proto.BookieNettyServer.java

private void listenOn(InetSocketAddress address, BookieSocketAddress bookieAddress)
        throws InterruptedException {
    if (!conf.isDisableServerSocketBind()) {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
        bootstrap.group(eventLoopGroup, eventLoopGroup);
        bootstrap.childOption(ChannelOption.TCP_NODELAY, conf.getServerTcpNoDelay());
        bootstrap.childOption(ChannelOption.SO_LINGER, conf.getServerSockLinger());
        bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(conf.getRecvByteBufAllocatorSizeMin(),
                        conf.getRecvByteBufAllocatorSizeInitial(), conf.getRecvByteBufAllocatorSizeMax()));
        bootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(
                conf.getServerWriteBufferLowWaterMark(), conf.getServerWriteBufferHighWaterMark()));

        if (eventLoopGroup instanceof EpollEventLoopGroup) {
            bootstrap.channel(EpollServerSocketChannel.class);
        } else {
            bootstrap.channel(NioServerSocketChannel.class);
        }

        bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                synchronized (suspensionLock) {
                    while (suspended) {
                        suspensionLock.wait();
                    }
                }

                BookieSideConnectionPeerContextHandler contextHandler = new BookieSideConnectionPeerContextHandler();
                ChannelPipeline pipeline = ch.pipeline();

                // For ByteBufList, skip the usual LengthFieldPrepender and have the encoder itself add the size
                pipeline.addLast("bytebufList", ByteBufList.ENCODER_WITH_SIZE);

                pipeline.addLast("lengthbaseddecoder",
                        new LengthFieldBasedFrameDecoder(maxFrameSize, 0, 4, 0, 4));
                pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));

                pipeline.addLast("bookieProtoDecoder", new BookieProtoEncoding.RequestDecoder(registry));
                pipeline.addLast("bookieProtoEncoder", new BookieProtoEncoding.ResponseEncoder(registry));
                pipeline.addLast("bookieAuthHandler", new AuthHandler.ServerSideHandler(
                        contextHandler.getConnectionPeer(), authProviderFactory));

                ChannelInboundHandler requestHandler = isRunning.get()
                        ? new BookieRequestHandler(conf, requestProcessor, allChannels)
                        : new RejectRequestHandler();
                pipeline.addLast("bookieRequestHandler", requestHandler);

                pipeline.addLast("contextHandler", contextHandler);
            }
        });

        // Bind and start to accept incoming connections
        Channel listen = bootstrap.bind(address.getAddress(), address.getPort()).sync().channel();
        if (listen.localAddress() instanceof InetSocketAddress) {
            if (conf.getBookiePort() == 0) {
                conf.setBookiePort(((InetSocketAddress) listen.localAddress()).getPort());
            }
        }
    }

    if (conf.isEnableLocalTransport()) {
        ServerBootstrap jvmBootstrap = new ServerBootstrap();
        jvmBootstrap.childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
        jvmBootstrap.group(jvmEventLoopGroup, jvmEventLoopGroup);
        jvmBootstrap.childOption(ChannelOption.TCP_NODELAY, conf.getServerTcpNoDelay());
        jvmBootstrap.childOption(ChannelOption.SO_KEEPALIVE, conf.getServerSockKeepalive());
        jvmBootstrap.childOption(ChannelOption.SO_LINGER, conf.getServerSockLinger());
        jvmBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(conf.getRecvByteBufAllocatorSizeMin(),
                        conf.getRecvByteBufAllocatorSizeInitial(), conf.getRecvByteBufAllocatorSizeMax()));
        jvmBootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(
                conf.getServerWriteBufferLowWaterMark(), conf.getServerWriteBufferHighWaterMark()));

        if (jvmEventLoopGroup instanceof DefaultEventLoopGroup) {
            jvmBootstrap.channel(LocalServerChannel.class);
        } else if (jvmEventLoopGroup instanceof EpollEventLoopGroup) {
            jvmBootstrap.channel(EpollServerSocketChannel.class);
        } else {
            jvmBootstrap.channel(NioServerSocketChannel.class);
        }

        jvmBootstrap.childHandler(new ChannelInitializer<LocalChannel>() {
            @Override
            protected void initChannel(LocalChannel ch) throws Exception {
                synchronized (suspensionLock) {
                    while (suspended) {
                        suspensionLock.wait();
                    }
                }

                BookieSideConnectionPeerContextHandler contextHandler = new BookieSideConnectionPeerContextHandler();
                ChannelPipeline pipeline = ch.pipeline();

                pipeline.addLast("lengthbaseddecoder",
                        new LengthFieldBasedFrameDecoder(maxFrameSize, 0, 4, 0, 4));
                pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));

                pipeline.addLast("bookieProtoDecoder", new BookieProtoEncoding.RequestDecoder(registry));
                pipeline.addLast("bookieProtoEncoder", new BookieProtoEncoding.ResponseEncoder(registry));
                pipeline.addLast("bookieAuthHandler", new AuthHandler.ServerSideHandler(
                        contextHandler.getConnectionPeer(), authProviderFactory));

                ChannelInboundHandler requestHandler = isRunning.get()
                        ? new BookieRequestHandler(conf, requestProcessor, allChannels)
                        : new RejectRequestHandler();
                pipeline.addLast("bookieRequestHandler", requestHandler);

                pipeline.addLast("contextHandler", contextHandler);
            }
        });

        // use the same address 'name' so that clients discovering bookies via ZK can still find the local Bookie
        jvmBootstrap.bind(bookieAddress.getLocalAddress()).sync();
        LocalBookiesRegistry.registerLocalBookieAddress(bookieAddress);
    }
}
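
The getPort() calls above follow two patterns: binding to the address parsed from configuration, and reading back the real port when the configured bookie port was 0. The second pattern, reduced to a plain ServerSocket (a simplified analogue, not BookKeeper code):

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortSketch {
    public static void main(String[] args) throws Exception {
        try (ServerSocket server = new ServerSocket()) {
            server.bind(new InetSocketAddress(0)); // port 0: let the OS pick a free port
            InetSocketAddress local = (InetSocketAddress) server.getLocalSocketAddress();
            System.out.println("actually listening on port " + local.getPort());
        }
    }
}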

From source file:org.apache.hadoop.hdfs.server.namenode.FileSystemProvider.java

/**
 * Views a single page of a file's contents in HDFS, based on the paging parameters
 * carried in the given contents map.
 *
 * @param contentsMap {
 *
 *                          bestNode                     datanode previously chosen to read from
 *                          buttonType                   paging button type (next/prev/last/custom)
 *                          chunkSizeToView              number of bytes to show per page
 *                          clusterName                  cluster name
 *                          currentContentsBlockSize     number of located blocks backing the current page
 *                          currentPage                  current page number
 *                          dfsBlockSize                 DFS Block Size
 *                          dfsBlockStartOffset          DFS Block Start Offset
 *                          filePath                     file path
 *                          fileSize                     file size in bytes
 *                          lastDfsBlockSize             Last DFS Block Size
 *                          startOffset                  Start Offset
 *                          totalPage                    total number of pages
 *                    }
 * @return contentsMap
 */
public Map view(Map contentsMap) {

    try {
        String filePath = (String) contentsMap.get("filePath");
        FileSystem fs = FileSystem.get(Namenode2Agent.configuration);
        ContentSummary summary = fs.getContentSummary(new Path(filePath));
        long fileSize = summary.getLength();
        long dfsBlockSize = Long.parseLong(String.valueOf(contentsMap.get("dfsBlockSize")));
        long startOffset = Long.parseLong(String.valueOf(contentsMap.get("startOffset")));
        long dfsBlockStartOffset = Long.parseLong(String.valueOf(contentsMap.get("dfsBlockStartOffset")));
        int currentContentsBlockSize = Integer
                .parseInt(String.valueOf(contentsMap.get("currentContentsBlockSize")));
        int currentPage = (int) contentsMap.get("currentPage");
        int totalPage = Integer.parseInt(String.valueOf(contentsMap.get("totalPage")));
        String buttonType = (String) contentsMap.get("buttonType");
        long chunkSizeToView = contentsMap.containsKey("chunkSizeToView")
                ? Long.parseLong(String.valueOf(contentsMap.get("chunkSizeToView")))
                : DEFAULT_CHUNK_SIZE;
        long lastDfsBlockSize = 0;

        if (fileSize > dfsBlockSize) {
            if (contentsMap.containsKey("lastDfsBlockSize")) {
                lastDfsBlockSize = Long.parseLong(String.valueOf(contentsMap.get("lastDfsBlockSize")));
            }
        }

        DFSClient dfsClient = new DFSClient(fs.getUri(), Namenode2Agent.configuration);

        if (!FileUtils.pathValidator(filePath)) {
            throw new ServiceException("Invalid path. Please check the path.");
        }

        if (chunkSizeToView <= 0) {
            chunkSizeToView = DEFAULT_CHUNK_SIZE;
        }

        long lastPageChunkSizeToView = fileSize % chunkSizeToView;

        if (currentPage == 0) {
            if (fileSize > chunkSizeToView) {
                totalPage = (int) (fileSize / chunkSizeToView);
                if (lastPageChunkSizeToView > 0) {
                    totalPage++;
                }
            } else {
                totalPage = 1;
            }

            if (fileSize > dfsBlockSize) {
                long lastDfsBlockStartOffset = fileSize;
                LocatedBlocks locatedBlocks = dfsClient.getNamenode().getBlockLocations(filePath,
                        lastDfsBlockStartOffset, chunkSizeToView);
                lastDfsBlockSize = locatedBlocks.getLastLocatedBlock().getBlockSize();
                contentsMap.put("lastDfsBlockSize", lastDfsBlockSize);
            }
        }

        // Total page count, computed from the per-page view size (chunkSizeToView).
        contentsMap.put("totalPage", totalPage);

        // Number of DFS blocks that make up this file in the block pool.
        int dfsBlockCount = (int) (fileSize / dfsBlockSize);
        long dfsBlockResidue = fileSize % dfsBlockSize;
        if (dfsBlockResidue > 0) {
            dfsBlockCount++;
        }

        int moveToPage;
        long viewSize = chunkSizeToView; // File contents range to view for DFS Block in BlockPool

        /**
         * When the FirstButton is pressed, currentPage arrives as 0 and the default case applies.
         *
         * Case 1. Next Button
         * Case 1.1. Moving to a page before the last page
         * Case 1.2. Moving to the last page
         *
         * Case 2. Last Button
         * Case 2.1. Moving to the last page
         *
         * Case 3. Previous Button
         * Case 3.1. Moving back from a page before the last page
         * Case 3.2. Moving back from the last page
         * Case 3.2.1. Moving back to the first page
         * Case 3.2.2. Moving back to an intermediate page
         *
         * Case 4. Custom Page
         * Case 4.1. Moving to the first page
         * Case 4.2. Moving to an intermediate page
         * Case 4.3. Moving to the last page
         *
         * Case 5. Default Page
         * Case 5.1. Initial view of the first page
         */
        switch (buttonType) {
        case "nextButton":
            moveToPage = currentPage + 1;
            if (moveToPage < totalPage) {
                startOffset += chunkSizeToView;
            } else if (moveToPage == totalPage) {
                startOffset = fileSize - lastPageChunkSizeToView;
                viewSize = lastPageChunkSizeToView;
            }
            break;
        case "lastButton":
            moveToPage = totalPage;
            startOffset = fileSize - lastPageChunkSizeToView;
            viewSize = lastPageChunkSizeToView;
            break;
        case "prevButton":
            moveToPage = currentPage - 1;
            if (currentPage < totalPage) {
                startOffset -= chunkSizeToView;
            } else if (currentPage == totalPage) {
                if (moveToPage == 1) {
                    startOffset = 0;
                } else {
                    startOffset -= chunkSizeToView;
                }
            }
            break;
        case "customPage":
            moveToPage = currentPage;
            if (moveToPage == 1) {
                startOffset = (long) 0;
            } else if (moveToPage < totalPage) {
                startOffset = chunkSizeToView * moveToPage;
            } else if (moveToPage == totalPage) {
                startOffset = fileSize - lastPageChunkSizeToView;
                viewSize = lastPageChunkSizeToView;
            }
            break;
        default:
            moveToPage = 1;
            startOffset = (long) 0;
            // If the file is smaller than chunkSizeToView, show the whole file on one page.
            if (fileSize < chunkSizeToView) {
                viewSize = fileSize;
            }
            break;
        }

        // Record the page and start offset we are moving to.
        contentsMap.put("currentPage", moveToPage);
        contentsMap.put("startOffset", startOffset);

        /**
         * Ask the namenode for the located blocks covering the requested range.
         * The block metadata (fileSize, blockSize, blockCount, genStamp, location...) is
         * obtained through the DFS Client.
         * Note that startOffset here is a file-level offset, while each DFS block in the
         * pool has its own block-local startOffset range.
         */
        LocatedBlocks locatedBlocks = dfsClient.getNamenode().getBlockLocations(filePath, startOffset,
                viewSize);
        int nextContentsBlockSize = locatedBlocks.locatedBlockCount();

        // Number of view pages (of chunkSizeToView bytes) that fit in one DFS block.
        long dfsBlockViewCount = dfsBlockSize / chunkSizeToView;
        long dfsBlockViewResidueSize = dfsBlockSize % chunkSizeToView;
        if (dfsBlockViewResidueSize > 0) {
            dfsBlockViewCount++;
        }

        List<Long> startOffsetPerDfsBlocks = new ArrayList<>();
        List<Long> accumulatedStartOffsetPerDfsBlocks = new ArrayList<>();
        List<Long> lastStartOffsetPerDfsBlocks = new ArrayList<>();
        List<Long> lastChunkSizePerDfsBlocks = new ArrayList<>();
        List<Long> pageCheckPoints = new ArrayList<>();

        /**
         * When the file is larger than one DFS block, a single view page can straddle a block
         * boundary. In that case the located block list contains two blocks (LocatedBlockSize == 2):
         * the tail of the current block (currentBlockChunkSizeToView) and the head of the next
         * block (nextBlockChunkSizeToView), which are read separately and merged into one page.
         * The lists built below record, for every DFS block in the pool, its accumulated first
         * offset, its block-local start offset, its last start offset, and its last chunk size.
         *
         * DFS Block Size = 128 MB (134,217,728 B), StartOffset Range Per DFS Block = 0 ~ 134217727, ChunkSizeToView : 10000
         * ex. moveToPage == 13421, locatedBlocks size == 2
         * First DFS Block's Last StartOffset           : 134210000
         * Second DFS Block's First(Accumulated) Offset : 0 ~ 2271
         * Second DFS Block's Second StartOffset        : 2272
         * Second DFS Block's Last StartOffset          : 134212272
         * Third DFS Block's First(Accumulated) Offset  : 0 ~ 4543
         * Third DFS Block's Second StartOffset         : 4544
         */
        if (fileSize > dfsBlockSize) {
            long accumulatedStartOffset;
            long startOffsetForDfsBlock;
            long startOffsetForSecondDfsBlock = chunkSizeToView - dfsBlockViewResidueSize;
            long dfsBlockLastChunkSize = chunkSizeToView;
            for (int i = 0; i < dfsBlockCount; i++) {
                accumulatedStartOffset = startOffsetForSecondDfsBlock * i;
                accumulatedStartOffsetPerDfsBlocks.add(i, accumulatedStartOffset);

                if (dfsBlockLastChunkSize < startOffsetForSecondDfsBlock) {
                    dfsBlockLastChunkSize += chunkSizeToView;
                }

                // The last DFS block is usually shorter, so compute its offsets separately.
                long lastDfsBlockLastStartOffset = 0;
                if (i == dfsBlockCount - 1) {
                    long lastDfsBlockViewCount = lastDfsBlockSize / chunkSizeToView;
                    long lastDfsBlockResidue = lastDfsBlockSize % chunkSizeToView;

                    if (lastDfsBlockResidue < dfsBlockLastChunkSize) {
                        lastDfsBlockViewCount--;
                    }

                    lastDfsBlockLastStartOffset = (lastDfsBlockViewCount * chunkSizeToView)
                            + (chunkSizeToView - dfsBlockLastChunkSize); //47841808
                    dfsBlockLastChunkSize = lastDfsBlockSize - lastDfsBlockLastStartOffset;
                } else {
                    dfsBlockLastChunkSize -= startOffsetForSecondDfsBlock;
                }
                lastChunkSizePerDfsBlocks.add(i, dfsBlockLastChunkSize);

                long dfsBlockLastStartOffset;
                if (i == dfsBlockCount - 1) {
                    dfsBlockLastStartOffset = lastDfsBlockLastStartOffset;
                } else {
                    dfsBlockLastStartOffset = dfsBlockSize - dfsBlockLastChunkSize;
                }
                lastStartOffsetPerDfsBlocks.add(i, dfsBlockLastStartOffset);

                startOffsetForDfsBlock = dfsBlockLastStartOffset % chunkSizeToView;
                startOffsetPerDfsBlocks.add(i, startOffsetForDfsBlock);
            }

            // Keep the per-DFS-block offset bookkeeping for subsequent requests.
            contentsMap.put("accumulatedStartOffsetPerDfsBlocks", accumulatedStartOffsetPerDfsBlocks);
            contentsMap.put("lastStartOffsetPerDfsBlocks", lastStartOffsetPerDfsBlocks);
            contentsMap.put("lastChunkSizePerDfsBlocks", lastChunkSizePerDfsBlocks);
            contentsMap.put("startOffsetPerDfsBlocks", startOffsetPerDfsBlocks);

            long firstPageCheckPoint = dfsBlockSize / chunkSizeToView;
            long pageCheckPoint = 0;
            long pageCheckChunkSizeToView = chunkSizeToView;
            for (int i = 0; i < 15; i++) {
                pageCheckPoint += firstPageCheckPoint;
                int j = i;
                j++;
                if (j < accumulatedStartOffsetPerDfsBlocks.size()) {
                    if (accumulatedStartOffsetPerDfsBlocks.get(j) > pageCheckChunkSizeToView) {
                        pageCheckChunkSizeToView += chunkSizeToView;
                        pageCheckPoint -= 1;
                    }
                    pageCheckPoints.add(i, pageCheckPoint);
                    pageCheckPoint++;
                }
            }

            // Page check points let customPage requests find the DFS block a page belongs to.
            contentsMap.put("pageCheckPoints", pageCheckPoints);
        }

        /**
         * offsetRange tells which DFS block the requested page falls into;
         * locatedBlocks spans two blocks once moveToPage >= dfsBlockViewCount - 1.
         *
         * ex.
         * offsetRange 0    >> moveToPage < dfsBlockViewCount - 1 : 13420 - (13422-1)
         * offsetRange 1    >> moveToPage == dfsBlockViewCount - 1 : 13421 - (13422-1)
         * offsetRange 2    >> moveToPage > dfsBlockViewCount - 1 : 13422 - (13422-1)
         */
        int offsetRange = (int) (moveToPage / (dfsBlockViewCount - 1));

        LocatedBlock locatedBlock;
        LocatedBlock nextLocatedBlock = null;
        long currentBlockLastStartOffset = 0;
        long currentBlockLastChunkSizeToView = 0;
        long nextBlockFirstStartOffset = 0;
        long nextBlockFirstChunkSizeToView = 0;
        boolean splitViewFlag = false;

        /**
         * Split view: the requested page straddles two DFS blocks.
         * Criteria : DFS Block Size(128MB) and ChunkSizeToView(10000B)
         *
         * Read from the current block's last StartOffset, then continue from the next block's
         * StartOffset(0) for the remainder of the ChunkSizeToView.
         * currentBlockLastStartOffset ~ nextBlockAccumulatedStartOffset
         * ex. 134210000 ~ 2272
         */
        if (nextContentsBlockSize > 1) {
            splitViewFlag = true;
            locatedBlock = locatedBlocks.get(0);
            nextLocatedBlock = locatedBlocks.get(1);

            dfsBlockStartOffset = startOffsetPerDfsBlocks.get(offsetRange);
            contentsMap.put("dfsBlockStartOffset", dfsBlockStartOffset); // ? ? startOffset    

            currentBlockLastStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange - 1);
            currentBlockLastChunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange - 1);
            nextBlockFirstStartOffset = 0;
            nextBlockFirstChunkSizeToView = chunkSizeToView - currentBlockLastChunkSizeToView;
        } else {
            locatedBlock = locatedBlocks.get(0);
        }

        // While the requested page is still within a known DFS block range, keep reporting the DFS block size.
        if (offsetRange < pageCheckPoints.size()) {
            contentsMap.put("dfsBlockSize", dfsBlockSize);
        }

        // Whether the page currently shown was itself split across two blocks.
        boolean currentPageSplitViewFlag = false;
        if (currentContentsBlockSize > 1) {
            currentPageSplitViewFlag = true;
        }

        /**
         * Moving back from the second DFS block into the first (DFS1 -> DFS0):
         * when currentPageSplitViewFlag is true, reset dfsBlockStartOffset to the plain startOffset.
         * ex. 13421 -> 13420
         */
        if (moveToPage < (dfsBlockViewCount - 1) && (moveToPage + 1) == (dfsBlockViewCount - 1)) {
            dfsBlockStartOffset = startOffset;
        }

        // For files larger than one DFS block, whether startOffset must be tracked per DFS block.
        boolean dfsBlockStartOffsetRangeFlag = false;
        if (fileSize > dfsBlockSize && moveToPage >= dfsBlockViewCount && !splitViewFlag) {
            dfsBlockStartOffsetRangeFlag = true;
        }

        if (dfsBlockStartOffsetRangeFlag) {
            if (buttonType.equalsIgnoreCase("nextButton")) {
                if (moveToPage == totalPage) {
                    dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange);
                    chunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange);
                } else {
                    /**
                     * Advance the block-local startOffset by one chunk within the current DFS block.
                     * ex) DFS Block Size : 128 MB
                     * Second DFS Block StartOffset : 2272
                     *
                     * Each DFS block serves its own range of pages, so the block-local startOffset
                     * is advanced relative to that block's first startOffset.
                     * moveToPage range per DFS block
                     *     0 ~ 13421 : First DFS Block
                     * 13422 ~ 26843
                     * 26844 ~ 53687
                     */
                    if (currentContentsBlockSize < 2) {
                        dfsBlockStartOffset += chunkSizeToView;
                    }
                }
            } else if (buttonType.equalsIgnoreCase("prevButton")) {
                // Step back one chunk; if the current page was a split view, start from the previous block's last startOffset.
                if (currentPageSplitViewFlag) {
                    dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange - 1);
                    dfsBlockStartOffset -= chunkSizeToView;
                } else {
                    dfsBlockStartOffset -= chunkSizeToView;
                }
            } else if (buttonType.equalsIgnoreCase("customPage")) { // DFS Block Size ? ? splitView   .
                if (moveToPage == totalPage) {
                    dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange);
                    chunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange);
                } else {
                    long dfsBlockAccumulatedStartOffset = startOffsetPerDfsBlocks.get(offsetRange);
                    long pageCheckPoint = pageCheckPoints.get(offsetRange - 1);
                    long currentPageCount = moveToPage - pageCheckPoint;// 50000-40265=9735

                    // The first page inside this DFS block starts at its accumulated start offset.
                    if (currentPageCount == 1) {
                        dfsBlockStartOffset = dfsBlockAccumulatedStartOffset;
                    } else {
                        long pageRange = chunkSizeToView;
                        currentPageCount--;
                        if (currentPageCount > 0) {
                            pageRange *= currentPageCount; //97340000, 134210000
                        }
                        dfsBlockStartOffset = pageRange + dfsBlockAccumulatedStartOffset; // 97346816
                    }
                }
            } else if (buttonType.equalsIgnoreCase("lastButton")) {
                dfsBlockStartOffset = lastStartOffsetPerDfsBlocks.get(offsetRange);
                chunkSizeToView = lastChunkSizePerDfsBlocks.get(offsetRange);
            }
            contentsMap.put("dfsBlockStartOffset", dfsBlockStartOffset);
        }

        contentsMap.put("currentContentsBlockSize", nextContentsBlockSize);
        contentsMap.put("offsetRange", offsetRange);

        if (fileSize < dfsBlockSize) {
            if (moveToPage == totalPage) {
                chunkSizeToView = lastPageChunkSizeToView;
            }
        }

        /**
         * Case 1. Reuse the bestNode already stored for this block and build the read address from it.
         * Case 2. Otherwise choose the best DataNode for the located block as the new bestNode.
         */
        InetSocketAddress address;
        InetSocketAddress nextAddress = null;
        DatanodeInfo chosenNode;
        DatanodeInfo nextChosenNode;

        if (contentsMap.containsKey("bestNode") && !splitViewFlag && !currentPageSplitViewFlag
                && !dfsBlockStartOffsetRangeFlag && !buttonType.equalsIgnoreCase("customPage")) {
            String bestNode = (String) contentsMap.get("bestNode");
            address = NetUtils.createSocketAddr(bestNode);
            contentsMap.put("bestNode", bestNode);
        } else {
            chosenNode = bestNode(locatedBlock);
            address = NetUtils.createSocketAddr(chosenNode.getName());
            contentsMap.put("bestNode", chosenNode.getName());
            if (splitViewFlag) {
                nextChosenNode = bestNode(nextLocatedBlock);
                nextAddress = NetUtils.createSocketAddr(nextChosenNode.getName());
                contentsMap.put("bestNode", nextChosenNode.getName());
            }
        }

        /**
         * DFS File Block Size in HDFS
         *
         * Files are split into DFS blocks when stored in HDFS; a file smaller than one
         * block is served by a single block, so its locatedBlockCount is 1.
         *
         * Typical DFS Block Sizes
         * 64 (MB) >> 67,108,864 (B)
         * 128 (MB) >> 134,217,728 (B)
         */

        String poolId = locatedBlock.getBlock().getBlockPoolId();
        long blockId = locatedBlock.getBlock().getBlockId();
        long genStamp = locatedBlock.getBlock().getGenerationStamp();

        Token<BlockTokenIdentifier> blockToken = locatedBlock.getBlockToken();
        DatanodeID datanodeID = new DatanodeID(address.getAddress().getHostAddress(), address.getHostName(),
                poolId, address.getPort(), 0, 0, 0);
        Peer peer = dfsClient.newConnectedPeer(address, blockToken, datanodeID);
        CachingStrategy cachingStrategy = dfsClient.getDefaultReadCachingStrategy();
        ExtendedBlock extendedBlock = new ExtendedBlock(poolId, blockId, fileSize, genStamp);

        String contents;

        if (splitViewFlag) {
            String currentBlockContents = streamBlockInAscii(address, blockToken, fileSize,
                    currentBlockLastStartOffset, currentBlockLastChunkSizeToView, fs.getConf(), filePath,
                    dfsClient.getClientName(), extendedBlock, false, peer, datanodeID, cachingStrategy);

            long nextBlockId = nextLocatedBlock.getBlock().getBlockId();
            long nextGenStamp = nextLocatedBlock.getBlock().getGenerationStamp();

            Token<BlockTokenIdentifier> nextBlockToken = nextLocatedBlock.getBlockToken();
            DatanodeID nextDatanodeID = new DatanodeID(nextAddress.getAddress().getHostAddress(),
                    nextAddress.getHostName(), poolId, nextAddress.getPort(), 0, 0, 0);
            Peer nextPeer = dfsClient.newConnectedPeer(nextAddress, nextBlockToken, nextDatanodeID);
            CachingStrategy nextCachingStrategy = dfsClient.getDefaultReadCachingStrategy();
            ExtendedBlock nextExtendedBlock = new ExtendedBlock(poolId, nextBlockId, fileSize, nextGenStamp);

            String nextBlockContents = streamBlockInAscii(nextAddress, nextBlockToken, fileSize,
                    nextBlockFirstStartOffset, nextBlockFirstChunkSizeToView, fs.getConf(), filePath,
                    dfsClient.getClientName(), nextExtendedBlock, false, nextPeer, nextDatanodeID,
                    nextCachingStrategy);

            // Merge the two blocks' contents
            contents = currentBlockContents + nextBlockContents;

            contentsMap.put("startOffset", startOffset);
        } else {
            startOffset = dfsBlockStartOffsetRangeFlag || currentPageSplitViewFlag ? dfsBlockStartOffset
                    : startOffset;

            contents = streamBlockInAscii(address, blockToken, fileSize, startOffset, chunkSizeToView,
                    fs.getConf(), filePath, dfsClient.getClientName(), extendedBlock, false, peer, datanodeID,
                    cachingStrategy);
        }

        contentsMap.put("chunkSizeToView", chunkSizeToView);
        contentsMap.put("lastPageChunkSizeToView", lastPageChunkSizeToView);
        contentsMap.put("contents", contents);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return contentsMap;
}
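
In this example getPort() is one of three pieces pulled out of the chosen datanode's socket address (textual IP, host name, port) to build the DatanodeID. A small sketch of just that extraction, using a loopback address so the lookup always resolves:

import java.net.InetSocketAddress;

public class AddressPartsSketch {
    public static void main(String[] args) {
        InetSocketAddress address = new InetSocketAddress("127.0.0.1", 50010);
        String ip = address.getAddress().getHostAddress(); // "127.0.0.1"
        String host = address.getHostName();               // may trigger a reverse lookup
        int port = address.getPort();                      // 50010
        System.out.println(ip + " / " + host + ":" + port);
    }
}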

From source file:org.apache.hadoop.mapred.TaskTracker.java

/**
 * Start with the local machine name, and the default JobTracker
 */
public TaskTracker(JobConf conf) throws IOException, InterruptedException {
    originalConf = conf;
    FILE_CACHE_SIZE = conf.getInt("mapred.tasktracker.file.cache.size", 2000);
    maxMapSlots = conf.getInt("mapred.tasktracker.map.tasks.maximum", 2);
    maxReduceSlots = conf.getInt("mapred.tasktracker.reduce.tasks.maximum", 2);
    diskHealthCheckInterval = conf.getLong(DISK_HEALTH_CHECK_INTERVAL_PROPERTY,
            DEFAULT_DISK_HEALTH_CHECK_INTERVAL);
    UserGroupInformation.setConfiguration(originalConf);
    aclsManager = new ACLsManager(conf, new JobACLsManager(conf), null);
    this.jobTrackAddr = JobTracker.getAddress(conf);
    String infoAddr = NetUtils.getServerAddress(conf, "tasktracker.http.bindAddress", "tasktracker.http.port",
            "mapred.task.tracker.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String httpBindAddress = infoSocAddr.getHostName();
    int httpPort = infoSocAddr.getPort();
    this.server = new HttpServer("task", httpBindAddress, httpPort, httpPort == 0, conf,
            aclsManager.getAdminsAcl());
    workerThreads = conf.getInt("tasktracker.http.threads", 40);
    server.setThreads(1, workerThreads);
    // let the jsp pages get to the task tracker, config, and other relevant
    // objects
    FileSystem local = FileSystem.getLocal(conf);
    this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
    Class<? extends TaskController> taskControllerClass = conf.getClass("mapred.task.tracker.task-controller",
            DefaultTaskController.class, TaskController.class);

    fConf = new JobConf(conf);
    localStorage = new LocalStorage(fConf.getLocalDirs());
    localStorage.checkDirs();
    taskController = (TaskController) ReflectionUtils.newInstance(taskControllerClass, fConf);
    taskController.setup(localDirAllocator, localStorage);
    lastNumFailures = localStorage.numFailures();

    // create user log manager
    setUserLogManager(new UserLogManager(conf, taskController));
    SecurityUtil.login(originalConf, TT_KEYTAB_FILE, TT_USER_NAME);

    initialize();
    this.shuffleServerMetrics = ShuffleServerInstrumentation.create(this);
    server.setAttribute("task.tracker", this);
    server.setAttribute("local.file.system", local);

    server.setAttribute("log", LOG);
    server.setAttribute("localDirAllocator", localDirAllocator);
    server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);

    String exceptionStackRegex = conf.get("mapreduce.reduce.shuffle.catch.exception.stack.regex");
    String exceptionMsgRegex = conf.get("mapreduce.reduce.shuffle.catch.exception.message.regex");
    // Percent of shuffle exceptions (out of sample size) seen before it's
    // fatal - acceptable values are from 0 to 1.0, 0 disables the check.
    // ie. 0.3 = 30% of the last X number of requests matched the exception,
    // so abort.
    float shuffleExceptionLimit = conf.getFloat("mapreduce.reduce.shuffle.catch.exception.percent.limit.fatal",
            0);
    if ((shuffleExceptionLimit > 1) || (shuffleExceptionLimit < 0)) {
        throw new IllegalArgumentException(
                "mapreduce.reduce.shuffle.catch.exception.percent.limit.fatal " + " must be between 0 and 1.0");
    }

    // The number of trailing requests we track, used for the fatal
    // limit calculation
    int shuffleExceptionSampleSize = conf.getInt("mapreduce.reduce.shuffle.catch.exception.sample.size", 1000);
    if (shuffleExceptionSampleSize <= 0) {
        throw new IllegalArgumentException(
                "mapreduce.reduce.shuffle.catch.exception.sample.size " + " must be greater than 0");
    }
    shuffleExceptionTracking = new ShuffleExceptionTracker(shuffleExceptionSampleSize, exceptionStackRegex,
            exceptionMsgRegex, shuffleExceptionLimit);

    server.setAttribute("shuffleExceptionTracking", shuffleExceptionTracking);

    server.addInternalServlet("mapOutput", "/mapOutput", MapOutputServlet.class);
    server.addServlet("taskLog", "/tasklog", TaskLogServlet.class);
    server.start();
    this.httpPort = server.getPort();
    checkJettyPort(httpPort);
    LOG.info("FILE_CACHE_SIZE for mapOutputServlet set to : " + FILE_CACHE_SIZE);
    mapRetainSize = conf.getLong(TaskLogsTruncater.MAP_USERLOG_RETAIN_SIZE,
            TaskLogsTruncater.DEFAULT_RETAIN_SIZE);
    reduceRetainSize = conf.getLong(TaskLogsTruncater.REDUCE_USERLOG_RETAIN_SIZE,
            TaskLogsTruncater.DEFAULT_RETAIN_SIZE);
}
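
Here getPort() recovers the port half of a "host:port" string that NetUtils.createSocketAddr parsed from configuration. A plain-Java sketch of that round trip (the parse helper below is a simplification, not Hadoop's NetUtils, and the address string is just an example):

import java.net.InetSocketAddress;

public class HostPortConfigSketch {
    // Simplified stand-in for NetUtils.createSocketAddr: split "host:port".
    static InetSocketAddress parse(String hostPort) {
        int colon = hostPort.lastIndexOf(':');
        return InetSocketAddress.createUnresolved(hostPort.substring(0, colon),
                Integer.parseInt(hostPort.substring(colon + 1)));
    }

    public static void main(String[] args) {
        InetSocketAddress addr = parse("0.0.0.0:50060");
        System.out.println(addr.getHostName()); // 0.0.0.0
        System.out.println(addr.getPort());     // 50060
    }
}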

From source file:org.apache.hadoop.mapred.TaskTracker.java

/**
 * Do the real constructor work here.  It's in a separate method
 * so we can call it again and "recycle" the object after calling
 * close().
 */
synchronized void initialize() throws IOException, InterruptedException {
    this.fConf = new JobConf(originalConf);

    LOG.info("Starting tasktracker with owner as " + getMROwner().getShortUserName());

    localFs = FileSystem.getLocal(fConf);
    if (fConf.get("slave.host.name") != null) {
        this.localHostname = fConf.get("slave.host.name");
    }
    if (localHostname == null) {
        this.localHostname = DNS.getDefaultHost(fConf.get("mapred.tasktracker.dns.interface", "default"),
                fConf.get("mapred.tasktracker.dns.nameserver", "default"));
    }

    final String dirs = localStorage.getDirsString();
    fConf.setStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY, dirs);
    LOG.info("Good mapred local directories are: " + dirs);
    taskController.setConf(fConf);
    // Setup task controller so that deletion of user dirs happens properly
    taskController.setup(localDirAllocator, localStorage);
    server.setAttribute("conf", fConf);

    deleteUserDirectories(fConf);

    // NB: deleteLocalFiles uses the configured local dirs, but does not 
    // fail if a local directory has failed. 
    fConf.deleteLocalFiles(SUBDIR);
    final FsPermission ttdir = FsPermission.createImmutable((short) 0755);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, SUBDIR), ttdir);
    }
    fConf.deleteLocalFiles(TT_PRIVATE_DIR);
    final FsPermission priv = FsPermission.createImmutable((short) 0700);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, TT_PRIVATE_DIR), priv);
    }
    fConf.deleteLocalFiles(TT_LOG_TMP_DIR);
    final FsPermission pub = FsPermission.createImmutable((short) 0755);
    for (String s : localStorage.getDirs()) {
        localFs.mkdirs(new Path(s, TT_LOG_TMP_DIR), pub);
    }
    // Create userlogs directory under all good mapred-local-dirs
    for (String s : localStorage.getDirs()) {
        Path userLogsDir = new Path(s, TaskLog.USERLOGS_DIR_NAME);
        if (!localFs.exists(userLogsDir)) {
            localFs.mkdirs(userLogsDir, pub);
        }
    }
    // Clear out state tables
    this.tasks.clear();
    this.runningTasks = new LinkedHashMap<TaskAttemptID, TaskInProgress>();
    this.runningJobs = new TreeMap<JobID, RunningJob>();
    this.mapTotal = 0;
    this.reduceTotal = 0;
    this.acceptNewTasks = true;
    this.status = null;

    this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L);
    this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L);
    //tweak the probe sample size (make it a function of numCopiers)
    probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500);

    createInstrumentation();

    // bind address
    String address = NetUtils.getServerAddress(fConf, "mapred.task.tracker.report.bindAddress",
            "mapred.task.tracker.report.port", "mapred.task.tracker.report.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    String bindAddress = socAddr.getHostName();
    int tmpPort = socAddr.getPort();

    this.jvmManager = new JvmManager(this);

    // Set service-level authorization security policy
    if (this.fConf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
                .newInstance(this.fConf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
                        MapReducePolicyProvider.class, PolicyProvider.class), this.fConf));
        ServiceAuthorizationManager.refresh(fConf, policyProvider);
    }

    // RPC initialization
    int max = maxMapSlots > maxReduceSlots ? maxMapSlots : maxReduceSlots;
    //set the num handlers to max*2 since canCommit may wait for the duration
    //of a heartbeat RPC
    this.taskReportServer = RPC.getServer(this, bindAddress, tmpPort, 2 * max, false, this.fConf,
            this.jobTokenSecretManager);
    this.taskReportServer.start();

    // get the assigned address
    this.taskReportAddress = taskReportServer.getListenerAddress();
    this.fConf.set("mapred.task.tracker.report.address",
            taskReportAddress.getHostName() + ":" + taskReportAddress.getPort());
    LOG.info("TaskTracker up at: " + this.taskReportAddress);

    this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress;
    LOG.info("Starting tracker " + taskTrackerName);

    // Initialize DistributedCache
    this.distributedCacheManager = new TrackerDistributedCacheManager(this.fConf, taskController);
    this.distributedCacheManager.startCleanupThread();

    this.jobClient = (InterTrackerProtocol) UserGroupInformation.getLoginUser()
            .doAs(new PrivilegedExceptionAction<Object>() {
                public Object run() throws IOException {
                    return RPC.waitForProxy(InterTrackerProtocol.class, InterTrackerProtocol.versionID,
                            jobTrackAddr, fConf);
                }
            });
    this.justInited = true;
    this.running = true;
    // start the thread that will fetch map task completion events
    this.mapEventsFetcher = new MapEventsFetcherThread();
    mapEventsFetcher.setDaemon(true);
    mapEventsFetcher.setName("Map-events fetcher for all reduce tasks " + "on " + taskTrackerName);
    mapEventsFetcher.start();

    Class<? extends ResourceCalculatorPlugin> clazz = fConf.getClass(TT_RESOURCE_CALCULATOR_PLUGIN, null,
            ResourceCalculatorPlugin.class);
    resourceCalculatorPlugin = ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, fConf);
    LOG.info(" Using ResourceCalculatorPlugin : " + resourceCalculatorPlugin);
    initializeMemoryManagement();

    getUserLogManager().clearOldUserLogs(fConf);

    setIndexCache(new IndexCache(this.fConf));

    mapLauncher = new TaskLauncher(TaskType.MAP, maxMapSlots);
    reduceLauncher = new TaskLauncher(TaskType.REDUCE, maxReduceSlots);
    mapLauncher.start();
    reduceLauncher.start();

    // create a localizer instance
    setLocalizer(new Localizer(localFs, localStorage.getDirs()));

    //Start up node health checker service.
    if (shouldStartHealthMonitor(this.fConf)) {
        startHealthMonitor(this.fConf);
    }

    // Start thread to monitor jetty bugs
    startJettyBugMonitor();

    oobHeartbeatOnTaskCompletion = fConf.getBoolean(TT_OUTOFBAND_HEARBEAT, false);
    oobHeartbeatDamper = fConf.getInt(TT_OUTOFBAND_HEARTBEAT_DAMPER, DEFAULT_OOB_HEARTBEAT_DAMPER);
}
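
A recurring step in initialize() is binding a server and then writing the actually-bound "host:port" back into the configuration so other components see the real port. A minimal sketch of that write-back, using a plain ServerSocket and a map in place of the Hadoop configuration:

import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.HashMap;
import java.util.Map;

public class ReportBoundAddressSketch {
    public static void main(String[] args) throws Exception {
        Map<String, String> conf = new HashMap<>();
        try (ServerSocket server = new ServerSocket(0)) { // requested port 0 -> ephemeral port
            InetSocketAddress bound = (InetSocketAddress) server.getLocalSocketAddress();
            conf.put("report.address", bound.getHostName() + ":" + bound.getPort());
            System.out.println(conf.get("report.address"));
        }
    }
}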

From source file:org.apache.hadoop.dfs.FSNamesystem.java

/**
 * Initialize FSNamesystem.
 */
private void initialize(NameNode nn, Configuration conf) throws IOException {
    this.systemStart = now();
    this.startTime = new Date(systemStart);
    setConfigurationParameters(conf);

    this.localMachine = nn.getNameNodeAddress().getHostName();
    this.port = nn.getNameNodeAddress().getPort();
    this.registerMBean(conf); // register the MBean for the FSNamesystemStatus
    this.dir = new FSDirectory(this, conf);
    StartupOption startOpt = NameNode.getStartupOption(conf);
    this.dir.loadFSImage(getNamespaceDirs(conf), startOpt);
    long timeTakenToLoadFSImage = now() - systemStart;
    LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
    NameNode.getNameNodeMetrics().fsImageLoadTime.set((int) timeTakenToLoadFSImage);
    this.safeMode = new SafeModeInfo(conf);
    setBlockTotal();
    pendingReplications = new PendingReplicationBlocks(
            conf.getInt("dfs.replication.pending.timeout.sec", -1) * 1000L);
    this.hbthread = new Daemon(new HeartbeatMonitor());
    this.lmthread = new Daemon(leaseManager.new Monitor());
    this.replthread = new Daemon(new ReplicationMonitor());
    this.resthread = new Daemon(new ResolutionMonitor());
    hbthread.start();
    lmthread.start();
    replthread.start();
    resthread.start();

    this.hostsReader = new HostsFileReader(conf.get("dfs.hosts", ""), conf.get("dfs.hosts.exclude", ""));
    this.dnthread = new Daemon(
            new DecommissionManager(this).new Monitor(conf.getInt("dfs.namenode.decommission.interval", 30),
                    conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5)));
    dnthread.start();

    this.dnsToSwitchMapping = (DNSToSwitchMapping) ReflectionUtils.newInstance(conf.getClass(
            "topology.node.switch.mapping.impl", ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);

    String infoAddr = NetUtils.getServerAddress(conf, "dfs.info.bindAddress", "dfs.info.port",
            "dfs.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new StatusHttpServer("dfs", infoHost, tmpInfoPort, tmpInfoPort == 0);
    InetSocketAddress secInfoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(conf);
    sslConf.addResource(conf.get("https.keystore.info.rsrc", "sslinfo.xml"));
    String keyloc = sslConf.get("https.keystore.location");
    if (null != keyloc) {
        this.infoServer.addSslListener(secInfoSocAddr, keyloc, sslConf.get("https.keystore.password", ""),
                sslConf.get("https.keystore.keypassword", ""));
    }
    // assume same ssl port for all datanodes
    InetSocketAddress datanodeSslPort = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 50475));
    this.infoServer.setAttribute("datanode.https.port", datanodeSslPort.getPort());
    this.infoServer.setAttribute("name.node", nn);
    this.infoServer.setAttribute("name.system.image", getFSImage());
    this.infoServer.setAttribute("name.conf", conf);
    this.infoServer.addServlet("fsck", "/fsck", FsckServlet.class);
    this.infoServer.addServlet("getimage", "/getimage", GetImageServlet.class);
    this.infoServer.addServlet("listPaths", "/listPaths/*", ListPathsServlet.class);
    this.infoServer.addServlet("data", "/data/*", FileDataServlet.class);
    this.infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    this.infoPort = this.infoServer.getPort();
    conf.set("dfs.http.address", infoHost + ":" + infoPort);
    LOG.info("Web-server up at: " + infoHost + ":" + infoPort);
}

From source file:org.apache.hadoop.mapred.JobTracker.java

JobTracker(final JobConf conf, String identifier, Clock clock, QueueManager qm)
        throws IOException, InterruptedException {
    this.queueManager = qm;
    this.clock = clock;
    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();
    // find the owner of the process
    // get the desired principal to load
    UserGroupInformation.setConfiguration(conf);
    SecurityUtil.login(conf, JT_KEYTAB_FILE, JT_USER_NAME, localMachine);

    long secretKeyInterval = conf.getLong(DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    MAX_JOBCONF_SIZE = conf.getLong(MAX_USER_JOBCONF_SIZE_KEY, MAX_JOBCONF_SIZE);
    //
    // Grab some static constants
    //
    TASKTRACKER_EXPIRY_INTERVAL = conf.getLong("mapred.tasktracker.expiry.interval", 10 * 60 * 1000);
    RETIRE_JOB_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.interval", 24 * 60 * 60 * 1000);
    RETIRE_JOB_CHECK_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.check", 60 * 1000);
    retiredJobsCacheSize = conf.getInt("mapred.job.tracker.retiredjobs.cache.size", 1000);
    MAX_COMPLETE_USER_JOBS_IN_MEMORY = conf.getInt("mapred.jobtracker.completeuserjobs.maximum", 100);

    // values related to heuristic graylisting (a "fault" is a per-job
    // blacklisting; too many faults => node is graylisted across all jobs):
    TRACKER_FAULT_TIMEOUT_WINDOW = // 3 hours
            conf.getInt("mapred.jobtracker.blacklist.fault-timeout-window", 3 * 60);
    TRACKER_FAULT_BUCKET_WIDTH = // 15 minutes
            conf.getInt("mapred.jobtracker.blacklist.fault-bucket-width", 15);
    TRACKER_FAULT_THRESHOLD = conf.getInt("mapred.max.tracker.blacklists", 4);
    // future:  rename to "mapred.jobtracker.blacklist.fault-threshold" for
    // namespace consistency

    if (TRACKER_FAULT_BUCKET_WIDTH > TRACKER_FAULT_TIMEOUT_WINDOW) {
        TRACKER_FAULT_BUCKET_WIDTH = TRACKER_FAULT_TIMEOUT_WINDOW;
    }
    TRACKER_FAULT_BUCKET_WIDTH_MSECS = (long) TRACKER_FAULT_BUCKET_WIDTH * 60 * 1000;

    // ideally, TRACKER_FAULT_TIMEOUT_WINDOW should be an integral multiple of
    // TRACKER_FAULT_BUCKET_WIDTH, but round up just in case:
    NUM_FAULT_BUCKETS = (TRACKER_FAULT_TIMEOUT_WINDOW + TRACKER_FAULT_BUCKET_WIDTH - 1)
            / TRACKER_FAULT_BUCKET_WIDTH;

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // This configuration is there solely for tuning purposes and
    // once this feature has been tested in real clusters and an appropriate
    // value for the threshold has been found, this config might be taken out.
    AVERAGE_BLACKLIST_THRESHOLD = conf.getFloat("mapred.cluster.average.blacklist.threshold", 0.5f);

    // This is a directory of temporary submission files.  We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    initializeTaskMemoryRelatedConfig();

    // Read the hosts/exclude files to restrict access to the jobtracker.
    this.hostsReader = new HostsFileReader(conf.get("mapred.hosts", ""), conf.get("mapred.hosts.exclude", ""));
    aclsManager = new ACLsManager(conf, new JobACLsManager(conf), queueManager);

    LOG.info("Starting jobtracker with owner as " + getMROwner().getShortUserName());

    // Create the scheduler
    Class<? extends TaskScheduler> schedulerClass = conf.getClass("mapred.jobtracker.taskScheduler",
            JobQueueTaskScheduler.class, TaskScheduler.class);
    taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf);

    // Set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
    }

    int handlerCount = conf.getInt("mapred.job.tracker.handler.count", 10);
    this.interTrackerServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), handlerCount, false, conf,
            secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    String infoAddr = NetUtils.getServerAddress(conf, "mapred.job.tracker.info.bindAddress",
            "mapred.job.tracker.info.port", "mapred.job.tracker.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = clock.getTime();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf,
            aclsManager.getAdminsAcl());
    infoServer.setAttribute("job.tracker", this);
    // initialize history parameters.
    final JobTracker jtFinal = this;
    getMROwner().doAs(new PrivilegedExceptionAction<Boolean>() {
        @Override
        public Boolean run() throws Exception {
            JobHistory.init(jtFinal, conf, jtFinal.localMachine, jtFinal.startTime);
            return true;
        }
    });

    infoServer.addServlet("reducegraph", "/taskgraph", TaskGraphServlet.class);
    infoServer.start();

    this.trackerIdentifier = identifier;

    createInstrumentation();

    // The rpc/web-server ports can be ephemeral ports... 
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set("mapred.job.tracker", (this.localMachine + ":" + this.port));
    this.localFs = FileSystem.getLocal(conf);
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set("mapred.job.tracker.http.address", infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());

    // start the recovery manager
    recoveryManager = new RecoveryManager();

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = getMROwner().doAs(new PrivilegedExceptionAction<FileSystem>() {
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }
            // clean up the system dir, which will only work if hdfs is out of 
            // safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(getMROwner().getShortUserName())) {
                    throw new AccessControlException("The systemdir " + systemDir + " is not owned by "
                            + getMROwner().getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                }
            } catch (FileNotFoundException fnf) {
            } //ignore
            // Make sure that the backup data is preserved
            FileStatus[] systemDirData = fs.listStatus(this.systemDir);
            // Check if the history is enabled .. as we cant have persistence with 
            // history disabled
            if (conf.getBoolean("mapred.jobtracker.restart.recover", false) && systemDirData != null) {
                for (FileStatus status : systemDirData) {
                    try {
                        recoveryManager.checkAndAddJob(status);
                    } catch (Throwable t) {
                        LOG.warn("Failed to add the job " + status.getPath().getName(), t);
                    }
                }

                // Check if there are jobs to be recovered
                hasRestarted = recoveryManager.shouldRecover();
                if (hasRestarted) {
                    break; // there is something to recover; otherwise fall through and clean the sys dir
                }
            }
            LOG.info("Cleaning up the system directory");
            fs.delete(systemDir, true);
            if (FileSystem.mkdirs(fs, systemDir, new FsPermission(SYSTEM_DIR_PERMISSION))) {
                break;
            }
            LOG.error("Mkdirs failed to create " + systemDir);
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on mapred.system.dir (" + systemDir + ") because of permissions.");
            LOG.warn(
                    "Manually delete the mapred.system.dir (" + systemDir + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ", ace);
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // Same with 'localDir' except it's always on the local disk.
    if (!hasRestarted) {
        jobConf.deleteLocalFiles(SUBDIR);
    }

    // Initialize history DONE folder
    FileSystem historyFS = getMROwner().doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
            JobHistory.initDone(conf, fs);
            final String historyLogDir = JobHistory.getCompletedJobHistoryLocation().toString();
            infoServer.setAttribute("historyLogDir", historyLogDir);

            infoServer.setAttribute("serialNumberDirectoryDigits",
                    Integer.valueOf(JobHistory.serialNumberDirectoryDigits()));

            infoServer.setAttribute("serialNumberTotalDigits",
                    Integer.valueOf(JobHistory.serialNumberTotalDigits()));

            return new Path(historyLogDir).getFileSystem(conf);
        }
    });
    infoServer.setAttribute("fileSys", historyFS);
    infoServer.setAttribute("jobConf", conf);
    infoServer.setAttribute("aclManager", aclsManager);

    if (JobHistoryServer.isEmbedded(conf)) {
        LOG.info("History server being initialized in embedded mode");
        jobHistoryServer = new JobHistoryServer(conf, aclsManager, infoServer);
        jobHistoryServer.start();
        LOG.info("Job History Server web address: " + JobHistoryServer.getAddress(conf));
    }

    this.dnsToSwitchMapping = ReflectionUtils.newInstance(conf.getClass("topology.node.switch.mapping.impl",
            ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
    this.numTaskCacheLevels = conf.getInt("mapred.task.cache.levels", NetworkTopology.DEFAULT_HOST_LEVEL);

    //initializes the job status store
    completedJobStatusStore = new CompletedJobStatusStore(conf, aclsManager);
}
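
The JobTracker code above binds to possibly-ephemeral ports and then reads the actual ports back (getListenerAddress().getPort(), infoServer.getPort()) before advertising its address in the configuration. The following is a minimal, self-contained sketch of that pattern using only java.net (the Hadoop Server and HttpServer classes are not shown); the class name is illustrative.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralPortExample {
    public static void main(String[] args) throws IOException {
        try (ServerSocket server = new ServerSocket()) {
            // Port 0 asks the OS to pick any free port.
            server.bind(new InetSocketAddress("localhost", 0));
            // Read back the address that was actually bound and advertise its real port.
            InetSocketAddress bound = (InetSocketAddress) server.getLocalSocketAddress();
            System.out.println("Listening at " + bound.getHostString() + ":" + bound.getPort());
        }
    }
}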

From source file:org.apache.hadoop.gateway.GatewayBasicFuncTest.java

@Test
public void testBasicHdfsUseCase() throws IOException {
    String root = "/tmp/GatewayBasicFuncTest/testBasicHdfsUseCase";
    String username = "hdfs";
    String password = "hdfs-password";
    InetSocketAddress gatewayAddress = driver.gateway.getAddresses()[0];

    // Attempt to delete the test directory in case a previous run failed and left it behind.
    // Ignore any result.
    driver.getMock("WEBHDFS").expect().method("DELETE").from("testBasicHdfsUseCase-1").pathInfo("/v1" + root)
            .queryParam("op", "DELETE").queryParam("user.name", username).queryParam("recursive", "true")
            .respond().status(HttpStatus.SC_OK);
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "DELETE").queryParam("recursive", "true").expect().log().all()
            .statusCode(HttpStatus.SC_OK).when().delete(driver.getUrl("WEBHDFS") + "/v1" + root
                    + (driver.isUseGateway() ? "" : "?user.name=" + username));
    driver.assertComplete();

    /* Create a directory.
    curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
            
    The client receives a response with a boolean JSON object:
    HTTP/1.1 HttpStatus.SC_OK OK
    Content-Type: application/json
    Transfer-Encoding: chunked
            
    {"boolean": true}
    */
    driver.getMock("WEBHDFS").expect().method("PUT").pathInfo("/v1" + root + "/dir").queryParam("op", "MKDIRS")
            .queryParam("user.name", username).respond().status(HttpStatus.SC_OK)
            .content(driver.getResourceBytes("webhdfs-success.json")).contentType("application/json");
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "MKDIRS").expect()
            //.log().all();
            .statusCode(HttpStatus.SC_OK).contentType("application/json").content("boolean", is(true)).when()
            .put(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir");
    driver.assertComplete();

    driver.getMock("WEBHDFS").expect().method("GET").pathInfo("/v1" + root).queryParam("op", "LISTSTATUS")
            .queryParam("user.name", username).respond().status(HttpStatus.SC_OK)
            .content(driver.getResourceBytes("webhdfs-liststatus-test.json")).contentType("application/json");
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_OK).content("FileStatuses.FileStatus[0].pathSuffix", is("dir")).when()
            .get(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();

    //NEGATIVE: Test a bad password.
    given()
            //.log().all()
            .auth().preemptive().basic(username, "invalid-password").header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_UNAUTHORIZED).when().get(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();

    //NEGATIVE: Test a bad user.
    given()
            //.log().all()
            .auth().preemptive().basic("hdfs-user", "hdfs-password").header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_UNAUTHORIZED).when().get(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();

    //NEGATIVE: Test a valid but unauthorized user.
    given()
            //.log().all()
            .auth().preemptive().basic("mapred-user", "mapred-password").header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_UNAUTHORIZED).when().get(driver.getUrl("WEBHDFS") + "/v1" + root);

    /* Add a file.
    curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATE
               [&overwrite=<true|false>][&blocksize=<LONG>][&replication=<SHORT>]
             [&permission=<OCTAL>][&buffersize=<INT>]"
            
    The request is redirected to a datanode where the file data is to be written:
    HTTP/1.1 307 TEMPORARY_REDIRECT
    Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE...
    Content-Length: 0
            
    Step 2: Submit another HTTP PUT request, using the URL in the Location header, with the file data to be written.
    curl -i -X PUT -T <LOCAL_FILE> "http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE..."
            
    The client receives an HttpStatus.SC_CREATED Created response with zero content length and the WebHDFS URI of the file in the Location header:
    HTTP/1.1 HttpStatus.SC_CREATED Created
    Location: webhdfs://<HOST>:<PORT>/<PATH>
    Content-Length: 0
    */
    driver.getMock("WEBHDFS").expect().method("PUT").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "CREATE").queryParam("user.name", username).respond()
            .status(HttpStatus.SC_TEMPORARY_REDIRECT).header("Location",
                    driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file?op=CREATE&user.name=hdfs");
    driver.getMock("DATANODE").expect().method("PUT").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "CREATE").queryParam("user.name", username).contentType("text/plain")
            .content(driver.getResourceBytes("test.txt"))
            //.content( driver.gerResourceBytes( "hadoop-examples.jar" ) )
            .respond().status(HttpStatus.SC_CREATED)
            .header("Location", "webhdfs://" + driver.getRealAddr("DATANODE") + "/v1" + root + "/dir/file");
    Response response = given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "CREATE").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_TEMPORARY_REDIRECT).when()
            .put(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file");
    String location = response.getHeader("Location");
    log.debug("Redirect location: " + response.getHeader("Location"));
    if (driver.isUseGateway()) {
        MatcherAssert.assertThat(location,
                startsWith("http://" + gatewayAddress.getHostName() + ":" + gatewayAddress.getPort() + "/"));
        MatcherAssert.assertThat(location, containsString("?_="));
    }
    MatcherAssert.assertThat(location, not(containsString("host=")));
    MatcherAssert.assertThat(location, not(containsString("port=")));
    response = given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .content(driver.getResourceBytes("test.txt")).contentType("text/plain").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_CREATED).when().put(location);
    location = response.getHeader("Location");
    log.debug("Created location: " + location);
    if (driver.isUseGateway()) {
        MatcherAssert.assertThat(location,
                startsWith("http://" + gatewayAddress.getHostName() + ":" + gatewayAddress.getPort() + "/"));
    }
    driver.assertComplete();

    /* Get the file.
    curl -i -L "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=OPEN
               [&offset=<LONG>][&length=<LONG>][&buffersize=<INT>]"
            
    The request is redirected to a datanode where the file data can be read:
    HTTP/1.1 307 TEMPORARY_REDIRECT
    Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=OPEN...
    Content-Length: 0
            
    The client follows the redirect to the datanode and receives the file data:
    HTTP/1.1 HttpStatus.SC_OK OK
    Content-Type: application/octet-stream
    Content-Length: 22
            
    Hello, webhdfs user!
    */
    driver.getMock("WEBHDFS").expect().method("GET").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "OPEN").queryParam("user.name", username).respond()
            .status(HttpStatus.SC_TEMPORARY_REDIRECT).header("Location",
                    driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file?op=OPEN&user.name=hdfs");
    driver.getMock("DATANODE").expect().method("GET").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "OPEN").queryParam("user.name", username).respond().status(HttpStatus.SC_OK)
            .contentType("text/plain").content(driver.getResourceBytes("test.txt"));
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "OPEN").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_OK).content(is("TEST")).when()
            .get(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file");
    driver.assertComplete();

    /* Delete the directory.
    curl -i -X DELETE "http://<host>:<port>/webhdfs/v1/<path>?op=DELETE
                         [&recursive=<true|false>]"
            
    The client receives a response with a boolean JSON object:
    HTTP/1.1 HttpStatus.SC_OK OK
    Content-Type: application/json
    Transfer-Encoding: chunked
            
    {"boolean": true}
    */
    // Mock the interaction with the namenode.
    driver.getMock("WEBHDFS").expect().from("testBasicHdfsUseCase-1").method("DELETE").pathInfo("/v1" + root)
            .queryParam("op", "DELETE").queryParam("user.name", username).queryParam("recursive", "true")
            .respond().status(HttpStatus.SC_OK);
    given().auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "DELETE").queryParam("recursive", "true").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_OK).when().delete(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();
}
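
For reference, the Location assertions above derive the expected URL prefix from the gateway's InetSocketAddress. Below is a small, hypothetical helper (not part of GatewayBasicFuncTest) distilling that use of getHostName() and getPort(); the host and port values are illustrative only.

import java.net.InetSocketAddress;

final class UrlPrefixExample {
    // Mirrors the expected prefix used in the assertions: "http://" + host + ":" + port + "/"
    static String httpPrefix(InetSocketAddress addr) {
        return "http://" + addr.getHostName() + ":" + addr.getPort() + "/";
    }

    public static void main(String[] args) {
        InetSocketAddress gateway = new InetSocketAddress("localhost", 8443);
        System.out.println(httpPrefix(gateway)); // prints http://localhost:8443/
    }
}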