Example usage for java.io DataOutputStream writeShort

Introduction

On this page you can find example usages of java.io DataOutputStream writeShort.

Prototype

public final void writeShort(int v) throws IOException 

Document

Writes a short to the underlying output stream as two bytes, high byte first.
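A minimal, self-contained sketch of that contract: write one short and inspect the two big-endian bytes. The class name here is hypothetical and not taken from any example below.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteShortDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bout);
        out.writeShort(0x1234); // accepts an int, writes the low 16 bits
        out.flush();

        byte[] bytes = bout.toByteArray();
        System.out.printf("%02X %02X%n", bytes[0], bytes[1]); // prints "12 34": high byte first

        // DataInputStream.readShort consumes the same big-endian encoding
        short v = new DataInputStream(new ByteArrayInputStream(bytes)).readShort();
        System.out.println(v == 0x1234); // true
    }
}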

Usage

From source file:org.exist.dom.ElementImpl.java

/**
 * Serializes a (persistent DOM) Element to a byte array
 *
 * data = signature childCount nodeIdUnitsLength nodeId attributesCount localNameId namespace? prefixData?
 *
 * signature = 0x20 | localNameLength | hasNamespace? | isDirty?
 *
 * localNameLength = noContent OR intContent OR shortContent OR byteContent
 * noContent = 0x0
 * intContent = 0x1
 * shortContent = 0x2
 * byteContent = 0x3
 *
 * hasNamespace = 0x10
 *
 * isDirty = 0x8
 *
 * childCount = [int] (4 bytes) The number of child nodes
 * nodeIdUnitsLength = [short] (2 bytes) The number of units of the element's NodeId
 * nodeId = @see org.exist.numbering.DLNBase#serialize(byte[], int)
 * attributesCount = [short] (2 bytes) The number of attributes
 *
 * localNameId = [int] (4 bytes) | [short] (2 bytes) | [byte] (1 byte). The Id of the element's local name from SymbolTable (symbols.dbx)
 *
 * namespace = namespaceUriId namespacePrefixLength elementNamespacePrefix?
 * namespaceUriId = [short] (2 bytes) The Id of the namespace URI from SymbolTable (symbols.dbx)
 * namespacePrefixLength = [short] (2 bytes)
 * elementNamespacePrefix = eUtf8
 *
 * eUtf8 = @see org.exist.util.UTF8#encode(java.lang.String, byte[], int)
 *
 * prefixData = namespaceMappingsCount namespaceMapping+
 * namespaceMappingsCount = [short] (2 bytes)
 * namespaceMapping = namespacePrefix namespaceUriId
 * namespacePrefix = jUtf8
 *
 * jUtf8 = @see java.io.DataOutputStream#writeUTF(java.lang.String)
 */
@Override
public byte[] serialize() {
    if (nodeId == null) {
        throw new RuntimeException("nodeId = null for element: " + getQName().getStringValue());
    }
    try {
        final SymbolTable symbols = ownerDocument.getBrokerPool().getSymbols();
        byte[] prefixData = null;
        // serialize namespace prefixes declared in this element
        if (declaresNamespacePrefixes()) {
            final ByteArrayOutputStream bout = new ByteArrayOutputStream();
            final DataOutputStream out = new DataOutputStream(bout);
            out.writeShort(namespaceMappings.size());
            for (final Iterator<Map.Entry<String, String>> i = namespaceMappings.entrySet().iterator(); i
                    .hasNext();) {
                final Map.Entry<String, String> entry = i.next();
                out.writeUTF(entry.getKey());
                final short nsId = symbols.getNSSymbol(entry.getValue());
                out.writeShort(nsId);
            }
            prefixData = bout.toByteArray();
        }

        final short id = symbols.getSymbol(this);
        final boolean hasNamespace = nodeName.needsNamespaceDecl();
        short nsId = 0;
        if (hasNamespace) {
            nsId = symbols.getNSSymbol(nodeName.getNamespaceURI());
        }
        final byte idSizeType = Signatures.getSizeType(id);
        byte signature = (byte) ((Signatures.Elem << 0x5) | idSizeType);
        int prefixLen = 0;
        if (hasNamespace) {
            if (nodeName.getPrefix() != null && nodeName.getPrefix().length() > 0) {
                prefixLen = UTF8.encoded(nodeName.getPrefix());
            }
            signature |= 0x10;
        }
        if (isDirty) {
            signature |= 0x8;
        }
        final int nodeIdLen = nodeId.size();
        final byte[] data = ByteArrayPool.getByteArray(
                StoredNode.LENGTH_SIGNATURE_LENGTH + LENGTH_ELEMENT_CHILD_COUNT + NodeId.LENGTH_NODE_ID_UNITS
                        + nodeIdLen + LENGTH_ATTRIBUTES_COUNT + Signatures.getLength(idSizeType)
                        + (hasNamespace ? prefixLen + 4 : 0) + (prefixData != null ? prefixData.length : 0));
        int next = 0;
        data[next] = signature;
        next += StoredNode.LENGTH_SIGNATURE_LENGTH;
        ByteConversion.intToByte(children, data, next);
        next += LENGTH_ELEMENT_CHILD_COUNT;
        ByteConversion.shortToByte((short) nodeId.units(), data, next);
        next += NodeId.LENGTH_NODE_ID_UNITS;
        nodeId.serialize(data, next);
        next += nodeIdLen;
        ByteConversion.shortToByte(attributes, data, next);
        next += LENGTH_ATTRIBUTES_COUNT;
        Signatures.write(idSizeType, id, data, next);
        next += Signatures.getLength(idSizeType);
        if (hasNamespace) {
            ByteConversion.shortToByte(nsId, data, next);
            next += LENGTH_NS_ID;
            ByteConversion.shortToByte((short) prefixLen, data, next);
            next += LENGTH_PREFIX_LENGTH;
            if (nodeName.getPrefix() != null && nodeName.getPrefix().length() > 0) {
                UTF8.encode(nodeName.getPrefix(), data, next);
            }
            next += prefixLen;
        }
        if (prefixData != null) {
            System.arraycopy(prefixData, 0, data, next, prefixData.length);
        }
        return data;
    } catch (final IOException e) {
        return null;
    }
}
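For orientation, the read side would unpack the signature byte documented above roughly as follows. This is a sketch inferred from the Javadoc's bit layout, not copied from eXist's deserialization code; the masks are assumptions.

byte signature = data[0];
int nodeType = (signature & 0xE0) >>> 5;         // top 3 bits; Signatures.Elem for elements
byte idSizeType = (byte) (signature & 0x03);     // 0x0 none, 0x1 int, 0x2 short, 0x3 byte
boolean hasNamespace = (signature & 0x10) != 0;  // namespace fields follow if set
boolean isDirty = (signature & 0x08) != 0;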

From source file:org.apache.hadoop.dfs.DataNode.java

private static void sendResponse(Socket s, short opStatus, long timeout) throws IOException {
    DataOutputStream reply = new DataOutputStream(NetUtils.getOutputStream(s, timeout));
    try {
        reply.writeShort(opStatus);
        reply.flush();
    } finally {
        IOUtils.closeStream(reply);
    }
}
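The short opStatus is widened to int by writeShort and survives the round trip unchanged. A hypothetical receiver-side counterpart (not taken from the Hadoop source) would read it back with the matching call:

DataInputStream reply = new DataInputStream(NetUtils.getInputStream(s));
short opStatus = reply.readShort(); // the same two big-endian bytes written above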

From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java

/**
 * append a ProxyCrawlHistoryItem to the active log
 *
 * @param item
 * @throws IOException
 */
void appendItemToLog(ProxyCrawlHistoryItem item) throws IOException {

    try {
        // open the log file ...
        DataOutputStream logStream = new DataOutputStream(new FileOutputStream(getActiveLogFilePath(), true));

        try {
            // reset crc calculator (single thread so no worries on synchronization)
            _crc16Out.reset();
            // reset output stream
            _outputBuffer.reset();
            // create checked stream
            CheckedOutputStream checkedStream = new CheckedOutputStream(_outputBuffer, _crc16Out);
            DataOutputStream dataOutputStream = new DataOutputStream(checkedStream);
            // write out item
            item.serialize(dataOutputStream, new BinaryProtocol());
            dataOutputStream.flush();

            // ok now write out sync,crc,length then data
            logStream.write(getLocalLogSyncBytes());
            logStream.writeInt((int) checkedStream.getChecksum().getValue());
            logStream.writeShort((short) _outputBuffer.getLength());
            logStream.write(_outputBuffer.getData(), 0, _outputBuffer.getLength());

            logStream.flush();
            logStream.close();
            logStream = null;

            // now we need to update the file header
            updateLogFileHeader(getActiveLogFilePath(), 1, LOG_ITEM_HEADER_SIZE + _outputBuffer.getLength());

            URLFP fingerprint = URLUtils.getURLFPFromURL(item.getOriginalURL(), true);
            // update local log
            synchronized (_localLogItems) {
                if (fingerprint != null) {
                    _localLogItems.put(fingerprint, item);
                }
            }

            ImmutableSet<CrawlList> lists = null;
            // and now walk lists updating them as necessary
            synchronized (_crawlLists) {
                lists = new ImmutableSet.Builder<CrawlList>().addAll(_crawlLists.values()).build();
            }
            for (CrawlList list : lists) {
                try {
                    list.updateItemState(fingerprint, item);
                } catch (Exception e) {
                    // ok, if an error occurs updating the list metadata we need to
                    // continue along; it is critical for this thread not to die in
                    // such a circumstance
                    LOG.fatal("Error Updating List(" + list.getListId() + "):"
                            + CCStringUtils.stringifyException(e));
                    System.out.println("Exception in List Update(" + list.getListId() + "):"
                            + CCStringUtils.stringifyException(e));
                }
            }

        } finally {
            if (logStream != null) {
                logStream.close();
            }
        }
    } finally {

    }
}
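Reading a record back means consuming the sync marker, CRC, and length in the same order they were written. A sketch of the matching reader, reusing the class's getActiveLogFilePath() and sync bytes; the CRC comparison is left as a comment:

DataInputStream logIn = new DataInputStream(new FileInputStream(getActiveLogFilePath()));
// (a real reader would first skip the log file header updated above)
byte[] sync = new byte[getLocalLogSyncBytes().length];
logIn.readFully(sync);                  // sync marker
int expectedCRC = logIn.readInt();      // checksum written via writeInt
int length = logIn.readUnsignedShort(); // length was written via writeShort
byte[] payload = new byte[length];
logIn.readFully(payload);
// recompute the CRC16 over payload and compare with expectedCRC before deserializing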

From source file:org.apache.hadoop.hdfs.server.datanode.DataWriter.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock() throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
    int namespaceid = in.readInt();
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    updateCurrentThreadName("receiving block " + block + " client=" + client);
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(namespaceid, block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        // get a connection back to the previous target
        replyOut = new DataOutputStream(new BufferedOutputStream(
                NetUtils.getOutputStream(s, datanode.socketWriteTimeout), SMALL_BUFFER_SIZE));

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (datanode.socketReadExtentionTimeout * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (datanode.socketWriteExtentionTimeout * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeInt(namespaceid);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (IOException e) {
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.getDatanodeInfo() + ":Exception transferring block " + block
                            + " to mirror " + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        long totalReceiveSize = blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null,
                targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(namespaceid, block, null);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        } else {
            // Log the fact that the block has been received by this datanode and
            // has been written to the local disk on this datanode.
            LOG.info("Received Block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes() + " and written to local disk");
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(namespaceid, block);
        }

        long writeDuration = System.currentTimeMillis() - startTime;
        datanode.myMetrics.bytesWrittenLatency.inc(writeDuration);
        if (totalReceiveSize > KB_RIGHT_SHIFT_MIN) {
            datanode.myMetrics.bytesWrittenRate.inc((int) (totalReceiveSize >> KB_RIGHT_SHIFT_BITS),
                    writeDuration);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}
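On the downstream datanode, this header is consumed in the order it was written. A simplified sketch of the receiving side's version check, with the field order inferred from the writes above:

short version = in.readShort();
if (version != DataTransferProtocol.DATA_TRANSFER_VERSION) {
    throw new IOException("Version mismatch in data stream: " + version);
}
byte op = in.readByte();            // DataTransferProtocol.OP_WRITE_BLOCK
int namespaceId = in.readInt();
long blockId = in.readLong();
long generationStamp = in.readLong();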

From source file:org.apache.hadoop.hdfs.AvatarClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, AvatarProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
            sock.setSoTimeout(timeout);

            DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}
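The datanode's reply mirrors the reads above. A sketch of the response framing this client expects; the variable names are hypothetical, and the field order is inferred from the readShort/readInt/readLong calls:

reply.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); // consumed by in.readShort()
reply.writeInt(bytesPerChecksum);                         // consumed by in.readInt()
reply.writeLong(crcPerBlock);                             // consumed by in.readLong()
blockMd5.write(reply);                                    // consumed by MD5Hash.read(in)
reply.flush();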

From source file:ml.shifu.shifu.core.dtrain.dt.BinaryDTSerializer.java

public static void save(ModelConfig modelConfig, List<ColumnConfig> columnConfigList,
        List<List<TreeNode>> baggingTrees, String loss, int inputCount, OutputStream output)
        throws IOException {
    DataOutputStream fos = null;

    try {
        fos = new DataOutputStream(new GZIPOutputStream(output));
        // version
        fos.writeInt(CommonConstants.TREE_FORMAT_VERSION);
        fos.writeUTF(modelConfig.getAlgorithm());
        fos.writeUTF(loss);
        fos.writeBoolean(modelConfig.isClassification());
        fos.writeBoolean(modelConfig.getTrain().isOneVsAll());
        fos.writeInt(inputCount);

        Map<Integer, String> columnIndexNameMapping = new HashMap<Integer, String>();
        Map<Integer, List<String>> columnIndexCategoricalListMapping = new HashMap<Integer, List<String>>();
        Map<Integer, Double> numericalMeanMapping = new HashMap<Integer, Double>();
        for (ColumnConfig columnConfig : columnConfigList) {
            if (columnConfig.isFinalSelect()) {
                columnIndexNameMapping.put(columnConfig.getColumnNum(), columnConfig.getColumnName());
            }
            if (columnConfig.isCategorical() && CollectionUtils.isNotEmpty(columnConfig.getBinCategory())) {
                columnIndexCategoricalListMapping.put(columnConfig.getColumnNum(),
                        columnConfig.getBinCategory());
            }

            if (columnConfig.isNumerical() && columnConfig.getMean() != null) {
                numericalMeanMapping.put(columnConfig.getColumnNum(), columnConfig.getMean());
            }
        }

        if (columnIndexNameMapping.size() == 0) {
            boolean hasCandidates = CommonUtils.hasCandidateColumns(columnConfigList);
            for (ColumnConfig columnConfig : columnConfigList) {
                if (CommonUtils.isGoodCandidate(columnConfig, hasCandidates)) {
                    columnIndexNameMapping.put(columnConfig.getColumnNum(), columnConfig.getColumnName());
                }
            }
        }

        // serialize numericalMeanMapping
        fos.writeInt(numericalMeanMapping.size());
        for (Entry<Integer, Double> entry : numericalMeanMapping.entrySet()) {
            fos.writeInt(entry.getKey());
            // some features have a null mean value because they were not selected; write 0d to avoid an NPE
            fos.writeDouble(entry.getValue() == null ? 0d : entry.getValue());
        }
        // serialize columnIndexNameMapping
        fos.writeInt(columnIndexNameMapping.size());
        for (Entry<Integer, String> entry : columnIndexNameMapping.entrySet()) {
            fos.writeInt(entry.getKey());
            fos.writeUTF(entry.getValue());
        }
        // serialize columnIndexCategoricalListMapping
        fos.writeInt(columnIndexCategoricalListMapping.size());
        for (Entry<Integer, List<String>> entry : columnIndexCategoricalListMapping.entrySet()) {
            List<String> categories = entry.getValue();
            if (categories != null) {
                fos.writeInt(entry.getKey());
                fos.writeInt(categories.size());
                for (String category : categories) {
                    // writeUTF() cannot encode arbitrarily long strings (its length prefix is a
                    // single unsigned short). If the category value is longer than
                    // Constants.MAX_CATEGORICAL_VAL_LEN, write a marker (UTF_BYTES_MARKER) and raw
                    // bytes instead of writeUTF; the read side must check the marker and use
                    // readByte rather than readUTF accordingly.
                    if (category.length() < Constants.MAX_CATEGORICAL_VAL_LEN) {
                        fos.writeUTF(category);
                    } else {
                        fos.writeShort(UTF_BYTES_MARKER); // marker here
                        byte[] bytes = category.getBytes("UTF-8");
                        fos.writeInt(bytes.length);
                        for (int i = 0; i < bytes.length; i++) {
                            fos.writeByte(bytes[i]);
                        }
                    }
                }
            }
        }

        Map<Integer, Integer> columnMapping = getColumnMapping(columnConfigList);
        fos.writeInt(columnMapping.size());
        for (Entry<Integer, Integer> entry : columnMapping.entrySet()) {
            fos.writeInt(entry.getKey());
            fos.writeInt(entry.getValue());
        }

        // from model version 4 (>= 4) onward, IndependentTreeModel supports bagging; for plain RF/GBT the bagging size written here is 1
        fos.writeInt(baggingTrees.size());
        for (int i = 0; i < baggingTrees.size(); i++) {
            List<TreeNode> trees = baggingTrees.get(i);
            int treeLength = trees.size();
            fos.writeInt(treeLength);
            for (TreeNode treeNode : trees) {
                treeNode.write(fos);
            }
        }
    } catch (IOException e) {
        LOG.error("Error in writing output.", e);
    } finally {
        IOUtils.closeStream(fos);
    }
}
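The read side implied by the marker scheme above would inspect the 2-byte prefix before deciding how to decode each category. A sketch, assuming UTF_BYTES_MARKER is the same constant written above and treating the writeUTF payload as plain UTF-8 (real code would decode modified UTF-8 exactly):

int prefix = dis.readUnsignedShort();
String category;
if (prefix == (UTF_BYTES_MARKER & 0xFFFF)) {
    // long value: 4-byte length followed by raw UTF-8 bytes
    byte[] raw = new byte[dis.readInt()];
    dis.readFully(raw);
    category = new String(raw, "UTF-8");
} else {
    // short value: prefix is the length prefix that writeUTF emitted
    byte[] utf = new byte[prefix];
    dis.readFully(utf);
    category = new String(utf, "UTF-8");
}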

From source file:org.apache.jxtadoop.hdfs.DFSClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            /*final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, 
                 NetUtils.createSocketAddr(datanodes[j].getName()),
                 timeout);
            sock.setSoTimeout(timeout);*/
            JxtaSocket jsock = DFSClient.getDfsClient().getDfsClientPeer()
                    .getInfoSocket(datanodes[j].getName());
            // jsock.setSoTimeout(timeout);
            jsock.setSoTimeout(Integer.parseInt(conf.get("hadoop.p2p.info.timeout")));

            /*DataOutputStream out = new DataOutputStream(
                new BufferedOutputStream(NetUtils.getOutputStream(jsock), 
                             DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(jsock));*/
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(jsock.getOutputStream()));
            DataInputStream in = new DataInputStream(jsock.getInputStream());

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(jsock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.apache.hadoop.hdfs.DFSClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
    if (null == blockLocations) {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;
    boolean refetchBlocks = false;
    int lastRetriedIndex = -1;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        if (refetchBlocks) { // refetch to get fresh tokens
            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
            if (null == blockLocations) {
                throw new FileNotFoundException("File does not exist: " + src);
            }
            locatedblocks = blockLocations.getLocatedBlocks();
            refetchBlocks = false;
        }
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = (socketTimeout > 0)
                ? (socketTimeout + HdfsConstants.READ_TIMEOUT_EXTENSION * datanodes.length)
                : 0;

        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            Socket sock = null;
            DataOutputStream out = null;
            DataInputStream in = null;

            try {
                //connect to a datanode
                sock = socketFactory.createSocket();
                NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
                sock.setSoTimeout(timeout);

                out = new DataOutputStream(
                        new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
                in = new DataInputStream(NetUtils.getInputStream(sock));

                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }

                // get block MD5
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                lb.getBlockToken().write(out);
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    if (reply == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN && i > lastRetriedIndex) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + "for file "
                                    + src + " for block " + block + " from datanode " + datanodes[j].getName()
                                    + ". Will retry the block once.");
                        }
                        lastRetriedIndex = i;
                        done = true; // actually it's not done; but we'll retry
                        i--; // repeat at i-th block
                        refetchBlocks = true;
                        break;
                    } else {
                        throw new IOException("Bad response " + reply + " for block " + block
                                + " from datanode " + datanodes[j].getName());
                    }
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    LOG.debug("Mathod called : writeBlock()");
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);

    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client

    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }

    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }

    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    JxtaSocket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    try {
        // open a block receiver and check if the block does not exist
        /*blockReceiver = new BlockReceiver(block, in, 
            s.getRemoteSocketAddress().toString(),
            s.getLocalSocketAddress().toString(),
            isRecovery, client, srcDataNode, datanode);*/
        blockReceiver = new BlockReceiver(block, in,
                ((JxtaSocketAddress) s.getRemoteSocketAddress()).getPeerId().toString(),
                ((JxtaSocketAddress) s.getLocalSocketAddress()).getPeerId().toString(), isRecovery, client,
                srcDataNode, datanode);

        // get a connection back to the previous target
        //replyOut = new DataOutputStream(
        //     NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
        ReliableOutputStream replyOutRos = (ReliableOutputStream) s.getOutputStream();
        replyOut = new DataOutputStream(replyOutRos);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            // JxtaSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getPeerId();
            // mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            // mirrorSock = datanode.newSocket();

            try {
                //int timeoutValue = numTargets * datanode.socketTimeout;
                //int writeTimeout = datanode.socketWriteTimeout + 
                //                   (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                // NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock = datanode.getDnPeer().getInfoSocket(mirrorNode.toString());
                if (mirrorSock == null)
                    throw new IOException("Failed to get a mirror socket");
                //mirrorSock.setSoTimeout(timeoutValue);
                //mirrorSock.setTcpNoDelay(true);
                //mirrorSock.setSoTimeout(Integer.parseInt(datanode.getConf().get("hadoop.p2p.info.timeout")));
                //mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                /*mirrorOut = new DataOutputStream(
                   new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                 SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
                */
                mirrorOut = new DataOutputStream((ReliableOutputStream) mirrorSock.getOutputStream());
                mirrorIn = new DataInputStream((ReliableInputStream) mirrorSock.getInputStream());

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);

                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }

                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (SocketTimeoutException ste) {
                LOG.debug("Time out while receiving data on DataXceiver");
                LOG.debug(ste);
                ste.printStackTrace();
            } catch (IOException e) {
                LOG.debug("IOException occurred : " + e.getMessage());
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                if (mirrorSock != null) {
                    IOUtils.closeSocket(mirrorSock);
                    mirrorSock = null;
                }
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } catch (Exception e) {
        LOG.warn("Exception occurred in writting block : " + e.getMessage());
    } finally {
        // close all opened streams

        LOG.debug("Finalizing : writeBlock()");
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}

From source file:org.openymsg.network.Session.java

/**
 * Transmit a FILETRANSFER packet, to send a binary file to a friend.
 */
protected void transmitFileTransfer(final String to, final String message, final File file)
        throws FileTransferFailedException, IOException {
    if (file == null)
        throw new IllegalArgumentException("Argument 'file' cannot be null.");

    if (!file.isFile())
        throw new IllegalArgumentException(
                "The provided file object does not denote a normal file (but possibly a directory).");

    if (file.length() == 0L)
        throw new FileTransferFailedException("File transfer: empty file");

    final String cookie = this.cookieY + "; " + this.cookieT;

    final byte[] marker = { '2', '9', (byte) 0xc0, (byte) 0x80 };

    // Create a Yahoo packet into 'packet'
    final PacketBodyBuffer body = new PacketBodyBuffer();
    body.addElement("0", this.primaryID.getId());
    body.addElement("5", to);
    body.addElement("28", Long.toString(file.length()));
    body.addElement("27", file.getName());
    body.addElement("14", message);
    byte[] packet = body.getBuffer();

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    dos.write(NetworkConstants.MAGIC, 0, 4);
    dos.write(NetworkConstants.VERSION, 0, 4);
    dos.writeShort((packet.length + 4) & 0xFFFF);
    dos.writeShort(ServiceType.FILETRANSFER.getValue() & 0xFFFF);
    dos.writeInt((int) (this.status.getValue() & 0xFFFFFFFF));
    dos.writeInt((int) (this.sessionId & 0xFFFFFFFF));
    dos.write(packet, 0, packet.length);
    dos.write(marker, 0, 4); // Extra 4 bytes : marker before file data
    // (?)

    packet = baos.toByteArray();

    // Send to Yahoo using POST
    String ftHost = Util.fileTransferHost();
    String ftURL = "http://" + ftHost + NetworkConstants.FILE_TF_PORTPATH;
    HttpURLConnection uConn = (HttpURLConnection) (new URL(ftURL).openConnection());
    uConn.setRequestMethod("POST");
    uConn.setDoOutput(true); // POST, not GET
    Util.initURLConnection(uConn);
    uConn.setRequestProperty("Content-Length", Long.toString(file.length() + packet.length));
    uConn.setRequestProperty("User-Agent", NetworkConstants.USER_AGENT);
    // uConn.setRequestProperty("Host",ftHost);
    uConn.setRequestProperty("Cookie", cookie);
    uConn.connect();

    final BufferedOutputStream bos = new BufferedOutputStream(uConn.getOutputStream());
    try {
        bos.write(packet);
        bos.write(Util.getBytesFromFile(file));
        bos.flush();
    } finally {
        bos.close();
    }

    final int ret = uConn.getResponseCode();
    uConn.disconnect();

    if (ret != 200)
        throw new FileTransferFailedException("Server rejected upload");
}