Example usage for java.io DataOutputStream writeLong

List of usage examples for java.io DataOutputStream writeLong

Introduction

On this page you can find example usages of java.io.DataOutputStream.writeLong, collected from open-source projects.

Prototype

public final void writeLong(long v) throws IOException 

Document

Writes a long to the underlying output stream as eight bytes, high byte first.
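
As a minimal, self-contained sketch (not taken from any of the projects below), the following round trip shows that writeLong emits exactly eight bytes, high byte first, and that DataInputStream.readLong reassembles them:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteLongDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            out.writeLong(0x0102030405060708L); // high byte (0x01) goes out first
        }

        byte[] bytes = baos.toByteArray();
        System.out.println(bytes.length); // always 8

        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
            System.out.println(Long.toHexString(in.readLong())); // 102030405060708
        }
    }
}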

Usage

From source file:org.apache.jxtadoop.hdfs.DFSClient.java
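
In this checksum routine, writeLong sends each block's ID and generation stamp as part of an OP_BLOCK_CHECKSUM request to a datanode over a JXTA socket.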

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            /*final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, 
                 NetUtils.createSocketAddr(datanodes[j].getName()),
                 timeout);
            sock.setSoTimeout(timeout);*/
            JxtaSocket jsock = DFSClient.getDfsClient().getDfsClientPeer()
                    .getInfoSocket(datanodes[j].getName());
            // jsock.setSoTimeout(timeout);
            jsock.setSoTimeout(Integer.parseInt(conf.get("hadoop.p2p.info.timeout")));

            /*DataOutputStream out = new DataOutputStream(
                new BufferedOutputStream(NetUtils.getOutputStream(jsock), 
                             DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(jsock));*/
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(jsock.getOutputStream()));
            DataInputStream in = new DataInputStream(jsock.getInputStream());

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(jsock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.java
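
This HBase test serializes a non-root block index, using writeLong to store a dummy file offset ahead of each indexed key, then binary-searches the resulting buffer.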

@Test
public void testSecondaryIndexBinarySearch() throws IOException {
    int numTotalKeys = 99;
    assertTrue(numTotalKeys % 2 == 1); // Ensure no one made this even.

    // We only add odd-index keys into the array that we will binary-search.
    int numSearchedKeys = (numTotalKeys - 1) / 2;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);

    dos.writeInt(numSearchedKeys);
    int curAllEntriesSize = 0;
    int numEntriesAdded = 0;

    // Only odd-index elements of this array are used to keep the secondary
    // index entries of the corresponding keys.
    int[] secondaryIndexEntries = new int[numTotalKeys];

    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i * 2);
        KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("val"));
        //KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length);
        keys.add(cell.getKey());
        String msgPrefix = "Key #" + i + " (" + Bytes.toStringBinary(k) + "): ";
        StringBuilder padding = new StringBuilder();
        while (msgPrefix.length() + padding.length() < 70)
            padding.append(' ');
        msgPrefix += padding;
        if (i % 2 == 1) {
            dos.writeInt(curAllEntriesSize);
            secondaryIndexEntries[i] = curAllEntriesSize;
            LOG.info(msgPrefix + "secondary index entry #" + ((i - 1) / 2) + ", offset " + curAllEntriesSize);
            curAllEntriesSize += cell.getKey().length + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD;
            ++numEntriesAdded;
        } else {
            secondaryIndexEntries[i] = -1;
            LOG.info(msgPrefix + "not in the searched array");
        }
    }

    // Make sure the keys are increasing.
    for (int i = 0; i < keys.size() - 1; ++i)
        assertTrue(KeyValue.COMPARATOR.compare(new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length),
                new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0);

    dos.writeInt(curAllEntriesSize);
    assertEquals(numSearchedKeys, numEntriesAdded);
    int secondaryIndexOffset = dos.size();
    assertEquals(Bytes.SIZEOF_INT * (numSearchedKeys + 2), secondaryIndexOffset);

    for (int i = 1; i <= numTotalKeys - 1; i += 2) {
        assertEquals(dos.size(), secondaryIndexOffset + secondaryIndexEntries[i]);
        long dummyFileOffset = getDummyFileOffset(i);
        int dummyOnDiskSize = getDummyOnDiskSize(i);
        LOG.debug("Storing file offset=" + dummyFileOffset + " and onDiskSize=" + dummyOnDiskSize
                + " at offset " + dos.size());
        dos.writeLong(dummyFileOffset);
        dos.writeInt(dummyOnDiskSize);
        LOG.debug("Stored key " + ((i - 1) / 2) + " at offset " + dos.size());
        dos.write(keys.get(i));
    }

    dos.writeInt(curAllEntriesSize);

    ByteBuffer nonRootIndex = ByteBuffer.wrap(baos.toByteArray());
    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] searchKey = keys.get(i);
        byte[] arrayHoldingKey = new byte[searchKey.length + searchKey.length / 2];

        // To make things a bit more interesting, store the key we are looking
        // for at a non-zero offset in a new array.
        System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, searchKey.length);

        KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue(arrayHoldingKey, searchKey.length / 2,
                searchKey.length);
        int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell, nonRootIndex, KeyValue.COMPARATOR);
        String lookupFailureMsg = "Failed to look up key #" + i + " (" + Bytes.toStringBinary(searchKey) + ")";

        int expectedResult;
        int referenceItem;

        if (i % 2 == 1) {
            // This key is in the array we search as the element (i - 1) / 2. Make
            // sure we find it.
            expectedResult = (i - 1) / 2;
            referenceItem = i;
        } else {
            // This key is not in the array but between two elements on the array,
            // in the beginning, or in the end. The result should be the previous
            // key in the searched array, or -1 for i = 0.
            expectedResult = i / 2 - 1;
            referenceItem = i - 1;
        }

        assertEquals(lookupFailureMsg, expectedResult, searchResult);

        // Now test that we can get the offset and the on-disk-size using a
        // higher-level API function.
        boolean locateBlockResult = (BlockIndexReader.locateNonRootIndexEntry(nonRootIndex, cell,
                KeyValue.COMPARATOR) != -1);

        if (i == 0) {
            assertFalse(locateBlockResult);
        } else {
            assertTrue(locateBlockResult);
            String errorMsg = "i=" + i + ", position=" + nonRootIndex.position();
            assertEquals(errorMsg, getDummyFileOffset(referenceItem), nonRootIndex.getLong());
            assertEquals(errorMsg, getDummyOnDiskSize(referenceItem), nonRootIndex.getInt());
        }
    }

}

From source file:org.commoncrawl.service.listcrawler.CacheWriterThread.java
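
This cache-writer thread assembles a log record for each cache item and uses writeLong to append the CRC32 checksum computed over the record's contents.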

@Override
public void run() {

    boolean shutdown = false;

    while (!shutdown) {
        try {
            final CacheWriteRequest request = _writeRequestQueue.take();

            switch (request._requestType) {

            case ExitThreadRequest: {
                // shutdown condition ... 
                CacheManager.LOG.info("Disk Writer Thread Received Shutdown. Exiting!");
                shutdown = true;
            }
                break;

            case WriteRequest: {

                long timeStart = System.currentTimeMillis();

                try {
                    // reset crc calculator (single thread so no worries on synchronization)
                    _crc32Out.reset();

                    // figure out if we need to compress the item ... 
                    if ((request._item.getFlags() & CacheItem.Flags.Flag_IsCompressed) == 0
                            && request._item.getContent().getCount() != 0) {
                        LOG.info("Incoming Cache Request Content for:" + request._item.getUrl()
                                + " is not compressed. Compressing...");
                        ByteStream compressedBytesOut = new ByteStream(request._item.getContent().getCount());
                        ThriftyGZIPOutputStream gzipOutputStream = new ThriftyGZIPOutputStream(
                                compressedBytesOut);
                        gzipOutputStream.write(request._item.getContent().getReadOnlyBytes(), 0,
                                request._item.getContent().getCount());
                        gzipOutputStream.finish();
                        LOG.info("Finished Compressing Incoming Content for:" + request._item.getUrl()
                                + " BytesIn:" + request._item.getContent().getCount() + " BytesOut:"
                                + compressedBytesOut.size());
                        // replace buffer

                        request._item.setContent(
                                new FlexBuffer(compressedBytesOut.getBuffer(), 0, compressedBytesOut.size()));
                        request._item.setFlags((request._item.getFlags() | CacheItem.Flags.Flag_IsCompressed));
                    }

                    // create streams ...
                    ByteStream bufferOutputStream = new ByteStream(8192);

                    CheckedOutputStream checkedStream = new CheckedOutputStream(bufferOutputStream, _crc32Out);
                    DataOutputStream dataOutputStream = new DataOutputStream(checkedStream);

                    // remember if this item has content ... 
                    boolean hasContent = request._item.isFieldDirty(CacheItem.Field_CONTENT);
                    // now mark the content field as clean, so that it will not be serialized in our current serialization attempt ... 
                    request._item.setFieldClean(CacheItem.Field_CONTENT);
                    // and go ahead and write out the data to the intermediate buffer while also computing partial checksum 
                    request._item.write(dataOutputStream);

                    request._item.setFieldDirty(CacheItem.Field_CONTENT);

                    // ok, now ... write out file header ... 
                    CacheItemHeader itemHeader = new CacheItemHeader(_manager.getLocalLogSyncBytes());

                    itemHeader._status = CacheItemHeader.STATUS_ALIVE;
                    itemHeader._lastAccessTime = System.currentTimeMillis();
                    itemHeader._fingerprint = request._itemFingerprint;
                    // compute total length ... 

                    // first the header bytes in the cacheItem 
                    itemHeader._dataLength = bufferOutputStream.size();
                    // next the content length (encoded - as in size + bytes) ... 
                    itemHeader._dataLength += 4 + request._item.getContent().getCount();
                    // lastly the crc value itself ...
                    itemHeader._dataLength += 8;
                    // open the log file ... 
                    DataOutputBuffer logStream = new DataOutputBuffer();

                    // ok, go ahead and write the header 
                    itemHeader.writeHeader(logStream);
                    // ok now write out the item data minus content... 
                    logStream.write(bufferOutputStream.getBuffer(), 0, bufferOutputStream.size());
                    // now create a checked stream for the content ... 
                    CheckedOutputStream checkedStream2 = new CheckedOutputStream(logStream,
                            checkedStream.getChecksum());

                    dataOutputStream = new DataOutputStream(checkedStream2);

                    // content size 
                    dataOutputStream.writeInt(request._item.getContent().getCount());
                    // now write out the content (via checked stream so that we can calc checksum on content)
                    dataOutputStream.write(request._item.getContent().getReadOnlyBytes(), 0,
                            request._item.getContent().getCount());
                    // ok ... lastly write out the checksum bytes ... 
                    dataOutputStream.writeLong(checkedStream2.getChecksum().getValue());
                    // and FINALLY, write out the total item bytes (so that we can seek in reverse to read the last request log)
                    logStream.writeInt(CacheItemHeader.SIZE + itemHeader._dataLength);

                    // ok flush everything to the memory stream
                    dataOutputStream.flush();

                    //ok - time to acquire the log semaphore 
                    //LOG.info("Acquiring Local Log Semaphore");
                    _manager.getLocalLogAccessSemaphore().acquireUninterruptibly();

                    try {

                        // now time to acquire the write semaphore ... 
                        _manager.getLocalLogWriteAccessSemaphore().acquireUninterruptibly();

                        // get the current file position 
                        long recordOffset = _manager.getLocalLogFilePos();

                        try {

                            long ioTimeStart = System.currentTimeMillis();

                            RandomAccessFile logFile = new RandomAccessFile(_manager.getActiveLogFilePath(),
                                    "rw");

                            try {
                                // seek to our known record offset 
                                logFile.seek(recordOffset);
                                // write out the data
                                logFile.write(logStream.getData(), 0, logStream.getLength());
                            } finally {
                                logFile.close();
                            }
                            // now we need to update the file header 
                            _manager.updateLogFileHeader(_manager.getActiveLogFilePath(), 1,
                                    CacheItemHeader.SIZE + itemHeader._dataLength + 4 /*trailing bytes*/);

                            CacheManager.LOG
                                    .info("#### Wrote Cache Item in:" + (System.currentTimeMillis() - timeStart)
                                            + " iotime:" + (System.currentTimeMillis() - ioTimeStart)
                                            + " QueueSize:" + _writeRequestQueue.size());

                        } finally {
                            // release write semaphore quickly 
                            _manager.getLocalLogWriteAccessSemaphore().release();
                        }

                        // now inform the manager of the completed request ... 
                        _manager.writeRequestComplete(request, recordOffset);
                    } finally {
                        //LOG.info("Releasing Local Log Semaphore");
                        _manager.getLocalLogAccessSemaphore().release();
                    }
                } catch (IOException e) {
                    CacheManager.LOG.error("### FUC# BATMAN! - GONNA LOSE THIS REQUEST!!!!:"
                            + CCStringUtils.stringifyException(e));
                    _manager.writeRequestFailed(request, e);
                }
            }
                break;
            }
        } catch (InterruptedException e) {
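            // interrupted while blocked on the queue; loop and re-check the shutdown flag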

        }
    }
}

From source file:org.apache.hadoop.hdfs.DFSClient.java
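
A later Hadoop version of the checksum routine above: writeLong again transmits the block ID and generation stamp, now followed by the block access token, with a one-time retry on access-token errors.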

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
    if (null == blockLocations) {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;
    boolean refetchBlocks = false;
    int lastRetriedIndex = -1;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        if (refetchBlocks) { // refetch to get fresh tokens
            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
            if (null == blockLocations) {
                throw new FileNotFoundException("File does not exist: " + src);
            }
            locatedblocks = blockLocations.getLocatedBlocks();
            refetchBlocks = false;
        }
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = (socketTimeout > 0)
                ? (socketTimeout + HdfsConstants.READ_TIMEOUT_EXTENSION * datanodes.length)
                : 0;

        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            Socket sock = null;
            DataOutputStream out = null;
            DataInputStream in = null;

            try {
                //connect to a datanode
                sock = socketFactory.createSocket();
                NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
                sock.setSoTimeout(timeout);

                out = new DataOutputStream(
                        new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
                in = new DataInputStream(NetUtils.getInputStream(sock));

                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }

                // get block MD5
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                lb.getBlockToken().write(out);
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    if (reply == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN && i > lastRetriedIndex) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + "for file "
                                    + src + " for block " + block + " from datanode " + datanodes[j].getName()
                                    + ". Will retry the block once.");
                        }
                        lastRetriedIndex = i;
                        done = true; // actually it's not done; but we'll retry
                        i--; // repeat at i-th block
                        refetchBlocks = true;
                        break;
                    } else {
                        throw new IOException("Bad response " + reply + " for block " + block
                                + " from datanode " + datanodes[j].getName());
                    }
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java
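
On the receiving datanode, writeLong forwards the block ID and generation stamp to the next datanode in the write pipeline.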

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    LOG.debug("Mathod called : writeBlock()");
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);

    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client

    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }

    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }

    DatanodeInfo[] targets = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    JxtaSocket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    try {
        // open a block receiver and check if the block does not exist
        /*blockReceiver = new BlockReceiver(block, in, 
            s.getRemoteSocketAddress().toString(),
            s.getLocalSocketAddress().toString(),
            isRecovery, client, srcDataNode, datanode);*/
        blockReceiver = new BlockReceiver(block, in,
                ((JxtaSocketAddress) s.getRemoteSocketAddress()).getPeerId().toString(),
                ((JxtaSocketAddress) s.getLocalSocketAddress()).getPeerId().toString(), isRecovery, client,
                srcDataNode, datanode);

        // get a connection back to the previous target
        //replyOut = new DataOutputStream(
        //     NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
        ReliableOutputStream replyOutRos = (ReliableOutputStream) s.getOutputStream();
        replyOut = new DataOutputStream(replyOutRos);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            // JxtaSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getPeerId();
            // mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            // mirrorSock = datanode.newSocket();

            try {
                //int timeoutValue = numTargets * datanode.socketTimeout;
                //int writeTimeout = datanode.socketWriteTimeout + 
                //                   (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                // NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock = datanode.getDnPeer().getInfoSocket(mirrorNode.toString());
                if (mirrorSock == null)
                    throw new IOException("Failed to get a mirror socket");
                //mirrorSock.setSoTimeout(timeoutValue);
                //mirrorSock.setTcpNoDelay(true);
                //mirrorSock.setSoTimeout(Integer.parseInt(datanode.getConf().get("hadoop.p2p.info.timeout")));
                //mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                /*mirrorOut = new DataOutputStream(
                   new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                 SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
                */
                mirrorOut = new DataOutputStream((ReliableOutputStream) mirrorSock.getOutputStream());
                mirrorIn = new DataInputStream((ReliableInputStream) mirrorSock.getInputStream());

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);

                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }

                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (SocketTimeoutException ste) {
                LOG.debug("Time out while receiving data on DataXceiver");
                LOG.debug(ste);
                ste.printStackTrace();
            } catch (IOException e) {
                LOG.debug("IOException occurred : " + e.getMessage());
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                if (mirrorSock != null) {
                    IOUtils.closeSocket(mirrorSock);
                    mirrorSock = null;
                }
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transferring block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } catch (Exception e) {
        LOG.warn("Exception occurred in writting block : " + e.getMessage());
    } finally {
        // close all opened streams

        LOG.debug("Finalizing : writeBlock()");
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}

From source file:org.ramadda.repository.database.DatabaseManager.java
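
This database exporter uses writeLong to store timestamp columns, writing -1 as a sentinel for null values, and delegates BIGINT columns to a writeLong helper.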

/**
 * Writes a binary dump of the database to the given stream as a sequence
 * of tagged table and row records.
 *
 * @param os stream to receive the dump
 * @param all _more_
 * @param actionId action id used to report progress (may be null)
 *
 * @throws Exception on any error
 */
public void makeDatabaseCopy(OutputStream os, boolean all, Object actionId) throws Exception {

    XmlEncoder encoder = new XmlEncoder();
    DataOutputStream dos = new DataOutputStream(os);
    Connection connection = getConnection();
    try {
        HashSet<String> skip = new HashSet<String>();
        skip.add(Tables.SESSIONS.NAME);

        List<TableInfo> tableInfos = getTableInfos(connection, false);
        String xml = encoder.toXml(tableInfos, false);
        writeString(dos, xml);

        int rowCnt = 0;
        System.err.println("Exporting database");
        for (TableInfo tableInfo : tableInfos) {
            if (tableInfo.getName().equalsIgnoreCase("base")) {
                continue;
            }
            if (tableInfo.getName().equalsIgnoreCase("agggregation")) {
                continue;
            }
            if (tableInfo.getName().equalsIgnoreCase("entry")) {
                continue;
            }
            System.err.println("Exporting table: " + tableInfo.getName());
            List<ColumnInfo> columns = tableInfo.getColumns();
            List valueList = new ArrayList();
            Statement statement = execute("select * from " + tableInfo.getName(), 10000000, 0);
            SqlUtil.Iterator iter = getIterator(statement);
            ResultSet results;
            dos.writeInt(DUMPTAG_TABLE);
            writeString(dos, tableInfo.getName());
            if (skip.contains(tableInfo.getName().toLowerCase())) {
                continue;
            }
            while ((results = iter.getNext()) != null) {
                dos.writeInt(DUMPTAG_ROW);
                rowCnt++;
                if ((rowCnt % 1000) == 0) {
                    if (actionId != null) {
                        getActionManager().setActionMessage(actionId, "Written " + rowCnt + " database rows");
                    }
                    System.err.println("rows:" + rowCnt);
                }
                for (int i = 1; i <= columns.size(); i++) {
                    ColumnInfo colInfo = columns.get(i - 1);
                    int type = colInfo.getType();
                    if (type == ColumnInfo.TYPE_TIMESTAMP) {
                        Timestamp ts = results.getTimestamp(i);
                        if (ts == null) {
                            dos.writeLong((long) -1);
                        } else {
                            dos.writeLong(ts.getTime());
                        }
                    } else if (type == ColumnInfo.TYPE_VARCHAR) {
                        writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_TIME) {
                        //TODO: What is the format of a type time?
                        //                            writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_INTEGER) {
                        writeInteger(dos, (Integer) results.getObject(i));
                    } else if (type == ColumnInfo.TYPE_DOUBLE) {
                        writeDouble(dos, (Double) results.getObject(i));
                    } else if (type == ColumnInfo.TYPE_CLOB) {
                        writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_BLOB) {
                        writeString(dos, results.getString(i));
                    } else if (type == ColumnInfo.TYPE_BIGINT) {
                        writeLong(dos, results.getLong(i));
                    } else if (type == ColumnInfo.TYPE_SMALLINT) {
                        dos.writeShort(results.getShort(i));
                    } else if (type == ColumnInfo.TYPE_TINYINT) {
                        //TODO:
                        //dos.write(results.getChar(i));
                    } else {
                        Object object = results.getObject(i);

                        throw new IllegalArgumentException(
                                "Unknown type:" + type + "  c:" + object.getClass().getName());
                    }
                }
            }
        }
        System.err.println("Wrote " + rowCnt + " rows");
    } finally {
        closeConnection(connection);
    }
    //Write the end tag
    dos.writeInt(DUMPTAG_END);
    IOUtil.close(dos);

}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java
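
Jackrabbit's bundle serializer calls writeLong for property values of type LONG; every other property type gets its own branch.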

/**
 * Serializes a <code>PropertyState</code> to the data output stream
 *
 * @param out the output stream
 * @param state the property entry to store
 * @throws IOException if an I/O error occurs.
 */
public void writeState(DataOutputStream out, NodePropBundle.PropertyEntry state) throws IOException {
    // type & mod count
    out.writeInt(state.getType() | (state.getModCount() << 16));
    // multiValued
    out.writeBoolean(state.isMultiValued());
    // definitionId
    out.writeUTF("");
    // values
    InternalValue[] values = state.getValues();
    out.writeInt(values.length); // count
    for (int i = 0; i < values.length; i++) {
        InternalValue val = values[i];
        switch (state.getType()) {
        case PropertyType.BINARY:
            BLOBFileValue blobVal = val.getBLOBFileValue();
            long size = blobVal.getLength();
            if (InternalValue.USE_DATA_STORE && dataStore != null) {
                int maxMemorySize = dataStore.getMinRecordLength() - 1;
                if (size < maxMemorySize) {
                    writeSmallBinary(out, blobVal, state, i);
                } else {
                    out.writeInt(BINARY_IN_DATA_STORE);
                    try {
                        val.store(dataStore);
                    } catch (RepositoryException e) {
                        String msg = "Error while storing blob. id=" + state.getId() + " idx=" + i + " size="
                                + val.getBLOBFileValue().getLength();
                        log.error(msg, e);
                        throw new IOException(msg);
                    }
                    out.writeUTF(val.toString());
                }
                break;
            }
            // special handling required for binary value:
            // spool binary value to file in blob store
            if (size < 0) {
                log.warn("Blob has negative size. Potential loss of data. " + "id={} idx={}", state.getId(),
                        String.valueOf(i));
                out.writeInt(0);
                values[i] = InternalValue.create(new byte[0]);
                blobVal.discard();
            } else if (size > minBlobSize) {
                out.writeInt(BINARY_IN_BLOB_STORE);
                String blobId = state.getBlobId(i);
                if (blobId == null) {
                    try {
                        InputStream in = blobVal.getStream();
                        try {
                            blobId = blobStore.createId(state.getId(), i);
                            blobStore.put(blobId, in, size);
                            state.setBlobId(blobId, i);
                        } finally {
                            IOUtils.closeQuietly(in);
                        }
                    } catch (Exception e) {
                        String msg = "Error while storing blob. id=" + state.getId() + " idx=" + i + " size="
                                + size;
                        log.error(msg, e);
                        throw new IOException(msg);
                    }
                    try {
                        // replace value instance with value
                        // backed by resource in blob store and delete temp file
                        if (blobStore instanceof ResourceBasedBLOBStore) {
                            values[i] = InternalValue
                                    .create(((ResourceBasedBLOBStore) blobStore).getResource(blobId));
                        } else {
                            values[i] = InternalValue.create(blobStore.get(blobId));
                        }
                    } catch (Exception e) {
                        log.error("Error while reloading blob. truncating. id=" + state.getId() + " idx=" + i
                                + " size=" + size, e);
                        values[i] = InternalValue.create(new byte[0]);
                    }
                    blobVal.discard();
                }
                // store id of blob as property value
                out.writeUTF(blobId); // value
            } else {
                // delete evt. blob
                byte[] data = writeSmallBinary(out, blobVal, state, i);
                // replace value instance with value
                // backed by resource in blob store and delete temp file
                values[i] = InternalValue.create(data);
                blobVal.discard();
            }
            break;
        case PropertyType.DOUBLE:
            out.writeDouble(val.getDouble());
            break;
        case PropertyType.LONG:
            out.writeLong(val.getLong());
            break;
        case PropertyType.BOOLEAN:
            out.writeBoolean(val.getBoolean());
            break;
        case PropertyType.NAME:
            writeQName(out, val.getQName());
            break;
        case PropertyType.REFERENCE:
            writeUUID(out, val.getUUID());
            break;
        default:
            // because writeUTF(String) has a size limit of 64k,
            // we're using write(byte[]) instead
            byte[] bytes = val.toString().getBytes("UTF-8");
            out.writeInt(bytes.length); // length of byte[]
            out.write(bytes); // byte[]
        }
    }
}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java
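
A newer revision of the same serializer, in which the writeLong call for LONG values is wrapped to convert RepositoryException into IOException.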

/**
 * Serializes a <code>PropertyState</code> to the data output stream
 *
 * @param out the output stream
 * @param state the property entry to store
 * @throws IOException if an I/O error occurs.
 */
public void writeState(DataOutputStream out, NodePropBundle.PropertyEntry state) throws IOException {
    // type & mod count
    out.writeInt(state.getType() | (state.getModCount() << 16));
    // multiValued
    out.writeBoolean(state.isMultiValued());
    // definitionId
    out.writeUTF(state.getPropDefId().toString());
    // values
    InternalValue[] values = state.getValues();
    out.writeInt(values.length); // count
    for (int i = 0; i < values.length; i++) {
        InternalValue val = values[i];
        switch (state.getType()) {
        case PropertyType.BINARY:
            try {
                long size = val.getLength();
                if (dataStore != null) {
                    int maxMemorySize = dataStore.getMinRecordLength() - 1;
                    if (size < maxMemorySize) {
                        writeSmallBinary(out, val, state, i);
                    } else {
                        out.writeInt(BINARY_IN_DATA_STORE);
                        val.store(dataStore);
                        out.writeUTF(val.toString());
                    }
                    break;
                }
                // special handling required for binary value:
                // spool binary value to file in blob store
                if (size < 0) {
                    log.warn("Blob has negative size. Potential loss of data. " + "id={} idx={}", state.getId(),
                            String.valueOf(i));
                    out.writeInt(0);
                    values[i] = InternalValue.create(new byte[0]);
                    val.discard();
                } else if (size > minBlobSize) {
                    out.writeInt(BINARY_IN_BLOB_STORE);
                    String blobId = state.getBlobId(i);
                    if (blobId == null) {
                        try {
                            InputStream in = val.getStream();
                            try {
                                blobId = blobStore.createId(state.getId(), i);
                                blobStore.put(blobId, in, size);
                                state.setBlobId(blobId, i);
                            } finally {
                                IOUtils.closeQuietly(in);
                            }
                        } catch (Exception e) {
                            String msg = "Error while storing blob. id=" + state.getId() + " idx=" + i
                                    + " size=" + size;
                            log.error(msg, e);
                            throw new IOException(msg);
                        }
                        try {
                            // replace value instance with value
                            // backed by resource in blob store and delete temp file
                            if (blobStore instanceof ResourceBasedBLOBStore) {
                                values[i] = InternalValue
                                        .create(((ResourceBasedBLOBStore) blobStore).getResource(blobId));
                            } else {
                                values[i] = InternalValue.create(blobStore.get(blobId));
                            }
                        } catch (Exception e) {
                            log.error("Error while reloading blob. truncating. id=" + state.getId() + " idx="
                                    + i + " size=" + size, e);
                            values[i] = InternalValue.create(new byte[0]);
                        }
                        val.discard();
                    }
                    // store id of blob as property value
                    out.writeUTF(blobId); // value
                } else {
                    // delete evt. blob
                    byte[] data = writeSmallBinary(out, val, state, i);
                    // replace value instance with value
                    // backed by resource in blob store and delete temp file
                    values[i] = InternalValue.create(data);
                    val.discard();
                }
            } catch (RepositoryException e) {
                String msg = "Error while storing blob. id=" + state.getId() + " idx=" + i + " value=" + val;
                log.error(msg, e);
                throw new IOException(msg);
            }
            break;
        case PropertyType.DOUBLE:
            try {
                out.writeDouble(val.getDouble());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing DOUBLE value.");
            }
            break;
        case PropertyType.DECIMAL:
            try {
                writeDecimal(out, val.getDecimal());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing DECIMAL value.");
            }
            break;
        case PropertyType.LONG:
            try {
                out.writeLong(val.getLong());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing LONG value.");
            }
            break;
        case PropertyType.BOOLEAN:
            try {
                out.writeBoolean(val.getBoolean());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing BOOLEAN value.");
            }
            break;
        case PropertyType.NAME:
            try {
                writeQName(out, val.getName());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing NAME value.");
            }
            break;
        case PropertyType.WEAKREFERENCE:
        case PropertyType.REFERENCE:
            writeID(out, val.getNodeId());
            break;
        default:
            // because writeUTF(String) has a size limit of 64k,
            // we're using write(byte[]) instead
            byte[] bytes = val.toString().getBytes("UTF-8");
            out.writeInt(bytes.length); // length of byte[]
            out.write(bytes); // byte[]
        }
    }
}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java
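
A third revision, identical to the previous one except that reference values are written with writeUUID rather than writeID.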

/**
 * Serializes a <code>PropertyState</code> to the data output stream
 *
 * @param out the output stream
 * @param state the property entry to store
 * @throws IOException if an I/O error occurs.
 */
public void writeState(DataOutputStream out, NodePropBundle.PropertyEntry state) throws IOException {
    // type & mod count
    out.writeInt(state.getType() | (state.getModCount() << 16));
    // multiValued
    out.writeBoolean(state.isMultiValued());
    // definitionId
    out.writeUTF(state.getPropDefId().toString());
    // values
    InternalValue[] values = state.getValues();
    out.writeInt(values.length); // count
    for (int i = 0; i < values.length; i++) {
        InternalValue val = values[i];
        switch (state.getType()) {
        case PropertyType.BINARY:
            try {
                long size = val.getLength();
                if (dataStore != null) {
                    int maxMemorySize = dataStore.getMinRecordLength() - 1;
                    if (size < maxMemorySize) {
                        writeSmallBinary(out, val, state, i);
                    } else {
                        out.writeInt(BINARY_IN_DATA_STORE);
                        val.store(dataStore);
                        out.writeUTF(val.toString());
                    }
                    break;
                }
                // special handling required for binary value:
                // spool binary value to file in blob store
                if (size < 0) {
                    log.warn("Blob has negative size. Potential loss of data. " + "id={} idx={}", state.getId(),
                            String.valueOf(i));
                    out.writeInt(0);
                    values[i] = InternalValue.create(new byte[0]);
                    val.discard();
                } else if (size > minBlobSize) {
                    out.writeInt(BINARY_IN_BLOB_STORE);
                    String blobId = state.getBlobId(i);
                    if (blobId == null) {
                        try {
                            InputStream in = val.getStream();
                            try {
                                blobId = blobStore.createId(state.getId(), i);
                                blobStore.put(blobId, in, size);
                                state.setBlobId(blobId, i);
                            } finally {
                                IOUtils.closeQuietly(in);
                            }
                        } catch (Exception e) {
                            String msg = "Error while storing blob. id=" + state.getId() + " idx=" + i
                                    + " size=" + size;
                            log.error(msg, e);
                            throw new IOException(msg);
                        }
                        try {
                            // replace value instance with value
                            // backed by resource in blob store and delete temp file
                            if (blobStore instanceof ResourceBasedBLOBStore) {
                                values[i] = InternalValue
                                        .create(((ResourceBasedBLOBStore) blobStore).getResource(blobId));
                            } else {
                                values[i] = InternalValue.create(blobStore.get(blobId));
                            }
                        } catch (Exception e) {
                            log.error("Error while reloading blob. truncating. id=" + state.getId() + " idx="
                                    + i + " size=" + size, e);
                            values[i] = InternalValue.create(new byte[0]);
                        }
                        val.discard();
                    }
                    // store id of blob as property value
                    out.writeUTF(blobId); // value
                } else {
                    // delete evt. blob
                    byte[] data = writeSmallBinary(out, val, state, i);
                    // replace value instance with value
                    // backed by resource in blob store and delete temp file
                    values[i] = InternalValue.create(data);
                    val.discard();
                }
            } catch (RepositoryException e) {
                String msg = "Error while storing blob. id=" + state.getId() + " idx=" + i + " value=" + val;
                log.error(msg, e);
                throw new IOException(msg);
            }
            break;
        case PropertyType.DOUBLE:
            try {
                out.writeDouble(val.getDouble());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing DOUBLE value.");
            }
            break;
        case PropertyType.DECIMAL:
            try {
                writeDecimal(out, val.getDecimal());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing DECIMAL value.");
            }
            break;
        case PropertyType.LONG:
            try {
                out.writeLong(val.getLong());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing LONG value.");
            }
            break;
        case PropertyType.BOOLEAN:
            try {
                out.writeBoolean(val.getBoolean());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing BOOLEAN value.");
            }
            break;
        case PropertyType.NAME:
            try {
                writeQName(out, val.getName());
            } catch (RepositoryException e) {
                // should never occur
                throw new IOException("Unexpected error while writing NAME value.");
            }
            break;
        case PropertyType.WEAKREFERENCE:
        case PropertyType.REFERENCE:
            writeUUID(out, val.getUUID());
            break;
        default:
            // because writeUTF(String) has a size limit of 64k,
            // we're using write(byte[]) instead
            byte[] bytes = val.toString().getBytes("UTF-8");
            out.writeInt(bytes.length); // length of byte[]
            out.write(bytes); // byte[]
        }
    }
}

From source file:com.codename1.impl.android.AndroidImplementation.java
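
Here writeLong stamps each queued notification with the current wall-clock time before the record is persisted.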

public static void appendNotification(String type, String body, String image, String category, Context a) {
    try {
        String[] fileList = a.fileList();
        byte[] data = null;
        for (int iter = 0; iter < fileList.length; iter++) {
            if (fileList[iter].equals("CN1$AndroidPendingNotifications")) {
                InputStream is = a.openFileInput("CN1$AndroidPendingNotifications");
                if (is != null) {
                    data = readInputStream(is);
                    sCleanup(a);
                    break;
                }
            }
        }
        DataOutputStream os = new DataOutputStream(a.openFileOutput("CN1$AndroidPendingNotifications", 0));
        if (data != null) {
            data[0]++;
            os.write(data);
        } else {
            os.writeByte(1);
        }
        String bodyType = type;
        if (image != null || category != null) {
            type = "99";
        }
        if (type != null) {
            os.writeBoolean(true);
            os.writeUTF(type);
        } else {
            os.writeBoolean(false);
        }
        if ("99".equals(type)) {
            String msg = "body=" + java.net.URLEncoder.encode(body, "UTF-8") + "&type="
                    + java.net.URLEncoder.encode(bodyType, "UTF-8");
            if (category != null) {
                msg += "&category=" + java.net.URLEncoder.encode(category, "UTF-8");
            }
            if (image != null) {
                msg += "&image=" + java.net.URLEncoder.encode(image, "UTF-8");
            }
            os.writeUTF(msg);

        } else {
            os.writeUTF(body);
        }
        os.writeLong(System.currentTimeMillis());
        os.close();
    } catch (IOException err) {
        err.printStackTrace();
    }
}