List of usage examples for java.io DataOutputStream writeLong
public final void writeLong(long v) throws IOException
Writes a long to the underlying output stream as eight bytes, high byte first.
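For illustration, a minimal self-contained sketch (not drawn from any of the source files below) showing that layout: the value 0x0102030405060708L comes out as the bytes 01 02 03 04 05 06 07 08.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteLongDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);

        out.writeLong(0x0102030405060708L); // written high byte first (big-endian)
        out.close();

        // prints: 01 02 03 04 05 06 07 08
        for (byte b : buffer.toByteArray()) {
            System.out.printf("%02x ", b);
        }
        System.out.println();
    }
}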
From source file: com.codefollower.lealone.omid.tso.TSOHandler.java
/**
 * Handle the CommitRequest message
 */
private void handle(CommitRequest msg, ChannelHandlerContext ctx) {
    CommitResponse reply = new CommitResponse(msg.startTimestamp);
    DataOutputStream toWAL = sharedState.toWAL;
    synchronized (sharedState) {
        // 0. check if it should abort
        if (msg.startTimestamp < timestampOracle.first()) {
            reply.committed = false;
            LOG.warn("Aborting transaction after restarting TSO");
        } else if (msg.rows.length > 0 && msg.startTimestamp < sharedState.largestDeletedTimestamp) {
            // Too old and not read only
            reply.committed = false; // set as abort
            LOG.warn("Too old startTimestamp: ST " + msg.startTimestamp + " MAX "
                    + sharedState.largestDeletedTimestamp);
        } else {
            // 1. check the write-write conflicts
            for (RowKey r : msg.rows) {
                long value = sharedState.hashmap.getLatestWriteForRow(r.hashCode());
                if (value != 0 && value > msg.startTimestamp) {
                    reply.committed = false; // set as abort
                    break;
                }
            }
        }

        if (reply.committed) {
            // 2. commit
            try {
                long commitTimestamp = timestampOracle.next(toWAL);
                sharedState.uncommited.commit(commitTimestamp);
                sharedState.uncommited.commit(msg.startTimestamp);
                reply.commitTimestamp = commitTimestamp;
                if (msg.rows.length > 0) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Adding commit to WAL");
                    }
                    toWAL.writeByte(LoggerProtocol.COMMIT);
                    toWAL.writeLong(msg.startTimestamp);
                    toWAL.writeLong(commitTimestamp);

                    long oldLargestDeletedTimestamp = sharedState.largestDeletedTimestamp;
                    for (RowKey r : msg.rows) {
                        sharedState.hashmap.putLatestWriteForRow(r.hashCode(), commitTimestamp);
                    }
                    sharedState.largestDeletedTimestamp = sharedState.hashmap.getLargestDeletedTimestamp();
                    sharedState.processCommit(msg.startTimestamp, commitTimestamp);
                    if (sharedState.largestDeletedTimestamp > oldLargestDeletedTimestamp) {
                        toWAL.writeByte(LoggerProtocol.LARGEST_DELETED_TIMESTAMP);
                        toWAL.writeLong(sharedState.largestDeletedTimestamp);
                        Set<Long> toAbort = sharedState.uncommited
                                .raiseLargestDeletedTransaction(sharedState.largestDeletedTimestamp);
                        if (LOG.isWarnEnabled() && !toAbort.isEmpty()) {
                            LOG.warn("Slow transactions after raising max: " + toAbort.size());
                        }
                        synchronized (sharedMsgBufLock) {
                            for (Long id : toAbort) {
                                sharedState.hashmap.setHalfAborted(id);
                                queueHalfAbort(id);
                            }
                            queueLargestDeletedTimestamp(sharedState.largestDeletedTimestamp);
                        }
                    }
                    if (sharedState.largestDeletedTimestamp > sharedState.previousLargestDeletedTimestamp
                            + TSOState.MAX_ITEMS) {
                        // schedule snapshot
                        executor.submit(createAbortedSnaphostTask);
                        sharedState.previousLargestDeletedTimestamp = sharedState.largestDeletedTimestamp;
                    }
                    synchronized (sharedMsgBufLock) {
                        queueCommit(msg.startTimestamp, commitTimestamp);
                    }
                }
            } catch (IOException e) {
                LOG.error("failed to handle CommitRequest", e);
            }
        } else { // add it to the aborted list
            handleHalfAbort(msg.startTimestamp);
        }

        commitCounter.incrementAndGet();
        nextBatch.add(new ChannelAndMessage(ctx, reply));
        if (sharedState.baos.size() >= batchSize) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Going to add record of size " + sharedState.baos.size());
            }
            addRecord();
        }
    }
}
From source file: com.epam.catgenome.manager.FileManager.java
private void fillSimpleIndexFile(final BlockCompressedDataInputStream stream, final DataOutputStream indexStream)
        throws IOException {
    do {
        final long seekPos = stream.available();
        final long filePosition = stream.getFilePointer();
        indexStream.writeLong(filePosition);
        indexStream.writeLong(seekPos);
        stream.seek(filePosition + seekPos - 1); // needed to get to the next block
        stream.read();
    } while (stream.available() != 0);
}
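A hypothetical reader for such an index (not part of FileManager.java): each entry is the pair of longs written above, so readLong calls simply mirror the two writeLong calls until the file is exhausted.

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.IOException;

final class SimpleIndexReader {
    /** Prints every (filePosition, seekPos) pair stored by a writer like fillSimpleIndexFile. */
    static void dumpIndex(String indexPath) throws IOException {
        try (DataInputStream in = new DataInputStream(new FileInputStream(indexPath))) {
            while (true) {
                long filePosition;
                try {
                    filePosition = in.readLong(); // first long of the pair
                } catch (EOFException eof) {
                    break;                        // clean end of index
                }
                long seekPos = in.readLong();     // second long of the pair
                System.out.println("block at " + filePosition + ", seekPos " + seekPos);
            }
        }
    }
}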
From source file: mp.teardrop.PlaybackService.java
/**
 * Save the service state to disk.
 *
 * @param pendingSeek The pendingSeek to store. Should be the current
 *                    MediaPlayer position or 0.
 */
public void saveState(int pendingSeek) {
    try {
        DataOutputStream out = new DataOutputStream(openFileOutput(STATE_FILE, 0));
        Song song = mCurrentSong;
        out.writeLong(STATE_FILE_MAGIC);
        out.writeInt(STATE_VERSION);
        out.writeInt(pendingSeek);
        out.writeLong(song == null ? -1 : song.id);
        mTimeline.writeState(getSharedPreferences(PREFS_SAVED_SONGS, 0));
        out.close();
    } catch (IOException e) {
        Log.w("OrchidMP", "Failed to save state", e);
    } catch (JSONException e) {
        Log.w("OrchidMP", "Failed to save state", e);
    }
}
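A hypothetical counterpart (not part of PlaybackService.java) that restores this state; the reads must mirror the writes above in both order and width, pairing readLong with writeLong and readInt with writeInt. Class and field names here are illustrative.

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

final class SavedState {
    long songId;
    int pendingSeek;

    /** Reads back the fields written by saveState(); returns null if magic or version does not match. */
    static SavedState load(String path, long expectedMagic, int expectedVersion) throws IOException {
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
            if (in.readLong() != expectedMagic) {  // mirrors out.writeLong(STATE_FILE_MAGIC)
                return null;
            }
            if (in.readInt() != expectedVersion) { // mirrors out.writeInt(STATE_VERSION)
                return null;
            }
            SavedState state = new SavedState();
            state.pendingSeek = in.readInt();      // mirrors out.writeInt(pendingSeek)
            state.songId = in.readLong();          // mirrors out.writeLong(song id, -1 if none)
            return state;
        }
    }
}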
From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java
/**
 * Get block checksum (MD5 of CRC32).
 * @param in
 */
void getBlockChecksum(DataInputStream in) throws IOException {
    final Block block = new Block(in.readLong(), 0, in.readLong());
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.READ);
        } catch (InvalidToken e) {
            try {
                out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                out.flush();
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_BLOCK_CHECKSUM for block " + block);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    }

    final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
    final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(metadataIn, BUFFER_SIZE));

    try {
        // read metadata file
        final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        final DataChecksum checksum = header.getChecksum();
        final int bytesPerCRC = checksum.getBytesPerChecksum();
        final long crcPerBlock = (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())
                / checksum.getChecksumSize();

        // compute block checksum
        final MD5Hash md5 = MD5Hash.digest(checksumIn);

        if (LOG.isDebugEnabled()) {
            LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock
                    + ", md5=" + md5);
        }

        // write reply
        out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
        out.writeInt(bytesPerCRC);
        out.writeLong(crcPerBlock);
        md5.write(out);
        out.flush();
    } finally {
        IOUtils.closeStream(out);
        IOUtils.closeStream(checksumIn);
        IOUtils.closeStream(metadataIn);
    }
}
From source file: se.llbit.chunky.renderer.scene.Scene.java
private synchronized void saveDump(RenderContext context, ProgressListener progressListener) {
    String fileName = name + ".dump";
    DataOutputStream out = null;
    try {
        String task = "Saving render dump";
        progressListener.setProgress(task, 1, 0, 2);
        Log.info("Saving render dump " + fileName);
        out = new DataOutputStream(new GZIPOutputStream(context.getSceneFileOutputStream(fileName)));
        out.writeInt(width);
        out.writeInt(height);
        out.writeInt(spp);
        out.writeLong(renderTime);
        for (int x = 0; x < width; ++x) {
            progressListener.setProgress(task, x + 1, 0, width);
            for (int y = 0; y < height; ++y) {
                out.writeDouble(samples[(y * width + x) * 3 + 0]);
                out.writeDouble(samples[(y * width + x) * 3 + 1]);
                out.writeDouble(samples[(y * width + x) * 3 + 2]);
            }
        }
        Log.info("Render dump saved");
    } catch (IOException e) {
        Log.warn("IO exception while saving render dump!", e);
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException e) {
            }
        }
    }
}
From source file: org.ramadda.repository.database.DatabaseManager.java
/**
 * _more_
 *
 * @param dos _more_
 * @param i _more_
 *
 * @throws Exception _more_
 */
private void writeLong(DataOutputStream dos, long i) throws Exception {
    dos.writeLong(i);
}
From source file: com.yahoo.omid.tso.TSOHandler.java
/**
 * Handle the CommitRequest message
 */
public void handle(CommitRequest msg, ChannelHandlerContext ctx) {
    CommitResponse reply = new CommitResponse(msg.startTimestamp);
    ByteArrayOutputStream baos = sharedState.baos;
    DataOutputStream toWAL = sharedState.toWAL;
    synchronized (sharedState) {
        // 0. check if it should abort
        if (msg.startTimestamp < timestampOracle.first()) {
            reply.committed = false;
            LOG.warn("Aborting transaction after restarting TSO");
        } else if (msg.rows.length > 0 && msg.startTimestamp < sharedState.largestDeletedTimestamp) {
            // Too old and not read only
            reply.committed = false; // set as abort
            LOG.warn("Too old starttimestamp: ST " + msg.startTimestamp + " MAX "
                    + sharedState.largestDeletedTimestamp);
        } else {
            // 1. check the write-write conflicts
            for (RowKey r : msg.rows) {
                long value;
                value = sharedState.hashmap.getLatestWriteForRow(r.hashCode());
                if (value != 0 && value > msg.startTimestamp) {
                    reply.committed = false; // set as abort
                    break;
                } else if (value == 0 && sharedState.largestDeletedTimestamp > msg.startTimestamp) {
                    // then it could have been committed after start
                    // timestamp but deleted by recycling
                    LOG.warn("Old transaction {Start timestamp " + msg.startTimestamp
                            + "} {Largest deleted timestamp " + sharedState.largestDeletedTimestamp + "}");
                    reply.committed = false; // set as abort
                    break;
                }
            }
        }

        if (reply.committed) {
            // 2. commit
            try {
                long commitTimestamp = timestampOracle.next(toWAL);
                sharedState.uncommited.commit(commitTimestamp);
                sharedState.uncommited.commit(msg.startTimestamp);
                reply.commitTimestamp = commitTimestamp;
                if (msg.rows.length > 0) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Adding commit to WAL");
                    }
                    toWAL.writeByte(LoggerProtocol.COMMIT);
                    toWAL.writeLong(msg.startTimestamp);
                    toWAL.writeLong(commitTimestamp);

                    long oldLargestDeletedTimestamp = sharedState.largestDeletedTimestamp;
                    for (RowKey r : msg.rows) {
                        sharedState.hashmap.putLatestWriteForRow(r.hashCode(), commitTimestamp);
                    }
                    sharedState.largestDeletedTimestamp = sharedState.hashmap.getLargestDeletedTimestamp();
                    sharedState.processCommit(msg.startTimestamp, commitTimestamp);
                    if (sharedState.largestDeletedTimestamp > oldLargestDeletedTimestamp) {
                        toWAL.writeByte(LoggerProtocol.LARGESTDELETEDTIMESTAMP);
                        toWAL.writeLong(sharedState.largestDeletedTimestamp);
                        Set<Long> toAbort = sharedState.uncommited
                                .raiseLargestDeletedTransaction(sharedState.largestDeletedTimestamp);
                        if (LOG.isWarnEnabled() && !toAbort.isEmpty()) {
                            LOG.warn("Slow transactions after raising max: " + toAbort.size());
                        }
                        synchronized (sharedMsgBufLock) {
                            for (Long id : toAbort) {
                                sharedState.hashmap.setHalfAborted(id);
                                queueHalfAbort(id);
                            }
                            queueLargestIncrease(sharedState.largestDeletedTimestamp);
                        }
                    }
                    if (sharedState.largestDeletedTimestamp > sharedState.previousLargestDeletedTimestamp
                            + TSOState.MAX_ITEMS) {
                        // schedule snapshot
                        executor.submit(createAbortedSnaphostTask);
                        sharedState.previousLargestDeletedTimestamp = sharedState.largestDeletedTimestamp;
                    }
                    synchronized (sharedMsgBufLock) {
                        queueCommit(msg.startTimestamp, commitTimestamp);
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        } else { // add it to the aborted list
            abortCount++;
            try {
                toWAL.writeByte(LoggerProtocol.ABORT);
                toWAL.writeLong(msg.startTimestamp);
            } catch (IOException e) {
                e.printStackTrace();
            }
            sharedState.processAbort(msg.startTimestamp);
            synchronized (sharedMsgBufLock) {
                queueHalfAbort(msg.startTimestamp);
            }
        }

        TSOHandler.transferredBytes.incrementAndGet();

        ChannelandMessage cam = new ChannelandMessage(ctx, reply);
        sharedState.nextBatch.add(cam);
        if (sharedState.baos.size() >= TSOState.BATCH_SIZE) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Going to add record of size " + sharedState.baos.size());
            }
            // sharedState.lh.asyncAddEntry(baos.toByteArray(), this,
            //         sharedState.nextBatch);
            sharedState.addRecord(baos.toByteArray(), new AddRecordCallback() {
                @Override
                public void addRecordComplete(int rc, Object ctx) {
                    if (rc != Code.OK) {
                        LOG.warn("Write failed: " + LoggerException.getMessage(rc));
                    } else {
                        synchronized (callbackLock) {
                            @SuppressWarnings("unchecked")
                            ArrayList<ChannelandMessage> theBatch = (ArrayList<ChannelandMessage>) ctx;
                            for (ChannelandMessage cam : theBatch) {
                                Channels.write(cam.ctx, Channels.succeededFuture(cam.ctx.getChannel()), cam.msg);
                            }
                        }
                    }
                }
            }, sharedState.nextBatch);
            sharedState.nextBatch = new ArrayList<ChannelandMessage>(sharedState.nextBatch.size() + 5);
            sharedState.baos.reset();
        }
    }
}
From source file: org.apache.hadoop.hdfs.AvatarClient.java
/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, AvatarProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    // get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    // get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        // try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            // connect to a datanode
            final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
            sock.setSoTimeout(timeout);

            DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                // read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { // first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                // read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                // read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    // compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}
From source file: org.apache.hadoop.hdfs.server.datanode.DataWriter.java
/**
 * Write a block to disk.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock() throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
    int namespaceid = in.readInt();
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    updateCurrentThreadName("receiving block " + block + " client=" + client);
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(namespaceid, block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        // get a connection back to the previous target
        replyOut = new DataOutputStream(new BufferedOutputStream(
                NetUtils.getOutputStream(s, datanode.socketWriteTimeout), SMALL_BUFFER_SIZE));

        //
        // Open network conn to backup machine, if
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (datanode.socketReadExtentionTimeout * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (datanode.socketWriteExtentionTimeout * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeInt(namespaceid);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.getDatanodeInfo() + ":Exception transfering block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        long totalReceiveSize = blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null,
                targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(namespaceid, block, null);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        } else {
            // Log the fact that the block has been received by this datanode and
            // has been written to the local disk on this datanode.
            LOG.info("Received Block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes() + " and written to local disk");
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(namespaceid, block);
        }

        long writeDuration = System.currentTimeMillis() - startTime;
        datanode.myMetrics.bytesWrittenLatency.inc(writeDuration);
        if (totalReceiveSize > KB_RIGHT_SHIFT_MIN) {
            datanode.myMetrics.bytesWrittenRate.inc((int) (totalReceiveSize >> KB_RIGHT_SHIFT_BITS),
                    writeDuration);
        }
    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}
From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java
/**
 * Write a block to disk.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream replyOut = null; // stream to prev target
    replyOut = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.WRITE);
        } catch (InvalidToken e) {
            try {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                    Text.writeString(replyOut, datanode.dnRegistration.getName());
                    replyOut.flush();
                }
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_WRITE_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(replyOut);
            }
        }
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup
    short mirrorInStatus = (short) DataTransferProtocol.OP_STATUS_SUCCESS;
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        //
        // Open network conn to backup machine, if
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                accessToken.write(mirrorOut);
                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    mirrorInStatus = mirrorIn.readShort();
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR);
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transfering block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            replyOut.writeShort(mirrorInStatus);
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }
    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}