List of usage examples for java.io DataOutputStream writeLong
public final void writeLong(long v) throws IOException

Writes a long to the underlying output stream as eight bytes, high byte first.
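Before the scraped examples, a minimal sketch of that documented behavior (the class name WriteLongDemo is illustrative, not from any of the sources below): the value 0x0102030405060708L comes out as exactly eight bytes in big-endian order.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteLongDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);
        out.writeLong(0x0102030405060708L);
        byte[] bytes = buffer.toByteArray(); // length is always 8
        for (byte b : bytes) {
            System.out.printf("%02x ", b);   // prints: 01 02 03 04 05 06 07 08
        }
    }
}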
From source file:org.apache.jmeter.protocol.mqtt.client.MqttPublisher.java
public byte[] createPayload(String message, String useTimeStamp, String useNumSeq, String type_value,
        String format, String charset) throws IOException, NumberFormatException {
    ByteArrayOutputStream b = new ByteArrayOutputStream();
    DataOutputStream d = new DataOutputStream(b);
    // flags
    byte flags = 0x00;
    if ("TRUE".equals(useTimeStamp))
        flags |= 0x80;
    if ("TRUE".equals(useNumSeq))
        flags |= 0x40;
    if (MQTTPublisherGui.INT.equals(type_value))
        flags |= 0x20;
    if (MQTTPublisherGui.LONG.equals(type_value))
        flags |= 0x10;
    if (MQTTPublisherGui.FLOAT.equals(type_value))
        flags |= 0x08;
    if (MQTTPublisherGui.DOUBLE.equals(type_value))
        flags |= 0x04;
    if (MQTTPublisherGui.STRING.equals(type_value))
        flags |= 0x02;
    if (!"TEXT".equals(type_value)) {
        d.writeByte(flags);
    }
    // TimeStamp
    if ("TRUE".equals(useTimeStamp)) {
        Date date = new java.util.Date();
        d.writeLong(date.getTime());
    }
    // Number Sequence
    if ("TRUE".equals(useNumSeq)) {
        d.writeInt(numSeq++);
    }
    // Value
    if (MQTTPublisherGui.INT.equals(type_value)) {
        d.writeInt(Integer.parseInt(message));
    } else if (MQTTPublisherGui.LONG.equals(type_value)) {
        d.writeLong(Long.parseLong(message));
    } else if (MQTTPublisherGui.DOUBLE.equals(type_value)) {
        d.writeDouble(Double.parseDouble(message));
    } else if (MQTTPublisherGui.FLOAT.equals(type_value)) {
        // fixed: the original called writeDouble here, emitting eight bytes
        // for a value whose flags advertise a four-byte FLOAT
        d.writeFloat(Float.parseFloat(message));
    } else if (MQTTPublisherGui.STRING.equals(type_value)) {
        d.write(message.getBytes());
    } else if ("TEXT".equals(type_value)) {
        d.write(message.getBytes());
    }
    // Format: Encoding
    if (MQTTPublisherGui.BINARY.equals(format)) {
        BinaryCodec encoder = new BinaryCodec();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.BASE64.equals(format)) {
        return Base64.encodeBase64(b.toByteArray());
    } else if (MQTTPublisherGui.BINHEX.equals(format)) {
        Hex encoder = new Hex();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.PLAIN_TEXT.equals(format)) {
        String s = new String(b.toByteArray(), charset);
        return s.getBytes();
    } else
        return b.toByteArray();
}
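A consumer would read these fields back with DataInputStream in the same order they were written. This is a hedged sketch, not part of the JMeter plugin: it assumes a LONG payload with timestamp and sequence number enabled and no output encoding applied.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class PayloadDecoder {
    // Assumes: type_value = LONG, useTimeStamp = useNumSeq = "TRUE", default format.
    public static void decode(byte[] payload) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
        byte flags = in.readByte();           // 0x80 | 0x40 | 0x10
        long timestampMillis = in.readLong(); // written by d.writeLong(date.getTime())
        int sequence = in.readInt();          // written by d.writeInt(numSeq++)
        long value = in.readLong();           // written by d.writeLong(Long.parseLong(message))
        System.out.printf("flags=%02x ts=%d seq=%d value=%d%n",
                flags, timestampMillis, sequence, value);
    }
}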
From source file:org.apache.jmeter.protocol.mqtt.client.MqttPublisher.java
public byte[] createRandomPayload(String Seed, String min, String max, String type_random, String useTimeStamp,
        String useNumSeq, String type_value, String format, String charset)
        throws IOException, NumberFormatException {
    ByteArrayOutputStream b = new ByteArrayOutputStream();
    DataOutputStream d = new DataOutputStream(b);
    // flags
    byte flags = 0x00;
    if ("TRUE".equals(useTimeStamp))
        flags |= 0x80;
    if ("TRUE".equals(useNumSeq))
        flags |= 0x40;
    if (MQTTPublisherGui.INT.equals(type_value))
        flags |= 0x20;
    if (MQTTPublisherGui.LONG.equals(type_value))
        flags |= 0x10;
    if (MQTTPublisherGui.FLOAT.equals(type_value))
        flags |= 0x08;
    if (MQTTPublisherGui.DOUBLE.equals(type_value))
        flags |= 0x04;
    if (MQTTPublisherGui.STRING.equals(type_value))
        flags |= 0x02;
    if (!"TEXT".equals(type_value)) {
        d.writeByte(flags);
    }
    // TimeStamp
    if ("TRUE".equals(useTimeStamp)) {
        Date date = new java.util.Date();
        d.writeLong(date.getTime());
    }
    // Number Sequence
    if ("TRUE".equals(useNumSeq)) {
        d.writeInt(numSeq++);
    }
    // Value
    if (MQTTPublisherGui.PSEUDO.equals(type_random)) {
        generator.setSeed(Long.parseLong(Seed));
        if (MQTTPublisherGui.INT.equals(type_value)) {
            d.writeInt(generator.nextInt(Integer.parseInt(max) - Integer.parseInt(min)) + Integer.parseInt(min));
        } else if (MQTTPublisherGui.LONG.equals(type_value)) {
            long Max = Long.parseLong(max);
            long Min = Long.parseLong(min);
            d.writeLong((Math.abs(generator.nextLong() % (Max - Min)) + Min));
        } else if (MQTTPublisherGui.DOUBLE.equals(type_value)) {
            double Max = Double.parseDouble(max);
            double Min = Double.parseDouble(min);
            d.writeDouble((Min + (Max - Min) * generator.nextDouble()));
        } else if (MQTTPublisherGui.FLOAT.equals(type_value)) {
            float Max = Float.parseFloat(max);
            float Min = Float.parseFloat(min);
            d.writeFloat((Min + (Max - Min) * generator.nextFloat()));
        }
    } else if (MQTTPublisherGui.SECURE.equals(type_random)) {
        secureGenerator.setSeed(Long.parseLong(Seed));
        if (MQTTPublisherGui.INT.equals(type_value)) {
            d.writeInt(secureGenerator.nextInt(Integer.parseInt(max) - Integer.parseInt(min))
                    + Integer.parseInt(min));
        } else if (MQTTPublisherGui.LONG.equals(type_value)) {
            long Max = Long.parseLong(max);
            long Min = Long.parseLong(min);
            d.writeLong((Math.abs(secureGenerator.nextLong() % (Max - Min)) + Min));
        } else if (MQTTPublisherGui.DOUBLE.equals(type_value)) {
            double Max = Double.parseDouble(max);
            double Min = Double.parseDouble(min);
            d.writeDouble((Min + (Max - Min) * secureGenerator.nextDouble()));
        } else if (MQTTPublisherGui.FLOAT.equals(type_value)) {
            float Max = Float.parseFloat(max);
            float Min = Float.parseFloat(min);
            d.writeFloat((Min + (Max - Min) * secureGenerator.nextFloat()));
        }
    }
    // Format: Encoding
    if (MQTTPublisherGui.BINARY.equals(format)) {
        BinaryCodec encoder = new BinaryCodec();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.BASE64.equals(format)) {
        return Base64.encodeBase64(b.toByteArray());
    } else if (MQTTPublisherGui.BINHEX.equals(format)) {
        Hex encoder = new Hex();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.PLAIN_TEXT.equals(format)) {
        String s = new String(b.toByteArray(), charset);
        return s.getBytes();
    } else
        return b.toByteArray();
}
From source file:org.apache.hadoop.hdfs.server.datanode.DataXceiver.java
/**
 * Receive a block and write it to disk, it then notifies the namenode to
 * remove the copy from the source.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void replaceBlock(DataInputStream in) throws IOException {
    /* read header */
    long blockId = in.readLong();
    Block block = new Block(blockId, dataXceiverServer.estimateBlockSize, in.readLong()); // block id & generation stamp
    String sourceID = Text.readString(in); // read del hint
    DatanodeInfo proxySource = new DatanodeInfo(); // read proxy source
    proxySource.readFields(in);
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.REPLACE);
        } catch (InvalidToken e) {
            LOG.warn("Invalid access token in request from " + remoteAddress
                    + " for OP_REPLACE_BLOCK for block " + block);
            sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN,
                    datanode.socketWriteTimeout);
            return;
        }
    }

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
        LOG.warn("Not able to receive block " + blockId + " from " + s.getRemoteSocketAddress()
                + " because threads quota is exceeded.");
        sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR, datanode.socketWriteTimeout);
        return;
    }

    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    short opStatus = DataTransferProtocol.OP_STATUS_SUCCESS;
    BlockReceiver blockReceiver = null;
    DataInputStream proxyReply = null;

    try {
        // get the output stream to the proxy
        InetSocketAddress proxyAddr = NetUtils.createSocketAddr(proxySource.getName());
        proxySock = datanode.newSocket();
        NetUtils.connect(proxySock, proxyAddr, datanode.socketTimeout);
        proxySock.setSoTimeout(datanode.socketTimeout);

        OutputStream baseStream = NetUtils.getOutputStream(proxySock, datanode.socketWriteTimeout);
        proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));

        /* send request to the proxy */
        proxyOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // transfer version
        proxyOut.writeByte(DataTransferProtocol.OP_COPY_BLOCK); // op code
        proxyOut.writeLong(block.getBlockId()); // block id
        proxyOut.writeLong(block.getGenerationStamp()); // generation stamp
        accessToken.write(proxyOut);
        proxyOut.flush();

        // receive the response from the proxy
        proxyReply = new DataInputStream(
                new BufferedInputStream(NetUtils.getInputStream(proxySock), BUFFER_SIZE));
        short status = proxyReply.readShort();
        if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
            if (status == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN) {
                throw new IOException("Copy block " + block + " from " + proxySock.getRemoteSocketAddress()
                        + " failed due to access token error");
            }
            throw new IOException(
                    "Copy block " + block + " from " + proxySock.getRemoteSocketAddress() + " failed");
        }
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, proxyReply, proxySock.getRemoteSocketAddress().toString(),
                proxySock.getLocalSocketAddress().toString(), false, "", null, datanode);

        // receive a block
        blockReceiver.receiveBlock(null, null, null, null, dataXceiverServer.balanceThrottler, -1);

        // notify name node
        datanode.notifyNamenodeReceivedBlock(block, sourceID);

        LOG.info("Moved block " + block + " from " + s.getRemoteSocketAddress());
    } catch (IOException ioe) {
        opStatus = DataTransferProtocol.OP_STATUS_ERROR;
        throw ioe;
    } finally {
        // receive the last byte that indicates the proxy released its thread resource
        if (opStatus == DataTransferProtocol.OP_STATUS_SUCCESS) {
            try {
                proxyReply.readChar();
            } catch (IOException ignored) {
            }
        }

        // now release the thread resource
        dataXceiverServer.balanceThrottler.release();

        // send response back
        try {
            sendResponse(s, opStatus, datanode.socketWriteTimeout);
        } catch (IOException ioe) {
            LOG.warn("Error writing reply back to " + s.getRemoteSocketAddress());
        }
        IOUtils.closeStream(proxyOut);
        IOUtils.closeStream(blockReceiver);
        IOUtils.closeStream(proxyReply);
    }
}
From source file:com.jivesoftware.os.amza.service.replication.http.HttpRowsTaker.java
private void flushQueues(RingHost ringHost, Ackable ackable, long currentVersion) throws Exception {

    Map<VersionedPartitionName, RowsTakenPayload> rowsTaken;
    PongPayload pong;
    ackable.semaphore.acquire(Short.MAX_VALUE);
    try {
        rowsTaken = ackable.rowsTakenPayloads.getAndSet(Maps.newConcurrentMap());
        pong = ackable.pongPayloads.getAndSet(null);
    } finally {
        ackable.semaphore.release(Short.MAX_VALUE);
    }

    if (rowsTaken != null && !rowsTaken.isEmpty()) {
        LOG.inc("flush>rowsTaken>pow>" + UIO.chunkPower(rowsTaken.size(), 0));
    }

    if (rowsTaken != null && !rowsTaken.isEmpty() || pong != null) {
        flushExecutor.submit(() -> {
            try {
                String endpoint = "/amza/ackBatch";

                ringClient.call("",
                        new ConnectionDescriptorSelectiveStrategy(
                                new HostPort[] { new HostPort(ringHost.getHost(), ringHost.getPort()) }),
                        "ackBatch", httpClient -> {

                            HttpResponse response = httpClient.postStreamableRequest(endpoint, out -> {
                                try {
                                    DataOutputStream dos = new DataOutputStream(out);
                                    if (rowsTaken.isEmpty()) {
                                        dos.write((byte) 0); // hasMore for rowsTaken stream
                                    } else {
                                        for (Entry<VersionedPartitionName, RowsTakenPayload> e : rowsTaken.entrySet()) {
                                            dos.write((byte) 1); // hasMore for rowsTaken stream

                                            VersionedPartitionName versionedPartitionName = e.getKey();
                                            byte[] bytes = versionedPartitionName.toBytes();
                                            dos.writeShort(bytes.length);
                                            dos.write(bytes);

                                            RowsTakenPayload rowsTakenPayload = e.getValue();
                                            bytes = rowsTakenPayload.ringMember.toBytes();
                                            dos.writeShort(bytes.length);
                                            dos.write(bytes);

                                            dos.writeLong(rowsTakenPayload.takeSessionId);
                                            dos.writeLong(rowsTakenPayload.takeSharedKey);
                                            dos.writeLong(rowsTakenPayload.txId);
                                            dos.writeLong(rowsTakenPayload.leadershipToken);
                                        }
                                        dos.write((byte) 0); // EOS for rowsTaken stream
                                    }

                                    if (pong == null) {
                                        dos.write((byte) 0); // has pong
                                    } else {
                                        dos.write((byte) 1); // has pong
                                        byte[] bytes = pong.ringMember.toBytes();
                                        dos.writeShort(bytes.length);
                                        dos.write(bytes);

                                        dos.writeLong(pong.takeSessionId);
                                        dos.writeLong(pong.takeSharedKey);
                                    }
                                } catch (Exception x) {
                                    throw new RuntimeException("Failed while streaming ackBatch.", x);
                                } finally {
                                    out.flush();
                                    out.close();
                                }
                            }, null);

                            if (response.getStatusCode() < 200 || response.getStatusCode() >= 300) {
                                throw new NonSuccessStatusCodeException(response.getStatusCode(),
                                        response.getStatusReasonPhrase());
                            }
                            Boolean result = (Boolean) conf.asObject(response.getResponseBody());
                            return new ClientResponse<>(result, true);
                        });
            } catch (Exception x) {
                LOG.warn("Failed to deliver acks for remote:{}", new Object[] { ringHost }, x);
            } finally {
                ackable.running.set(false);
                LOG.inc("flush>version>consume>" + name);
                synchronized (flushVersion) {
                    if (currentVersion != flushVersion.get()) {
                        flushVersion.notify();
                    }
                }
            }
        });
    } else {
        ackable.running.set(false);
        LOG.inc("flush>version>consume>" + name);
        synchronized (flushVersion) {
            if (currentVersion != flushVersion.get()) {
                flushVersion.notify();
            }
        }
    }
}
From source file:org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageDecompressor.java
/**
 * Process image file.
 */
private void go() throws IOException {
    long start = System.currentTimeMillis();
    System.out.println("Decompressing image file: " + inputFile + " to " + outputFile);
    DataInputStream in = null;
    DataOutputStream out = null;

    try {
        // setup in
        PositionTrackingInputStream ptis = new PositionTrackingInputStream(
                new FileInputStream(new File(inputFile)));
        in = new DataInputStream(ptis);

        // read header information
        int imgVersion = in.readInt();
        if (!LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
            System.out.println("Image is not compressed. No output will be produced.");
            return;
        }
        int namespaceId = in.readInt();
        long numFiles = in.readLong();
        long genstamp = in.readLong();

        long imgTxId = -1;
        if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
            imgTxId = in.readLong();
        }
        FSImageCompression compression = FSImageCompression.readCompressionHeader(new Configuration(), in);

        if (compression.isNoOpCompression()) {
            System.out.println("Image is not compressed. No output will be produced.");
            return;
        }
        in = BufferedByteInputStream.wrapInputStream(compression.unwrapInputStream(in),
                FSImage.LOAD_SAVE_BUFFER_SIZE, FSImage.LOAD_SAVE_CHUNK_SIZE);
        System.out.println("Starting decompression.");

        // setup output
        out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(outputFile)));

        // write back the uncompressed information
        out.writeInt(imgVersion);
        out.writeInt(namespaceId);
        out.writeLong(numFiles);
        out.writeLong(genstamp);
        if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
            out.writeLong(imgTxId);
        }
        // no compression
        out.writeBoolean(false);

        // copy the data
        long size = new File(inputFile).length();
        // read in 1MB chunks
        byte[] block = new byte[1024 * 1024];
        while (true) {
            int bytesRead = in.read(block);
            if (bytesRead <= 0)
                break;
            out.write(block, 0, bytesRead);
            printProgress(ptis.getPos(), size);
        }

        out.close();

        long stop = System.currentTimeMillis();
        System.out.println("Input file : " + inputFile + " size: " + size);
        System.out.println("Output file: " + outputFile + " size: " + new File(outputFile).length());
        System.out.println("Decompression completed in " + (stop - start) + " ms.");
    } finally {
        if (in != null)
            in.close();
        if (out != null)
            out.close();
    }
}
From source file:org.hyperic.hq.agent.db.DiskList.java
/**
 * Add the string to the list of data being stored in the DiskList.
 *
 * @param data Data to add to the end of the list
 */
public void addToList(String data) throws IOException {
    if (this.closed) {
        throw new IOException("Datafile already closed");
    }
    ByteArrayOutputStream bOs = new ByteArrayOutputStream(this.recordSize);
    DataOutputStream dOs = new DataOutputStream(bOs);

    dOs.writeUTF(data);
    if (bOs.size() > this.recordSize) {
        throw new IOException("Data length(" + bOs.size() + ") exceeds "
                + "maximum record length(" + this.recordSize + ")");
    }

    final long start = now();
    bOs.write(this.padBytes, 0, this.recordSize - bOs.size());
    byte[] bytes = bOs.toByteArray();

    synchronized (this.dataFile) {
        Long firstFreeL;
        long firstFree;

        this.modNum = this.rand.nextInt();

        try {
            firstFreeL = (Long) this.freeList.first();
            firstFree = firstFreeL.longValue();
            this.freeList.remove(firstFreeL);
        } catch (NoSuchElementException exc) {
            // Else we're adding to the end
            firstFree = this.indexFile.length() / IDX_REC_LEN;
        }

        // Write the record to the data file
        this.dataFile.seek(firstFree * this.recordSize);
        this.dataFile.write(bytes);

        bOs.reset();
        dOs.writeBoolean(true); // Is Used
        dOs.writeLong(this.lastRec); // Previous record idx
        dOs.writeLong(-1); // Next record idx

        // Write the index for the record we just made
        this.indexFile.seek(firstFree * IDX_REC_LEN);
        bytes = bOs.toByteArray();
        this.indexFile.write(bytes, 0, bytes.length);

        // Update the previous 'last' record to point to us
        if (this.lastRec != -1) {
            this.indexFile.seek((this.lastRec * IDX_REC_LEN) + 1 + 8);
            this.indexFile.writeLong(firstFree);
        }

        this.lastRec = firstFree;
        if (this.firstRec == -1) {
            this.firstRec = firstFree;
        }
    }
    if (this.dataFile.length() > this.maxLength) {
        this.log.error("Maximum file size for data file: " + this.fileName + " reached (" + this.maxLength
                + " bytes), truncating.");
        deleteAllRecords();
    }
    long duration = now() - start;
    statsCollector.addStat(duration, DISK_LIST_ADD_TO_LIST_TIME);
}
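The fixed index-record layout is what makes the seek arithmetic above work: writeBoolean emits one byte and each writeLong eight, so an index record is 1 + 8 + 8 = 17 bytes, and seeking to (lastRec * IDX_REC_LEN) + 1 + 8 lands exactly on the previous record's "next record idx" field. A minimal sketch of the same layout (the class and constant names here are illustrative, not Hyperic's):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class IndexRecordLayout {
    // 1 byte (used flag) + 8 bytes (prev idx) + 8 bytes (next idx)
    static final int IDX_REC_LEN = 1 + 8 + 8;

    static byte[] encode(boolean used, long prevIdx, long nextIdx) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream(IDX_REC_LEN);
        DataOutputStream out = new DataOutputStream(buf);
        out.writeBoolean(used); // offset 0, 1 byte
        out.writeLong(prevIdx); // offset 1, 8 bytes
        out.writeLong(nextIdx); // offset 9, 8 bytes: the field patched by seek(... + 1 + 8)
        return buf.toByteArray(); // always IDX_REC_LEN bytes
    }
}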
From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java
private void cacheCrawlHistoryLog(File localCacheDir, long timestamp) throws IOException {

    SequenceFile.Reader reader = null;
    Path mapFilePath = new Path(_remoteDataDirectory, CRAWL_HISTORY_HDFS_LOGFILE_PREFIX + timestamp);
    Path indexFilePath = new Path(mapFilePath, "index");
    Path dataFilePath = new Path(mapFilePath, "data");
    File cacheFilePath = new File(localCacheDir, CRAWL_HISTORY_HDFS_LOGFILE_PREFIX + timestamp);

    SequenceFile.Reader indexReader = new SequenceFile.Reader(_remoteFileSystem, dataFilePath,
            CrawlEnvironment.getHadoopConfig());

    ValueBytes valueBytes = indexReader.createValueBytes();
    DataOutputBuffer keyBytes = new DataOutputBuffer();
    DataInputBuffer keyBuffer = new DataInputBuffer();
    DataOutputBuffer finalOutputStream = new DataOutputBuffer();
    DataOutputBuffer uncompressedValueBytes = new DataOutputBuffer();
    URLFP fp = new URLFP();

    try {
        while (indexReader.nextRaw(keyBytes, valueBytes) != -1) {

            keyBuffer.reset(keyBytes.getData(), 0, keyBytes.getLength());
            // read fingerprint ...
            fp.readFields(keyBuffer);
            // write hash only
            finalOutputStream.writeLong(fp.getUrlHash());
            uncompressedValueBytes.reset();
            // write value bytes to intermediate buffer ...
            valueBytes.writeUncompressedBytes(uncompressedValueBytes);
            // write out uncompressed length
            WritableUtils.writeVInt(finalOutputStream, uncompressedValueBytes.getLength());
            // write out bytes
            finalOutputStream.write(uncompressedValueBytes.getData(), 0, uncompressedValueBytes.getLength());
        }
        // delete existing ...
        cacheFilePath.delete();
        // compute crc ...
        CRC32 crc = new CRC32();
        crc.update(finalOutputStream.getData(), 0, finalOutputStream.getLength());
        // open final output stream
        DataOutputStream fileOutputStream = new DataOutputStream(
                new BufferedOutputStream(new FileOutputStream(cacheFilePath)));

        try {
            fileOutputStream.writeLong(crc.getValue());
            fileOutputStream.write(finalOutputStream.getData(), 0, finalOutputStream.getLength());
            fileOutputStream.flush();
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
            fileOutputStream.close();
            fileOutputStream = null;
            cacheFilePath.delete();
            throw e;
        } finally {
            if (fileOutputStream != null) {
                fileOutputStream.close();
            }
        }
    } finally {
        if (indexReader != null) {
            indexReader.close();
        }
    }
}
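The eight-byte CRC that writeLong(crc.getValue()) places at the head of the cache file invites an integrity check when the file is read back. This is a hedged sketch, not CommonCrawl code; it only assumes the layout written above (8-byte CRC, then the raw record bytes):

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.zip.CRC32;

public class CacheFileVerifier {
    // Reads the leading CRC long, recomputes CRC32 over the rest of the
    // file, and compares the two.
    static boolean verify(File cacheFile) throws IOException {
        try (DataInputStream in = new DataInputStream(new FileInputStream(cacheFile))) {
            long expectedCrc = in.readLong(); // mirrors writeLong(crc.getValue())
            CRC32 crc = new CRC32();
            byte[] chunk = new byte[64 * 1024];
            int read;
            while ((read = in.read(chunk)) > 0) {
                crc.update(chunk, 0, read);
            }
            return crc.getValue() == expectedCrc;
        }
    }
}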
From source file:org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java
/**
 * Get block checksum (MD5 of CRC32).
 * @param in
 */
void getBlockChecksum(DataInputStream in) throws IOException {
    LOG.debug("Method called : getBlockChecksum()");
    final Block block = new Block(in.readLong(), 0, in.readLong());
    DataOutputStream out = null;
    final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
    final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(metadataIn, BUFFER_SIZE));

    try {
        // read metadata file
        final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        final DataChecksum checksum = header.getChecksum();
        final int bytesPerCRC = checksum.getBytesPerChecksum();
        final long crcPerBlock = (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())
                / checksum.getChecksumSize();

        // compute block checksum
        final MD5Hash md5 = MD5Hash.digest(checksumIn);

        if (LOG.isDebugEnabled()) {
            LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock
                    + ", md5=" + md5);
        }

        // write reply
        // out = new DataOutputStream(
        //         NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
        out = new DataOutputStream(s.getOutputStream());
        out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
        out.writeInt(bytesPerCRC);
        out.writeLong(crcPerBlock);
        md5.write(out);
        out.flush();
    } finally {
        LOG.debug("Finalizing : getBlockChecksum()");
        IOUtils.closeStream(out);
        IOUtils.closeStream(checksumIn);
        IOUtils.closeStream(metadataIn);
    }
}
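On the wire this reply is a short status, an int bytesPerCRC, a long crcPerBlock, then the 16-byte MD5 digest. A hedged client-side sketch (the real consumer is HDFS's client code; this standalone reader only mirrors the field order written above):

import java.io.DataInputStream;
import java.io.IOException;

public class ChecksumReplyReader {
    // Mirrors the write order above: writeShort, writeInt, writeLong, md5.write.
    static void read(DataInputStream reply) throws IOException {
        short status = reply.readShort();    // OP_STATUS_SUCCESS expected
        int bytesPerCRC = reply.readInt();
        long crcPerBlock = reply.readLong(); // written with writeLong
        byte[] md5 = new byte[16];           // MD5 is a 16-byte digest
        reply.readFully(md5);
        System.out.println("status=" + status + " bytesPerCRC=" + bytesPerCRC
                + " crcPerBlock=" + crcPerBlock);
    }
}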
From source file:ClassFile.java
public void write(DataOutputStream dos, ConstantPoolInfo pool[]) throws IOException, Exception {
    dos.write(type);
    switch (type) {
    case CLASS:
    case STRING:
        dos.writeShort(indexOf(arg1, pool));
        break;
    case FIELDREF:
    case METHODREF:
    case INTERFACE:
    case NAMEANDTYPE:
        dos.writeShort(indexOf(arg1, pool));
        dos.writeShort(indexOf(arg2, pool));
        break;
    case INTEGER:
        dos.writeInt(intValue);
        break;
    case FLOAT:
        dos.writeFloat(floatValue);
        break;
    case LONG:
        dos.writeLong(longValue);
        break;
    case DOUBLE:
        dos.writeDouble(doubleValue);
        break;
    case ASCIZ:
    case UNICODE:
        dos.writeShort(strValue.length());
        dos.writeBytes(strValue);
        break;
    default:
        throw new Exception("ConstantPoolInfo::write() - bad type.");
    }
}
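For context on the LONG case: in the class-file format a CONSTANT_Long_info entry is a one-byte tag (5) followed by the eight-byte value, stored as high then low 32-bit halves, which is exactly the big-endian order writeLong produces; the JVM spec also makes such an entry occupy two constant-pool slots. A minimal sketch of just that entry (class and method names are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ConstantLongEntry {
    static final int CONSTANT_LONG_TAG = 5; // tag value from the JVM spec

    // Emits CONSTANT_Long_info: u1 tag, u4 high_bytes, u4 low_bytes.
    // That is byte-for-byte what writeByte followed by writeLong produce.
    static byte[] encode(long value) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream(9);
        DataOutputStream out = new DataOutputStream(buf);
        out.writeByte(CONSTANT_LONG_TAG);
        out.writeLong(value); // high 4 bytes first, then low 4 bytes
        return buf.toByteArray(); // 9 bytes; the entry consumes two pool slots
    }
}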
From source file:org.apache.jxtadoop.hdfs.server.datanode.DataXceiver.java
/**
 * Receive a block and write it to disk, it then notifies the namenode to
 * remove the copy from the source.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void replaceBlock(DataInputStream in) throws IOException {
    LOG.debug("Method called : replaceBlock()");
    /* read header */
    long blockId = in.readLong();
    Block block = new Block(blockId, dataXceiverServer.estimateBlockSize, in.readLong()); // block id & generation stamp
    String sourceID = Text.readString(in); // read del hint
    DatanodeInfo proxySource = new DatanodeInfo(); // read proxy source
    proxySource.readFields(in);

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
        LOG.warn("Not able to receive block " + blockId + " from " + s.getRemoteSocketAddress()
                + " because threads quota is exceeded.");
        sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR, datanode.socketWriteTimeout);
        return;
    }

    JxtaSocket proxySock = null;
    DataOutputStream proxyOut = null;
    short opStatus = DataTransferProtocol.OP_STATUS_SUCCESS;
    BlockReceiver blockReceiver = null;
    DataInputStream proxyReply = null;
    ReliableOutputStream baseStream = null;
    ReliableInputStream replyStream = null;

    try {
        // get the output stream to the proxy
        // InetSocketAddress proxyAddr = NetUtils.createSocketAddr(proxySource.getName());
        // proxySock = datanode.newSocket();
        proxySock = datanode.getDnPeer().getInfoSocket(proxySource.getPeerId().toString());
        // NetUtils.connect(proxySock, proxyAddr, datanode.socketTimeout);
        // proxySock.setSoTimeout(datanode.socketTimeout);

        // OutputStream baseStream = NetUtils.getOutputStream(proxySock, datanode.socketWriteTimeout);
        // proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));
        baseStream = (ReliableOutputStream) proxySock.getOutputStream();
        proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream));

        /* send request to the proxy */
        proxyOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // transfer version
        proxyOut.writeByte(DataTransferProtocol.OP_COPY_BLOCK); // op code
        proxyOut.writeLong(block.getBlockId()); // block id
        proxyOut.writeLong(block.getGenerationStamp()); // generation stamp
        proxyOut.flush();

        // receive the response from the proxy
        // proxyReply = new DataInputStream(new BufferedInputStream(
        //         NetUtils.getInputStream(proxySock), BUFFER_SIZE));
        replyStream = (ReliableInputStream) proxySock.getInputStream();
        proxyReply = new DataInputStream(new BufferedInputStream(replyStream));

        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, proxyReply, proxySock.getRemoteSocketAddress().toString(),
                proxySock.getLocalSocketAddress().toString(), false, "", null, datanode);

        // receive a block
        blockReceiver.receiveBlock(null, null, null, null, dataXceiverServer.balanceThrottler, -1);

        // notify name node
        datanode.notifyNamenodeReceivedBlock(block, sourceID);

        LOG.info("Moved block " + block + " from " + s.getRemoteSocketAddress());
    } catch (IOException ioe) {
        opStatus = DataTransferProtocol.OP_STATUS_ERROR;
        throw ioe;
    } finally {
        // receive the last byte that indicates the proxy released its thread resource
        if (opStatus == DataTransferProtocol.OP_STATUS_SUCCESS) {
            try {
                proxyReply.readChar();
            } catch (IOException ignored) {
            }
        }

        // now release the thread resource
        dataXceiverServer.balanceThrottler.release();

        // send response back
        try {
            sendResponse(s, opStatus, datanode.socketWriteTimeout);
        } catch (IOException ioe) {
            LOG.warn("Error writing reply back to " + s.getRemoteSocketAddress());
        }

        LOG.debug("Finalizing : replaceBlock()");
        LOG.debug("baseStream queue empty : " + baseStream.isQueueEmpty());
        IOUtils.closeStream(proxyOut);
        IOUtils.closeStream(blockReceiver);
        IOUtils.closeStream(proxyReply);
    }
}