List of usage examples for java.io DataOutputStream write
public synchronized void write(int b) throws IOException
Writes the specified byte (the low eight bits of the argument b) to the underlying output stream.
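As a minimal, self-contained sketch before the longer examples (the class name and the file name single-byte.bin are illustrative only, not taken from any of the cited sources): write(int b) keeps only the low eight bits of its argument, so the program below produces a file exactly one byte long.

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class WriteSingleByteExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources also closes the wrapped FileOutputStream
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream("single-byte.bin"))) {
            out.write(0x41); // only the low eight bits of the int are written
            out.flush();
        }
    }
}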
From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java
/**
 * append a ProxyCrawlHistoryItem to the active log
 *
 * @param item
 * @throws IOException
 */
void appendItemToLog(ProxyCrawlHistoryItem item) throws IOException {
    try {
        // open the log file ...
        DataOutputStream logStream = new DataOutputStream(new FileOutputStream(getActiveLogFilePath(), true));
        try {
            // reset crc calculator (single thread so no worries on synchronization)
            _crc16Out.reset();
            // reset output stream
            _outputBuffer.reset();
            // create checked stream
            CheckedOutputStream checkedStream = new CheckedOutputStream(_outputBuffer, _crc16Out);
            DataOutputStream dataOutputStream = new DataOutputStream(checkedStream);
            // write out item
            item.serialize(dataOutputStream, new BinaryProtocol());
            dataOutputStream.flush();
            // ok now write out sync,crc,length then data
            logStream.write(getLocalLogSyncBytes());
            logStream.writeInt((int) checkedStream.getChecksum().getValue());
            logStream.writeShort((short) _outputBuffer.getLength());
            logStream.write(_outputBuffer.getData(), 0, _outputBuffer.getLength());
            logStream.flush();
            logStream.close();
            logStream = null;
            // now we need to update the file header
            updateLogFileHeader(getActiveLogFilePath(), 1, LOG_ITEM_HEADER_SIZE + _outputBuffer.getLength());

            URLFP fingerprint = URLUtils.getURLFPFromURL(item.getOriginalURL(), true);
            // update local log
            synchronized (_localLogItems) {
                if (fingerprint != null) {
                    _localLogItems.put(fingerprint, item);
                }
            }

            ImmutableSet<CrawlList> lists = null;
            // and now walk lists updating them as necessary
            synchronized (_crawlLists) {
                lists = new ImmutableSet.Builder<CrawlList>().addAll(_crawlLists.values()).build();
            }
            for (CrawlList list : lists) {
                try {
                    list.updateItemState(fingerprint, item);
                } catch (Exception e) {
                    // ok, IF an error occurs updating the list metadata we need to continue along;
                    // it is critical for this thread to not die in such a circumstance
                    LOG.fatal("Error Updating List(" + list.getListId() + "):"
                            + CCStringUtils.stringifyException(e));
                    System.out.println("Exception in List Update(" + list.getListId() + "):"
                            + CCStringUtils.stringifyException(e));
                }
            }
        } finally {
            if (logStream != null) {
                logStream.close();
            }
        }
    } finally {
    }
}
From source file:org.openxdata.server.FormsServer.java
/**
 * Called when a new connection has been received. Failures are not handled
 * in this class as different servers (BT, SMS, etc) may want to handle them
 * differently.
 *
 * @param disParam - the stream to read from.
 * @param dosParam - the stream to write to.
 */
public void processConnection(InputStream disParam, OutputStream dosParam) {
    ZOutputStream gzip = new ZOutputStream(dosParam, JZlib.Z_BEST_COMPRESSION);
    DataOutputStream zdos = new DataOutputStream(gzip);
    byte responseStatus = ResponseStatus.STATUS_ERROR;
    try {
        try {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            DataInputStream dis = new DataInputStream(disParam);

            String name = dis.readUTF();
            String password = dis.readUTF();
            String serializer = dis.readUTF();
            String locale = dis.readUTF();
            byte action = dis.readByte();

            User user = authenticationService.authenticate(name, password);
            if (user == null)
                responseStatus = ResponseStatus.STATUS_ACCESS_DENIED;
            else {
                DataOutputStream dosTemp = new DataOutputStream(baos);
                if (action == ACTION_DOWNLOAD_FORMS)
                    formDownloadService.downloadForms(dosTemp, serializer, locale);
                else if (action == ACTION_UPLOAD_DATA)
                    submitXforms(dis, dosTemp, serializer);
                else if (action == ACTION_DOWNLOAD_USERS)
                    formDownloadService.downloadUsers(dosTemp, serializer);
                else if (action == ACTION_DOWNLOAD_USERS_AND_FORMS)
                    downloadUsersAndForms(dis.readInt(), dosTemp, serializer, locale);
                else if (action == ACTION_DOWNLOAD_STUDY_LIST)
                    formDownloadService.downloadStudies(dosTemp, serializer, locale);
                else if (action == ACTION_DOWNLOAD_LANGUAGES)
                    formDownloadService.downloadLocales(dis, dosTemp, serializer);
                else if (action == ACTION_DOWNLOAD_MENU_TEXT)
                    formDownloadService.downloadMenuText(dis, dosTemp, serializer, locale);
                else if (action == ACTION_DOWNLOAD_STUDY_FORMS)
                    formDownloadService.downloadForms(dis.readInt(), zdos, serializer, locale);
                else if (action == ACTION_DOWNLOAD_USERS_AND_ALL_FORMS)
                    downloadUsersAndAllForms(dosTemp, serializer, locale);

                responseStatus = ResponseStatus.STATUS_SUCCESS;
            }

            zdos.writeByte(responseStatus);
            if (responseStatus == ResponseStatus.STATUS_SUCCESS) {
                zdos.write(baos.toByteArray());
            }
        } catch (Exception ex) {
            log.error(ex.getMessage(), ex);
            zdos.writeByte(responseStatus);
        } finally {
            zdos.flush();
            gzip.finish();
        }
    } catch (IOException e) {
        // this is for exceptions occurring in the catch or finally clauses.
        log.error(e.getMessage(), e);
    }
}
From source file:core.AbstractTest.java
private int httpRequest(String sUrl, String sMethod, JsonNode payload, Map<String, String> mParameters) {
    Logger.info("\n\nREQUEST:\n" + sMethod + " " + sUrl + "\nHEADERS: " + mHeaders + "\nParameters: "
            + mParameters + "\nPayload: " + payload + "\n");

    HttpURLConnection conn = null;
    BufferedReader br = null;
    int nRet = 0;
    boolean fIsMultipart = false;

    try {
        setStatusCode(-1);
        setResponse(null);

        conn = getHttpConnection(sUrl, sMethod);
        if (mHeaders.size() > 0) {
            Set<String> keys = mHeaders.keySet();
            for (String sKey : keys) {
                conn.addRequestProperty(sKey, mHeaders.get(sKey));
                if (sKey.equals(HTTP.CONTENT_TYPE)) {
                    if (mHeaders.get(sKey).startsWith(MediaType.MULTIPART_FORM_DATA)) {
                        fIsMultipart = true;
                    }
                }
            }
        }

        if (payload != null || mParameters != null) {
            DataOutputStream out = new DataOutputStream(conn.getOutputStream());
            try {
                if (payload != null) {
                    //conn.setRequestProperty("Content-Length", "" + node.toString().length());
                    out.writeBytes(payload.toString());
                }
                if (mParameters != null) {
                    Set<String> sKeys = mParameters.keySet();
                    if (fIsMultipart) {
                        out.writeBytes("--" + BOUNDARY + "\r\n");
                    }
                    for (String sKey : sKeys) {
                        if (fIsMultipart) {
                            out.writeBytes("Content-Disposition: form-data; name=\"" + sKey + "\"\r\n\r\n");
                            out.writeBytes(mParameters.get(sKey));
                            out.writeBytes("\r\n");
                            out.writeBytes("--" + BOUNDARY + "--\r\n");
                        } else {
                            out.writeBytes(URLEncoder.encode(sKey, "UTF-8"));
                            out.writeBytes("=");
                            out.writeBytes(URLEncoder.encode(mParameters.get(sKey), "UTF-8"));
                            out.writeBytes("&");
                        }
                    }
                    if (fIsMultipart) {
                        if (nvpFile != null) {
                            File f = Play.application().getFile(nvpFile.getName());
                            if (f == null) {
                                assertFail("Cannot find file <" + nvpFile.getName() + ">");
                            }
                            FileBody fb = new FileBody(f);
                            out.writeBytes("Content-Disposition: form-data; name=\"" + PARAM_FILE
                                    + "\";filename=\"" + fb.getFilename() + "\"\r\n");
                            out.writeBytes("Content-Type: " + nvpFile.getValue() + "\r\n\r\n");
                            out.write(getResource(nvpFile.getName()));
                        }
                        out.writeBytes("\r\n--" + BOUNDARY + "--\r\n");
                    }
                }
            } catch (Exception ex) {
                assertFail("Send request: " + ex.getMessage());
            } finally {
                try {
                    out.flush();
                } catch (Exception ex) {
                }
                try {
                    out.close();
                } catch (Exception ex) {
                }
            }
        }

        nRet = conn.getResponseCode();
        setStatusCode(nRet);
        if (nRet / 100 != 2) {
            if (conn.getErrorStream() != null) {
                br = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
            }
        } else {
            if (conn.getInputStream() != null) {
                br = new BufferedReader(new InputStreamReader(conn.getInputStream()));
            }
        }
        if (br != null) {
            String temp = null;
            StringBuilder sb = new StringBuilder(1024);
            while ((temp = br.readLine()) != null) {
                sb.append(temp).append("\n");
            }
            setResponse(sb.toString().trim());
        }

        Logger.info("\nRESPONSE\nHTTP code: " + nRet + "\nContent: " + sResponse + "\n");
    } catch (Exception ex) {
        assertFail("httpRequest: " + ex.getMessage());
    } finally {
        if (br != null) {
            try {
                br.close();
            } catch (Exception ex) {
            }
        }
        if (conn != null) {
            conn.disconnect();
        }
    }
    return nRet;
}
From source file:org.apache.hadoop.hdfs.server.datanode.DataWriter.java
/**
 * Write a block to disk.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock() throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
    int namespaceid = in.readInt();
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    DataOutputStream replyOut = null; // stream to prev target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup

    updateCurrentThreadName("receiving block " + block + " client=" + client);
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(namespaceid, block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        // get a connection back to the previous target
        replyOut = new DataOutputStream(new BufferedOutputStream(
                NetUtils.getOutputStream(s, datanode.socketWriteTimeout), SMALL_BUFFER_SIZE));

        //
        // Open network conn to backup machine, if
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout
                        + (datanode.socketReadExtentionTimeout * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (datanode.socketWriteExtentionTimeout * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeInt(namespaceid);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (client.length() != 0) {
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.getDatanodeInfo() + ":Exception transfering block " + block
                            + " to mirror " + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || firstBadLink.length() > 0) {
                LOG.info("Datanode " + targets.length
                        + " forwarding connect ack to upstream firstbadlink is " + firstBadLink);
            }
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        long totalReceiveSize = blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null,
                targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(namespaceid, block, null);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        } else {
            // Log the fact that the block has been received by this datanode and
            // has been written to the local disk on this datanode.
            LOG.info("Received Block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes() + " and written to local disk");
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(namespaceid, block);
        }

        long writeDuration = System.currentTimeMillis() - startTime;
        datanode.myMetrics.bytesWrittenLatency.inc(writeDuration);
        if (totalReceiveSize > KB_RIGHT_SHIFT_MIN) {
            datanode.myMetrics.bytesWrittenRate.inc((int) (totalReceiveSize >> KB_RIGHT_SHIFT_BITS),
                    writeDuration);
        }
    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}
From source file:org.apache.hadoop.hdfs.AvatarClient.java
/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, AvatarProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
            sock.setSoTimeout(timeout);

            DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block
                            + " from datanode " + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}
From source file:org.apache.hadoop.hdfs.server.datanode.DataXceiver.java
/**
 * Write a block to disk.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream replyOut = null; // stream to prev target
    replyOut = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.WRITE);
        } catch (InvalidToken e) {
            try {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                    Text.writeString(replyOut, datanode.dnRegistration.getName());
                    replyOut.flush();
                }
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_WRITE_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(replyOut);
            }
        }
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup
    short mirrorInStatus = (short) DataTransferProtocol.OP_STATUS_SUCCESS;
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        //
        // Open network conn to backup machine, if
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout
                        + (HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                accessToken.write(mirrorOut);
                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    mirrorInStatus = mirrorIn.readShort();
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }
            } catch (IOException e) {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR);
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transfering block " + block
                            + " to mirror " + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                LOG.info("Datanode " + targets.length
                        + " forwarding connect ack to upstream firstbadlink is " + firstBadLink);
            }
            replyOut.writeShort(mirrorInStatus);
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }
    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}
From source file:org.apache.jxtadoop.hdfs.DFSClient.java
/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            /*final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
            sock.setSoTimeout(timeout);*/
            JxtaSocket jsock = DFSClient.getDfsClient().getDfsClientPeer()
                    .getInfoSocket(datanodes[j].getName());
            // jsock.setSoTimeout(timeout);
            jsock.setSoTimeout(Integer.parseInt(conf.get("hadoop.p2p.info.timeout")));

            /*DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(NetUtils.getOutputStream(jsock), DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(jsock));*/
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(jsock.getOutputStream()));
            DataInputStream in = new DataInputStream(jsock.getInputStream());

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block
                            + " from datanode " + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(jsock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}
From source file:JModem.java
public boolean receive(String tfile) throws IOException, InterruptedException {
    char checksum, index, blocknumber, errorcount;
    byte character;
    byte[] sector = new byte[SECSIZE];
    DataOutputStream foo;
    foo = new DataOutputStream(new FileOutputStream(tfile));
    System.out.println("you have " + SLEEP + " seconds...");

    /* wait for the user or remote to get his act together */
    gotChar = false;
    new IOTimer(SLEEP, "receive from remote").start();

    errStream.println("Starting receive...");
    putchar(NAK);
    errorcount = 0;
    blocknumber = 1;
    rxLoop: do {
        character = getchar();
        gotChar = true;
        if (character != EOT) {
            try {
                byte not_ch;
                if (character != SOH) {
                    errStream.println("Not SOH");
                    if (++errorcount < MAXERRORS)
                        continue rxLoop;
                    else
                        xerror();
                }
                character = getchar();
                not_ch = (byte) (~getchar());
                errStream.println("[" + character + "] ");
                if (character != not_ch) {
                    errStream.println("Blockcounts not ~");
                    ++errorcount;
                    continue rxLoop;
                }
                if (character != blocknumber) {
                    errStream.println("Wrong blocknumber");
                    ++errorcount;
                    continue rxLoop;
                }
                checksum = 0;
                for (index = 0; index < SECSIZE; index++) {
                    sector[index] = getchar();
                    checksum += sector[index];
                }
                if (checksum != getchar()) {
                    errStream.println("Bad checksum");
                    errorcount++;
                    continue rxLoop;
                }
                putchar(ACK);
                blocknumber++;
                try {
                    foo.write(sector);
                } catch (IOException e) {
                    errStream.println("write failed, blocknumber " + blocknumber);
                }
            } finally {
                if (errorcount != 0)
                    putchar(NAK);
            }
        }
    } while (character != EOT);

    foo.close();
    putchar(ACK); /* tell the other end we accepted his EOT */
    putchar(ACK);
    putchar(ACK);

    errStream.println("Receive Completed.");
    return true;
}