List of usage examples for java.io.DataInputStream.readLong

public final long readLong() throws IOException

readLong is declared by the DataInput interface, which DataInputStream implements. It reads eight input bytes (high byte first) and returns them as a long.
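As a warm-up before the real-world examples, here is a minimal, self-contained round trip (not taken from any of the sources below): writeLong stores a value as eight bytes, big-endian, and readLong reads it back.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReadLongDemo {
    public static void main(String[] args) throws IOException {
        // Write a long to an in-memory stream (8 bytes, big-endian).
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeLong(1234567890123L);
        }

        // Read it back with readLong().
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(in.readLong()); // prints 1234567890123
        }
    }
}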
From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Get block checksum (MD5 of CRC32).
 * @param in
 */
void getBlockChecksum(DataInputStream in) throws IOException {
    final Block block = new Block(in.readLong(), 0, in.readLong());
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.READ);
        } catch (InvalidToken e) {
            try {
                out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                out.flush();
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_BLOCK_CHECKSUM for block " + block);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    }

    final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
    final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(metadataIn, BUFFER_SIZE));

    try {
        // read metadata file
        final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
        final DataChecksum checksum = header.getChecksum();
        final int bytesPerCRC = checksum.getBytesPerChecksum();
        final long crcPerBlock = (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())
                / checksum.getChecksumSize();

        // compute block checksum
        final MD5Hash md5 = MD5Hash.digest(checksumIn);

        if (LOG.isDebugEnabled()) {
            LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock
                    + ", md5=" + md5);
        }

        // write reply
        out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
        out.writeInt(bytesPerCRC);
        out.writeLong(crcPerBlock);
        md5.write(out);
        out.flush();
    } finally {
        IOUtils.closeStream(out);
        IOUtils.closeStream(checksumIn);
        IOUtils.closeStream(metadataIn);
    }
}
From source file: com.hadoopvietnam.cache.memcached.MemcachedCache.java

/**
 * Convert the memcached object into a List<Long>.
 *
 * @param inBytesOfLongs the byte[] to convert
 * @return the byte[] as List<Long>, null if not valid or empty bytes
 * @throws IOException thrown if any errors
 */
protected ArrayList<Long> getListFromBytes(final Object inBytesOfLongs) throws IOException {
    if (inBytesOfLongs == null) {
        return null;
    }
    ArrayList<Long> toReturn = new ArrayList<Long>();
    ByteArrayInputStream bytes = new ByteArrayInputStream((byte[]) inBytesOfLongs);
    DataInputStream input = new DataInputStream(bytes);
    try {
        while (input.available() > 0) {
            toReturn.add(input.readLong());
        }
    } finally {
        input.close();
    }
    return toReturn;
}
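The reader above assumes the cached byte[] is just a concatenation of big-endian longs. For context, a minimal sketch of the matching writer side; the helper name getBytesFromList is assumed for illustration and is not taken from the source:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;

final class LongListCodec {
    // Hypothetical helper: serializes a List<Long> into the byte[] layout
    // that getListFromBytes() above expects (8 bytes per value, big-endian).
    static byte[] getBytesFromList(final List<Long> values) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream output = new DataOutputStream(bytes)) {
            for (long value : values) {
                output.writeLong(value);
            }
        }
        return bytes.toByteArray();
    }
}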
From source file: cn.edu.wyu.documentviewer.model.DocumentInfo.java

@Override
public void read(DataInputStream in) throws IOException {
    final int version = in.readInt();
    switch (version) {
    case VERSION_INIT:
        throw new ProtocolException("Ignored upgrade");
    case VERSION_SPLIT_URI:
        authority = DurableUtils.readNullableString(in);
        documentId = DurableUtils.readNullableString(in);
        mimeType = DurableUtils.readNullableString(in);
        displayName = DurableUtils.readNullableString(in);
        lastModified = in.readLong();
        flags = in.readInt();
        summary = DurableUtils.readNullableString(in);
        size = in.readLong();
        icon = in.readInt();
        deriveFields();
        break;
    default:
        throw new ProtocolException("Unknown version " + version);
    }
}
From source file: org.apache.hadoop.yarn.server.resourcemanager.recovery.NDBRMStateStore.java

/**
 * Retrieve Delegation Tokens from NDB.
 *
 * @param rmState
 * @throws Exception
 */
private void loadRMDelegationTokenState(RMState rmState) throws Exception {
    // Retrieve all DelegationTokenIds from NDB
    List<DelegationToken> delTokens = RMUtilities.getDelegationTokens();
    if (delTokens != null) {
        for (DelegationToken hopDelToken : delTokens) {
            ByteArrayInputStream is = new ByteArrayInputStream(hopDelToken.getRmdtidentifier());
            DataInputStream fsIn = new DataInputStream(is);
            try {
                RMDelegationTokenIdentifier identifier = new RMDelegationTokenIdentifier();
                identifier.readFields(fsIn);
                long renewDate = fsIn.readLong();
                rmState.rmSecretManagerState.delegationTokenState.put(identifier, renewDate);
            } finally {
                is.close();
            }
        }
    }
}
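For context, the bytes parsed here follow the standard Hadoop Writable contract: the token identifier serializes itself, then the renew date is appended as a long. A minimal sketch of that writer side, assuming only the Writable API (the class and method names are illustrative, not from the source):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;

final class TokenStateCodec {
    // Hypothetical mirror of the load path above: serialize an identifier
    // plus its renew date into the byte[] that loadRMDelegationTokenState reads.
    static byte[] toBytes(RMDelegationTokenIdentifier identifier, long renewDate) throws IOException {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        try (DataOutputStream fsOut = new DataOutputStream(os)) {
            identifier.write(fsOut); // Writable counterpart of identifier.readFields(fsIn)
            fsOut.writeLong(renewDate); // read back with fsIn.readLong()
        }
        return os.toByteArray();
    }
}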
From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Read a block from the disk and then send it to a destination.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void copyBlock(DataInputStream in) throws IOException {
    // Read in the header
    long blockId = in.readLong(); // read block id
    Block block = new Block(blockId, 0, in.readLong());

    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.COPY);
        } catch (InvalidToken e) {
            LOG.warn("Invalid access token in request from " + remoteAddress
                    + " for OP_COPY_BLOCK for block " + block);
            sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN,
                    datanode.socketWriteTimeout);
            return;
        }
    }

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
        LOG.info("Not able to copy block " + blockId + " to " + s.getRemoteSocketAddress()
                + " because threads quota is exceeded.");
        sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR, datanode.socketWriteTimeout);
        return;
    }

    BlockSender blockSender = null;
    DataOutputStream reply = null;
    boolean isOpSuccess = true;
    try {
        // check if the block exists or not
        blockSender = new BlockSender(block, 0, -1, false, false, false, datanode);

        // set up response stream
        OutputStream baseStream = NetUtils.getOutputStream(s, datanode.socketWriteTimeout);
        reply = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));

        // send status first
        reply.writeShort((short) DataTransferProtocol.OP_STATUS_SUCCESS);
        // send block content to the target
        long read = blockSender.sendBlock(reply, baseStream, dataXceiverServer.balanceThrottler);

        datanode.myMetrics.incrBytesRead((int) read);
        datanode.myMetrics.incrBlocksRead();

        LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
    } catch (IOException ioe) {
        isOpSuccess = false;
        throw ioe;
    } finally {
        dataXceiverServer.balanceThrottler.release();
        if (isOpSuccess) {
            try {
                // send one last byte to indicate that the resource is cleaned.
                reply.writeChar('d');
            } catch (IOException ignored) {
            }
        }
        IOUtils.closeStream(reply);
        IOUtils.closeStream(blockSender);
    }
}
From source file: org.apache.mele.embedded.HadoopQueueEmbedded.java

private boolean ackCheck() throws IOException {
    LOG.info("Starting ack check");
    BitSet bitSet = new BitSet();
    FileSystem fileSystem = null;
    try {
        _ackLock.lock();
        _ackOutputStream.close();
        fileSystem = newFileSystem(_file);
        FileStatus fileStatus = fileSystem.getFileStatus(_file);
        long dataLength = fileStatus.getLen();
        long totalAckLength = getTotalAckLength(fileSystem);
        if (!couldContainAllAcks(totalAckLength)) {
            LOG.info("Exiting early [" + totalAckLength + "] because [" + totalAckLength % 12 + "]");
            return false;
        }
        for (Path ackFile : _ackFiles) {
            LOG.info("Starting ack check for file [" + ackFile + "]");
            DFSInputStream inputStream = null;
            try {
                inputStream = getDFS(fileSystem.open(ackFile));
                long length = inputStream.getFileLength();
                DataInputStream dataInputStream = new DataInputStream(inputStream);
                while (length > 0) {
                    int pos = (int) dataInputStream.readLong();
                    // @TODO check position
                    // 4 bytes for storing the length of the message
                    int len = dataInputStream.readInt() + 4;
                    bitSet.set(pos, pos + len);
                    length -= 12;
                }
                if (bitSet.cardinality() == dataLength) {
                    return true;
                }
            } finally {
                if (inputStream != null) {
                    inputStream.close();
                }
            }
        }
        return false;
    } finally {
        reopenAckFile(fileSystem);
        _ackLock.unlock();
        if (fileSystem != null) {
            fileSystem.close();
        }
    }
}
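Each ack record parsed above is 12 bytes: an 8-byte position read with readLong, followed by a 4-byte message length read with readInt. That is why the loop decrements length by 12 and the early-exit check tests totalAckLength % 12. A sketch of the corresponding record writer under those assumptions (helper name is illustrative):

import java.io.DataOutputStream;
import java.io.IOException;

final class AckRecords {
    // Hypothetical writer for one 12-byte ack record in the layout ackCheck() parses.
    static void writeAck(DataOutputStream ackOut, long position, int messageLength) throws IOException {
        ackOut.writeLong(position); // 8 bytes, read back via readLong()
        ackOut.writeInt(messageLength); // 4 bytes, read back via readInt()
    }
}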
From source file: com.adito.notification.Notifier.java

void loadFromDisk() throws IOException {
    File[] f = queueDirectory.listFiles(new FileFilter() {
        public boolean accept(File f) {
            return f.getName().endsWith(".msg");
        }
    });
    // TODO better error handling in parsing of message files. Report on
    // non-existent / unreadable directory
    if (f == null) {
        throw new IOException("Could not list queue directory " + queueDirectory.getAbsolutePath());
    }
    for (int i = 0; i < f.length; i++) {
        FileInputStream fin = new FileInputStream(f[i]);
        try {
            DataInputStream din = new DataInputStream(fin);
            long id = din.readLong();
            String sinkName = din.readUTF();
            messageId = Math.max(id, messageId);
            boolean urgent = din.readBoolean();
            String subject = din.readUTF();
            List<Recipient> recipientList = new ArrayList<Recipient>();
            while (true) {
                int recipientType = din.readInt();
                if (recipientType == Recipient.EOF) {
                    break;
                } else {
                    String recipientAlias = din.readUTF();
                    String realmName = din.readUTF();
                    Recipient recipient = new Recipient(recipientType, recipientAlias, realmName);
                    recipientList.add(recipient);
                }
            }
            Properties parameters = new Properties();
            while (true) {
                int parameterType = din.readInt();
                if (parameterType < 1) {
                    break;
                } else {
                    String key = din.readUTF();
                    String val = din.readUTF();
                    parameters.setProperty(key, val);
                }
            }
            String content = din.readUTF();
            String lastMessage = din.readUTF();
            Message msg = new Message(subject, content, urgent);
            msg.setId(id);
            msg.setRecipients(recipientList);
            msg.setSinkName(sinkName);
            msg.setLastMessage(lastMessage);
            queue(msg);
        } finally {
            fin.close();
        }
    }
}
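loadFromDisk defines the on-disk .msg layout implicitly: a long id, the sink name, an urgent flag, the subject, recipient records terminated by Recipient.EOF, parameter records terminated by any int below 1, then the content and last message. A sketch of a matching writer under those assumptions; the Recipient accessor names and the terminator value 0 are guesses for illustration, not taken from the source:

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;
import java.util.Properties;

final class MessageFiles {
    // Hypothetical counterpart of loadFromDisk(): writes one .msg file body.
    static void writeMessage(DataOutputStream dout, long id, String sinkName, boolean urgent,
            String subject, List<Recipient> recipients, Properties parameters,
            String content, String lastMessage) throws IOException {
        dout.writeLong(id); // read back with din.readLong()
        dout.writeUTF(sinkName);
        dout.writeBoolean(urgent);
        dout.writeUTF(subject);
        for (Recipient r : recipients) {
            dout.writeInt(r.getType()); // accessor names assumed
            dout.writeUTF(r.getRecipientAlias());
            dout.writeUTF(r.getRealmName());
        }
        dout.writeInt(Recipient.EOF); // terminates the recipient records
        for (String key : parameters.stringPropertyNames()) {
            dout.writeInt(1); // any value >= 1 marks another parameter
            dout.writeUTF(key);
            dout.writeUTF(parameters.getProperty(key));
        }
        dout.writeInt(0); // any value < 1 terminates the parameter records
        dout.writeUTF(content);
        dout.writeUTF(lastMessage);
    }
}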
From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Read a block from the disk.
 * @param in The stream to read from
 * @throws IOException
 */
private void readBlock(DataInputStream in) throws IOException {
    //
    // Read in the header
    //
    long blockId = in.readLong();
    Block block = new Block(blockId, 0, in.readLong());

    long startOffset = in.readLong();
    long length = in.readLong();
    String clientName = Text.readString(in);
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    OutputStream baseStream = NetUtils.getOutputStream(s, datanode.socketWriteTimeout);
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));

    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.READ);
        } catch (InvalidToken e) {
            try {
                out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                out.flush();
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_READ_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    }

    // send the block
    BlockSender blockSender = null;
    final String clientTraceFmt = clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
            ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "%d", "HDFS_READ",
                    clientName, "%d", datanode.dnRegistration.getStorageID(), block, "%d")
            : datanode.dnRegistration + " Served block " + block + " to " + s.getInetAddress();
    try {
        try {
            blockSender = new BlockSender(block, startOffset, length, true, true, false, datanode,
                    clientTraceFmt);
        } catch (IOException e) {
            out.writeShort(DataTransferProtocol.OP_STATUS_ERROR);
            throw e;
        }

        out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); // send op status
        long read = blockSender.sendBlock(out, baseStream, null); // send data

        if (blockSender.isBlockReadFully()) {
            // See if client verification succeeded.
            // This is an optional response from client.
            try {
                if (in.readShort() == DataTransferProtocol.OP_STATUS_CHECKSUM_OK
                        && datanode.blockScanner != null) {
                    datanode.blockScanner.verifiedByClient(block);
                }
            } catch (IOException ignored) {
            }
        }

        datanode.myMetrics.incrBytesRead((int) read);
        datanode.myMetrics.incrBlocksRead();
    } catch (SocketException ignored) {
        // It's OK for the remote side to close the connection at any time.
        datanode.myMetrics.incrBlocksRead();
    } catch (IOException ioe) {
        /* What exactly should we do here?
         * Earlier version shutdown() datanode if there is disk error.
         */
        LOG.warn(datanode.dnRegistration + ":Got exception while serving " + block + " to "
                + s.getInetAddress() + ":\n" + StringUtils.stringifyException(ioe));
        throw ioe;
    } finally {
        IOUtils.closeStream(out);
        IOUtils.closeStream(blockSender);
    }
}
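The four readLong calls above fix the wire header for this op: block id, generation stamp, start offset, and length, followed by the client name and the access token. A sketch of a client writing that header; the op-code prelude mirrors the proxy request visible in replaceBlock below, but this is an illustration, not the actual DFSClient code:

// Hypothetical client-side writer for the header that readBlock() parses.
static void writeReadBlockHeader(DataOutputStream out, Block block, long startOffset, long length,
        String clientName, Token<BlockTokenIdentifier> accessToken) throws IOException {
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // transfer version
    out.writeByte(DataTransferProtocol.OP_READ_BLOCK); // op code
    out.writeLong(block.getBlockId()); // first in.readLong()
    out.writeLong(block.getGenerationStamp()); // second in.readLong()
    out.writeLong(startOffset); // third in.readLong()
    out.writeLong(length); // fourth in.readLong()
    Text.writeString(out, clientName); // Text.readString(in)
    accessToken.write(out); // accessToken.readFields(in)
    out.flush();
}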
From source file: org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Receive a block and write it to disk; it then notifies the namenode to
 * remove the copy from the source.
 *
 * @param in The stream to read from
 * @throws IOException
 */
private void replaceBlock(DataInputStream in) throws IOException {
    /* read header */
    long blockId = in.readLong();
    Block block = new Block(blockId, dataXceiverServer.estimateBlockSize,
            in.readLong()); // block id & generation stamp
    String sourceID = Text.readString(in); // read del hint
    DatanodeInfo proxySource = new DatanodeInfo(); // read proxy source
    proxySource.readFields(in);
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.REPLACE);
        } catch (InvalidToken e) {
            LOG.warn("Invalid access token in request from " + remoteAddress
                    + " for OP_REPLACE_BLOCK for block " + block);
            sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN,
                    datanode.socketWriteTimeout);
            return;
        }
    }

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
        LOG.warn("Not able to receive block " + blockId + " from " + s.getRemoteSocketAddress()
                + " because threads quota is exceeded.");
        sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR, datanode.socketWriteTimeout);
        return;
    }

    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    short opStatus = DataTransferProtocol.OP_STATUS_SUCCESS;
    BlockReceiver blockReceiver = null;
    DataInputStream proxyReply = null;

    try {
        // get the output stream to the proxy
        InetSocketAddress proxyAddr = NetUtils.createSocketAddr(proxySource.getName());
        proxySock = datanode.newSocket();
        NetUtils.connect(proxySock, proxyAddr, datanode.socketTimeout);
        proxySock.setSoTimeout(datanode.socketTimeout);

        OutputStream baseStream = NetUtils.getOutputStream(proxySock, datanode.socketWriteTimeout);
        proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));

        /* send request to the proxy */
        proxyOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // transfer version
        proxyOut.writeByte(DataTransferProtocol.OP_COPY_BLOCK); // op code
        proxyOut.writeLong(block.getBlockId()); // block id
        proxyOut.writeLong(block.getGenerationStamp()); // generation stamp
        accessToken.write(proxyOut);
        proxyOut.flush();

        // receive the response from the proxy
        proxyReply = new DataInputStream(
                new BufferedInputStream(NetUtils.getInputStream(proxySock), BUFFER_SIZE));
        short status = proxyReply.readShort();
        if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
            if (status == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN) {
                throw new IOException("Copy block " + block + " from "
                        + proxySock.getRemoteSocketAddress() + " failed due to access token error");
            }
            throw new IOException(
                    "Copy block " + block + " from " + proxySock.getRemoteSocketAddress() + " failed");
        }
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, proxyReply,
                proxySock.getRemoteSocketAddress().toString(),
                proxySock.getLocalSocketAddress().toString(), false, "", null, datanode);

        // receive a block
        blockReceiver.receiveBlock(null, null, null, null, dataXceiverServer.balanceThrottler, -1);

        // notify name node
        datanode.notifyNamenodeReceivedBlock(block, sourceID);

        LOG.info("Moved block " + block + " from " + s.getRemoteSocketAddress());
    } catch (IOException ioe) {
        opStatus = DataTransferProtocol.OP_STATUS_ERROR;
        throw ioe;
    } finally {
        // receive the last byte that indicates the proxy released its thread resource
        if (opStatus == DataTransferProtocol.OP_STATUS_SUCCESS) {
            try {
                proxyReply.readChar();
            } catch (IOException ignored) {
            }
        }

        // now release the thread resource
        dataXceiverServer.balanceThrottler.release();

        // send response back
        try {
            sendResponse(s, opStatus, datanode.socketWriteTimeout);
        } catch (IOException ioe) {
            LOG.warn("Error writing reply back to " + s.getRemoteSocketAddress());
        }
        IOUtils.closeStream(proxyOut);
        IOUtils.closeStream(blockReceiver);
        IOUtils.closeStream(proxyReply);
    }
}
From source file: de.hybris.platform.cuppytrail.impl.DefaultSecureTokenService.java

@Override
public SecureToken decryptData(final String token) {
    if (token == null || StringUtils.isBlank(token)) {
        throw new IllegalArgumentException("missing token");
    }
    try {
        final byte[] decryptedBytes = decrypt(token, encryptionKeyBytes);
        // Last 16 bytes are the MD5 signature
        final int decryptedBytesDataLength = decryptedBytes.length - MD5_LENGTH;
        if (!validateSignature(decryptedBytes, 0, decryptedBytesDataLength, decryptedBytes,
                decryptedBytesDataLength, signatureKeyBytes)) {
            throw new IllegalArgumentException("Invalid signature in cookie");
        }
        final ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(decryptedBytes, 0,
                decryptedBytesDataLength);
        final DataInputStream dataInputStream = new DataInputStream(byteArrayInputStream);
        skipPadding(dataInputStream);
        final String userIdentifier = dataInputStream.readUTF();
        final String userChecksum = dataInputStream.readUTF();
        if (userChecksum == null || !userChecksum.equals(createChecksum(userIdentifier))) {
            throw new IllegalArgumentException("invalid token");
        }
        final long timeStampInSeconds = dataInputStream.readLong();
        return new SecureToken(userIdentifier, timeStampInSeconds);
    } catch (final IOException e) {
        LOG.error("Could not decrypt token", e);
        throw new SystemException(e.toString(), e);
    } catch (final GeneralSecurityException e) {
        LOG.warn("Could not decrypt token: " + e.toString());
        throw new IllegalArgumentException("Invalid token", e);
    }
}