Usage examples for java.nio.ByteBuffer.remaining()
Method signature: public final int remaining()
remaining() returns the number of elements between the buffer's current position and its limit (limit - position).
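Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) showing what remaining() reports as a buffer is filled, flipped, and drained:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);        // position=0, limit=8
        System.out.println(buf.remaining());            // 8: limit - position

        buf.put((byte) 1).put((byte) 2).put((byte) 3);  // position=3
        System.out.println(buf.remaining());            // 5: free space left for writing

        buf.flip();                                     // position=0, limit=3
        System.out.println(buf.remaining());            // 3: bytes available to read

        buf.get();                                      // position=1
        System.out.println(buf.remaining());            // 2
    }
}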
From source file:com.facebook.infrastructure.net.TcpConnection.java
public void write(Message message) throws IOException {
    byte[] data = serializer_.serialize(message);
    if (data.length <= 0) {
        return;
    }
    boolean listening = !message.getFrom().equals(EndPoint.randomLocalEndPoint_);
    ByteBuffer buffer = MessagingService.packIt(data, false, false, listening);
    synchronized (this) {
        if (!pendingWrites_.isEmpty() || !connected_.get()) {
            pendingWrites_.add(buffer);
            return;
        }
        logger_.trace("Sending packet of size " + data.length);
        socketChannel_.write(buffer);
        if (buffer.remaining() > 0) {
            pendingWrites_.add(buffer);
            if ((key_.interestOps() & SelectionKey.OP_WRITE) == 0) {
                SelectorManager.getSelectorManager().modifyKeyForWrite(key_);
            }
        }
    }
}
From source file:org.archive.modules.recrawl.wbm.WbmPersistLoadProcessor.java
protected HashMap<String, Object> getLastCrawl(InputStream is) throws IOException {
    // read CDX lines, save most recent (at the end) hash.
    ByteBuffer buffer = ByteBuffer.allocate(32);
    ByteBuffer tsbuffer = ByteBuffer.allocate(14);
    int field = 0;
    int c;
    do {
        c = is.read();
        if (field == 1) {
            // 14-digits timestamp
            tsbuffer.clear();
            while (Character.isDigit(c) && tsbuffer.remaining() > 0) {
                tsbuffer.put((byte) c);
                c = is.read();
            }
            if (c != ' ' || tsbuffer.position() != 14) {
                tsbuffer.clear();
            }
            // fall through to skip the rest
        } else if (field == 5) {
            buffer.clear();
            while ((c >= 'A' && c <= 'Z' || c >= '0' && c <= '9') && buffer.remaining() > 0) {
                buffer.put((byte) c);
                c = is.read();
            }
            if (c != ' ' || buffer.position() != 32) {
                buffer.clear();
            }
            // fall through to skip the rest
        }
        while (true) {
            if (c == -1) {
                break;
            } else if (c == '\n') {
                field = 0;
                break;
            } else if (c == ' ') {
                field++;
                break;
            }
            c = is.read();
        }
    } while (c != -1);
    HashMap<String, Object> info = new HashMap<String, Object>();
    if (buffer.remaining() == 0) {
        info.put(RecrawlAttributeConstants.A_CONTENT_DIGEST,
                contentDigestScheme + new String(buffer.array()));
    }
    if (tsbuffer.remaining() == 0) {
        try {
            long ts = DateUtils.parse14DigitDate(new String(tsbuffer.array())).getTime();
            // A_TIMESTAMP has been used for sorting history long before A_FETCH_BEGAN_TIME
            // field was introduced. Now FetchHistoryProcessor fails if A_FETCH_BEGAN_TIME is
            // not set. We could stop storing A_TIMESTAMP and sort by A_FETCH_BEGAN_TIME.
            info.put(FetchHistoryHelper.A_TIMESTAMP, ts);
            info.put(CoreAttributeConstants.A_FETCH_BEGAN_TIME, ts);
        } catch (ParseException ex) {
        }
    }
    return info.isEmpty() ? null : info;
}
From source file:edu.umass.cs.nio.MessageExtractor.java
private void demultiplexMessage(NIOHeader header, ByteBuffer incoming) throws IOException {
    boolean extracted = false;
    byte[] msg = null;
    // synchronized (this.packetDemuxes)
    {
        for (final AbstractPacketDemultiplexer<?> pd : this.packetDemuxes) {
            if (pd instanceof PacketDemultiplexerDefault
                    // if congested, don't process
                    || pd.isCongested(header))
                continue;
            if (!extracted) { // extract at most once
                msg = new byte[incoming.remaining()];
                incoming.get(msg);
                extracted = true;
                NIOInstrumenter.incrBytesRcvd(msg.length + 8);
            }
            // String message = (new String(msg,
            // MessageNIOTransport.NIO_CHARSET_ENCODING));
            if (this.callDemultiplexerHandler(header, msg, pd))
                return;
        }
    }
}
From source file:org.apache.hadoop.hbase.ipc.ServerRpcConnection.java
protected final boolean processPreamble(ByteBuffer preambleBuffer) throws IOException {
    assert preambleBuffer.remaining() == 6;
    for (int i = 0; i < RPC_HEADER.length; i++) {
        if (RPC_HEADER[i] != preambleBuffer.get()) {
            doBadPreambleHandling(
                "Expected HEADER=" + Bytes.toStringBinary(RPC_HEADER) + " but received HEADER="
                    + Bytes.toStringBinary(preambleBuffer.array(), 0, RPC_HEADER.length) + " from " + toString());
            return false;
        }
    }
    int version = preambleBuffer.get() & 0xFF;
    byte authbyte = preambleBuffer.get();
    this.authMethod = AuthMethod.valueOf(authbyte);
    if (version != SimpleRpcServer.CURRENT_VERSION) {
        String msg = getFatalConnectionString(version, authbyte);
        doBadPreambleHandling(msg, new WrongVersionException(msg));
        return false;
    }
    if (authMethod == null) {
        String msg = getFatalConnectionString(version, authbyte);
        doBadPreambleHandling(msg, new BadAuthException(msg));
        return false;
    }
    if (this.rpcServer.isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
        if (this.rpcServer.allowFallbackToSimpleAuth) {
            this.rpcServer.metrics.authenticationFallback();
            authenticatedWithFallback = true;
        } else {
            AccessDeniedException ae = new AccessDeniedException("Authentication is required");
            doRespond(getErrorResponse(ae.getMessage(), ae));
            return false;
        }
    }
    if (!this.rpcServer.isSecurityEnabled && authMethod != AuthMethod.SIMPLE) {
        doRawSaslReply(SaslStatus.SUCCESS, new IntWritable(SaslUtil.SWITCH_TO_SIMPLE_AUTH), null, null);
        authMethod = AuthMethod.SIMPLE;
        // client has already sent the initial Sasl message and we
        // should ignore it. Both client and server should fall back
        // to simple auth from now on.
        skipInitialSaslHandshake = true;
    }
    if (authMethod != AuthMethod.SIMPLE) {
        useSasl = true;
    }
    return true;
}
From source file:com.taobao.metamorphosis.server.transaction.store.JournalTransactionStore.java
@Override
public void commit(final TransactionId txid, final boolean wasPrepared) throws IOException {
    final Tx tx;
    if (wasPrepared) {
        synchronized (this.preparedTransactions) {
            tx = this.preparedTransactions.remove(txid);
        }
    } else {
        synchronized (this.inflightTransactions) {
            tx = this.inflightTransactions.remove(txid);
        }
    }
    if (tx == null) {
        return;
    }
    // Append messages
    final Map<MessageStore, List<Long>> msgIds = tx.getMsgIds();
    final Map<MessageStore, List<PutCommand>> putCommands = tx.getPutCommands();
    final Map<String, AddMsgLocation> locations =
            new LinkedHashMap<String, JournalTransactionStore.AddMsgLocation>();
    final int count = msgIds.size();
    for (final Map.Entry<MessageStore, List<Long>> entry : msgIds.entrySet()) {
        final MessageStore msgStore = entry.getKey();
        final List<Long> ids = entry.getValue();
        final List<PutCommand> cmds = putCommands.get(msgStore);
        // Append message
        msgStore.append(ids, cmds, new AppendCallback() {
            @Override
            public void appendComplete(final Location location) {
                // Calculate checksum
                final int checkSum = CheckSum.crc32(MessageUtils.makeMessageBuffer(ids, cmds).array());
                final String description = msgStore.getDescription();
                // Store append location
                synchronized (locations) {
                    locations.put(description,
                            new AddMsgLocation(location.getOffset(), location.getLength(), checkSum, description));
                    // once all stores have reported their append locations
                    if (locations.size() == count) {
                        // log the tx so the command can be replayed on recovery
                        final ByteBuffer localtionBytes = AddMsgLocationUtils.encodeLocation(locations);
                        TxCommand msg = null;
                        // Log transaction
                        final int attachmentLen = localtionBytes.remaining();
                        if (txid.isXATransaction()) {
                            final TransactionOperation to = TransactionOperation.newBuilder() //
                                    .setType(TransactionType.XA_COMMIT) //
                                    .setTransactionId(txid.getTransactionKey()) //
                                    .setWasPrepared(wasPrepared) //
                                    .setDataLength(attachmentLen) //
                                    .build();
                            msg = TxCommand.newBuilder().setCmdType(TxCommandType.TX_OP)
                                    .setCmdContent(to.toByteString()).build();
                        } else {
                            final TransactionOperation to = TransactionOperation.newBuilder() //
                                    .setType(TransactionType.LOCAL_COMMIT) //
                                    .setTransactionId(txid.getTransactionKey()) //
                                    .setWasPrepared(wasPrepared) //
                                    .setDataLength(attachmentLen) //
                                    .build();
                            msg = TxCommand.newBuilder().setCmdType(TxCommandType.TX_OP)
                                    .setCmdContent(to.toByteString()).build();
                        }
                        // commit
                        try {
                            JournalTransactionStore.this.journalStore.write(msg, localtionBytes, tx.location, true);
                        } catch (final IOException e) {
                            throw new RuntimeException("Write tx log failed", e);
                        }
                    }
                }
            }
        });
    }
}
From source file:org.apache.hadoop.crypto.CryptoStreamsTestBase.java
@Test(timeout = 120000)
public void testHasEnhancedByteBufferAccess() throws Exception {
    OutputStream out = getOutputStream(defaultBufferSize);
    writeData(out);

    InputStream in = getInputStream(defaultBufferSize);
    final int len1 = dataLen / 8;
    // ByteBuffer size is len1
    ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).read(getBufferPool(), len1,
            EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    int n1 = buffer.remaining();
    byte[] readData = new byte[n1];
    buffer.get(readData);
    byte[] expectedData = new byte[n1];
    System.arraycopy(data, 0, expectedData, 0, n1);
    Assert.assertArrayEquals(readData, expectedData);
    ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);

    // Read len1 bytes
    readData = new byte[len1];
    readAll(in, readData, 0, len1);
    expectedData = new byte[len1];
    System.arraycopy(data, n1, expectedData, 0, len1);
    Assert.assertArrayEquals(readData, expectedData);

    // ByteBuffer size is len1
    buffer = ((HasEnhancedByteBufferAccess) in).read(getBufferPool(), len1,
            EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    int n2 = buffer.remaining();
    readData = new byte[n2];
    buffer.get(readData);
    expectedData = new byte[n2];
    System.arraycopy(data, n1 + len1, expectedData, 0, n2);
    Assert.assertArrayEquals(readData, expectedData);
    ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);

    in.close();
}
From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java
private static TarReader openFirstFileWithValidIndex(List<File> files, boolean memoryMapping) {
    for (File file : files) {
        String name = file.getName();
        try {
            RandomAccessFile access = new RandomAccessFile(file, "r");
            try {
                ByteBuffer index = loadAndValidateIndex(access, name);
                if (index == null) {
                    log.info("No index found in tar file {}, skipping...", name);
                } else {
                    // found a file with a valid index, drop the others
                    for (File other : files) {
                        if (other != file) {
                            log.info("Removing unused tar file {}", other.getName());
                            other.delete();
                        }
                    }
                    if (memoryMapping) {
                        try {
                            FileAccess mapped = new FileAccess.Mapped(access);
                            // re-read the index, now with memory mapping
                            int indexSize = index.remaining();
                            index = mapped.read(mapped.length() - indexSize - 16 - 1024, indexSize);
                            return new TarReader(file, mapped, index);
                        } catch (IOException e) {
                            log.warn("Failed to mmap tar file {}. Falling back to normal file "
                                    + "IO, which will negatively impact repository performance. "
                                    + "This problem may have been caused by restrictions on the "
                                    + "amount of virtual memory available to the JVM. Please make "
                                    + "sure that a 64-bit JVM is being used and that the process "
                                    + "has access to unlimited virtual memory (ulimit option -v).",
                                    name, e);
                        }
                    }
                    FileAccess random = new FileAccess.Random(access);
                    // prevent the finally block from closing the file
                    // as the returned TarReader will take care of that
                    access = null;
                    return new TarReader(file, random, index);
                }
            } finally {
                if (access != null) {
                    access.close();
                }
            }
        } catch (IOException e) {
            log.warn("Could not read tar file {}, skipping...", name, e);
        }
    }
    return null;
}
From source file:com.healthmarketscience.jackcess.impl.OleUtil.java
private static ContentImpl createSimplePackageContent(OleBlobImpl blob, String prettyName, String className,
        String typeName, ByteBuffer blobBb, int dataBlockLen) {

    int dataBlockPos = blobBb.position();
    ByteBuffer bb = PageChannel.narrowBuffer(blobBb, dataBlockPos, dataBlockPos + dataBlockLen);

    int packageSig = bb.getShort();
    if (packageSig != PACKAGE_STREAM_SIGNATURE) {
        return new OtherContentImpl(blob, prettyName, className, typeName, dataBlockPos, dataBlockLen);
    }

    String fileName = readZeroTermStr(bb);
    String filePath = readZeroTermStr(bb);
    int packageType = bb.getInt();

    if (packageType == PS_EMBEDDED_FILE) {
        int localFilePathLen = bb.getInt();
        String localFilePath = readStr(bb, bb.position(), localFilePathLen);
        int dataLen = bb.getInt();
        int dataPos = bb.position();
        bb.position(dataLen + dataPos);

        // remaining strings are in "reverse" order (local file path, file name,
        // file path). these strings use a real utf charset, and therefore can
        // "fix" problems with ascii based names (so we prefer these strings to
        // the original strings we found)
        int strNum = 0;
        while (true) {
            int rem = bb.remaining();
            if (rem < 4) {
                break;
            }
            int strLen = bb.getInt();
            String remStr = readStr(bb, bb.position(), strLen * 2, OLE_UTF_CHARSET);
            switch (strNum) {
            case 0:
                localFilePath = remStr;
                break;
            case 1:
                fileName = remStr;
                break;
            case 2:
                filePath = remStr;
                break;
            default:
                // ignore
            }
            ++strNum;
        }

        return new SimplePackageContentImpl(blob, prettyName, className, typeName, dataPos, dataLen, fileName,
                filePath, localFilePath);
    }

    if (packageType == PS_LINKED_FILE) {
        bb.getShort(); // unknown
        String linkStr = readZeroTermStr(bb);
        return new LinkContentImpl(blob, prettyName, className, typeName, fileName, linkStr, filePath);
    }

    return new OtherContentImpl(blob, prettyName, className, typeName, dataBlockPos, dataBlockLen);
}
From source file:org.apache.hadoop.hdfs.RemoteBlockReader2.java
@Override
public int read(ByteBuffer buf) throws IOException {
    if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
        try (TraceScope ignored = tracer.newScope("RemoteBlockReader2#readNextPacket(" + blockId + ")")) {
            readNextPacket();
        }
    }

    if (curDataSlice.remaining() == 0) {
        // we're at EOF now
        return -1;
    }

    int nRead = Math.min(curDataSlice.remaining(), buf.remaining());
    ByteBuffer writeSlice = curDataSlice.duplicate();
    writeSlice.limit(writeSlice.position() + nRead);
    buf.put(writeSlice);
    curDataSlice.position(writeSlice.position());

    return nRead;
}
From source file:com.l2jfree.network.mmocore.ReadWriteThread.java
private void readPacket(SelectionKey key) {
    @SuppressWarnings("unchecked")
    T con = (T) key.attachment();

    ByteBuffer buf = con.getReadBuffer();
    if (buf == null) {
        buf = getReadBuffer();
        buf.clear();
    }

    int readPackets = 0;
    int readBytes = 0;

    for (;;) {
        final int remainingFreeSpace = buf.remaining();

        int result = -2;
        try {
            result = con.getReadableByteChannel().read(buf);
        } catch (IOException e) {
            // error handling goes below
        }

        switch (result) {
        case -2: // IOException
        {
            closeConnectionImpl(con, true);
            return;
        }
        case -1: // EOS
        {
            closeConnectionImpl(con, false);
            return;
        }
        default: {
            buf.flip();

            // try to read as many packets as possible
            for (;;) {
                final int startPos = buf.position();

                if (readPackets >= getMaxIncomingPacketsPerPass() || readBytes >= getMaxIncomingBytesPerPass())
                    break;

                if (!tryReadPacket2(con, buf))
                    break;

                readPackets++;
                readBytes += (buf.position() - startPos);
            }
            break;
        }
        }

        // stop reading, if we have reached a config limit
        if (readPackets >= getMaxIncomingPacketsPerPass() || readBytes >= getMaxIncomingBytesPerPass())
            break;

        // if the buffer wasn't filled completely, we should stop trying as the input channel is empty
        if (remainingFreeSpace > result)
            break;

        // compact the buffer for reusing the remaining bytes
        if (buf.hasRemaining())
            buf.compact();
        else
            buf.clear();
    }

    // check if there are some more bytes in buffer and allocate/compact to prevent content loss.
    if (buf.hasRemaining()) {
        if (buf == getReadBuffer()) {
            con.setReadBuffer(getPooledBuffer().put(getReadBuffer()));
        } else {
            buf.compact();
        }
    } else {
        if (buf == getReadBuffer()) {
            // no additional buffers used
        } else {
            con.setReadBuffer(null);
            recycleBuffer(buf);
        }
    }
}