Usage examples for java.nio.channels.ReadableByteChannel.read(ByteBuffer)
public int read(ByteBuffer dst) throws IOException;
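Before the project-specific examples below, a minimal sketch of the read() contract: the call transfers bytes into the buffer between its position and limit, returns the number of bytes read (possibly zero), and returns -1 once the channel reaches end-of-stream. The file name and buffer size here are arbitrary.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.nio.file.Paths;

public class ReadLoopSketch {
    public static void main(String[] args) throws IOException {
        // Any InputStream can be wrapped as a channel; a FileChannel behaves the same way.
        try (ReadableByteChannel channel = Channels.newChannel(Files.newInputStream(Paths.get("data.bin")))) {
            ByteBuffer buffer = ByteBuffer.allocate(4096);
            long total = 0;
            while (channel.read(buffer) != -1) { // -1 signals end-of-stream
                buffer.flip();                   // switch from filling to draining
                total += buffer.remaining();     // consume the bytes here
                buffer.clear();                  // reset for the next read
            }
            System.out.println("Read " + total + " bytes");
        }
    }
}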
From source file:org.commoncrawl.util.StreamingArcFileReader.java
public void testReader(File arcFileItem) throws Exception {
    resetState();

    Thread thread = new Thread(new Runnable() {

        public void run() {
            try {
                TriStateResult result;
                while ((result = hasMoreItems()) != TriStateResult.NoMoreItems) {
                    if (result == TriStateResult.MoreItems) {
                        ArcFileItem item = null;
                        while ((item = getNextItem()) == null) {
                            LOG.info("Waiting to Read Next Item...");
                            try {
                                Thread.sleep(1000);
                            } catch (InterruptedException e) {
                            }
                        }
                        LOG.info("GOT Item URL:" + item.getUri() + " OFFSET:" + item.getArcFilePos()
                                + " ContentSize:" + item.getContent().getCount());
                        for (ArcFileHeaderItem headerItem : item.getHeaderItems()) {
                            if (headerItem.isFieldDirty(ArcFileHeaderItem.Field_ITEMKEY)) {
                                //LOG.info("Header Item:" + headerItem.getItemKey() + " :" + headerItem.getItemValue());
                            } else {
                                //LOG.info("Header Item:" + headerItem.getItemValue());
                            }
                        }
                        //LOG.info("Content Length:" + item.getContent().getCount());
                    } else {
                        // LOG.info("Has More Items Returned Need More Data. Sleeping");
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException e) {
                        }
                    }
                }
                LOG.info("NO MORE ITEMS... BYE");
            } catch (IOException e) {
                LOG.error(StringUtils.stringifyException(e));
            }
        }
    });

    // run the thread ...
    thread.start();

    ReadableByteChannel channel = Channels.newChannel(new FileInputStream(arcFileItem));
    try {
        for (;;) {
            ByteBuffer buffer = ByteBuffer.allocate(BLOCK_SIZE);
            int bytesRead = channel.read(buffer);
            LOG.info("Read " + bytesRead + " From File");
            if (bytesRead == -1) {
                finished();
                break;
            } else {
                buffer.flip();
                available(buffer);
            }
        }
    } finally {
        channel.close();
    }

    // now wait for thread to die ...
    LOG.info("Done Reading File.... Waiting for ArcFileThread to DIE");
    thread.join();
    LOG.info("Done Reading File.... ArcFileThread DIED");
}
From source file:com.github.hrpc.rpc.Server.java
/**
 * Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)}
 * and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only
 * one of readCh or writeCh should be non-null.
 *
 * @see #channelRead(ReadableByteChannel, ByteBuffer)
 * @see #channelWrite(WritableByteChannel, ByteBuffer)
 */
private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, ByteBuffer buf)
        throws IOException {
    int originalLimit = buf.limit();
    int initialRemaining = buf.remaining();
    int ret = 0;

    while (buf.remaining() > 0) {
        try {
            int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
            buf.limit(buf.position() + ioSize);

            ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf);

            if (ret < ioSize) {
                break;
            }
        } finally {
            buf.limit(originalLimit);
        }
    }

    int nBytes = initialRemaining - buf.remaining();
    return (nBytes > 0) ? nBytes : ret;
}
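The helper above caps each I/O call by temporarily shrinking the buffer's limit to a NIO_BUFFER_LIMIT-sized window, then restores the original limit in the finally block so the caller's view of the buffer is unchanged; the point of the cap is that the JDK's channel implementations allocate temporary direct buffers sized to the requested transfer. A standalone sketch of that windowing trick, with an illustrative stand-in value for the constant:

import java.nio.ByteBuffer;

public class LimitWindowSketch {
    // Hypothetical stand-in for the NIO_BUFFER_LIMIT constant used by channelIO.
    private static final int NIO_BUFFER_LIMIT = 8 * 1024;

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64 * 1024);
        int originalLimit = buf.limit();

        // Expose only an 8 KB window: a single read() or write() against this
        // buffer can now transfer at most ioSize bytes.
        int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
        buf.limit(buf.position() + ioSize);
        System.out.println("window: " + buf.remaining() + " bytes");

        // Restore the full limit, as channelIO does in its finally block.
        buf.limit(originalLimit);
        System.out.println("restored: " + buf.remaining() + " bytes");
    }
}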
From source file:com.healthmarketscience.jackcess.Database.java
/**
 * Copies the given InputStream to the given channel using the most
 * efficient means possible.
 */
private static void transferFrom(FileChannel channel, InputStream in) throws IOException {
    ReadableByteChannel readChannel = Channels.newChannel(in);
    if (!BROKEN_NIO) {
        // sane implementation
        channel.transferFrom(readChannel, 0, MAX_EMPTYDB_SIZE);
    } else {
        // do things the hard way for broken vms
        ByteBuffer bb = ByteBuffer.allocate(8096);
        while (readChannel.read(bb) >= 0) {
            bb.flip();
            channel.write(bb);
            bb.clear();
        }
    }
}
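The fast path above delegates to FileChannel.transferFrom, which can copy from the source channel without staging every byte through a user-space buffer. A minimal, self-contained use of the same call (file names are illustrative); note that transferFrom may transfer fewer bytes than requested, so a full copy still needs a loop:

import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class TransferFromSketch {
    public static void main(String[] args) throws IOException {
        try (InputStream in = Files.newInputStream(Paths.get("template.db"));
             FileChannel out = FileChannel.open(Paths.get("copy.db"),
                     StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            ReadableByteChannel src = Channels.newChannel(in);
            long pos = 0;
            long n;
            // transferFrom returns 0 once the source channel reaches end-of-stream.
            while ((n = out.transferFrom(src, pos, 64 * 1024)) > 0) {
                pos += n;
            }
        }
    }
}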
From source file:ipc.Server.java
/**
 * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}.
 * If the amount of data is large, it reads from the channel in smaller chunks.
 * This avoids the JDK creating many direct buffers as the size of the
 * ByteBuffer increases. There should not be any performance degradation.
 *
 * @see ReadableByteChannel#read(ByteBuffer)
 */
private int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer)
            : channelIO(channel, null, buffer);
    return count;
}
From source file:com.tomagoyaky.jdwp.IOUtils.java
/**
 * Reads bytes from a ReadableByteChannel.
 * <p/>
 * This implementation guarantees that it will read as many bytes
 * as possible before giving up; this may not always be the case for
 * subclasses of {@link ReadableByteChannel}.
 *
 * @param input the byte channel to read
 * @param buffer byte buffer destination
 * @return the actual length read; may be less than requested if EOF was reached
 * @throws IOException if a read error occurs
 * @since 2.2
 */
public static int read(final ReadableByteChannel input, final ByteBuffer buffer) throws IOException {
    final int length = buffer.remaining();
    while (buffer.remaining() > 0) {
        final int count = input.read(buffer);
        if (EOF == count) { // EOF
            break;
        }
    }
    return length - buffer.remaining();
}
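A hedged usage sketch for the helper above (IOUtils is the class shown; the 16-byte header is hypothetical): because read() keeps reading until the buffer is full or the channel is exhausted, a short return value reliably means EOF rather than a transient partial read.

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public class ReadFullyExample {
    static ByteBuffer readHeader(ReadableByteChannel channel) throws IOException {
        ByteBuffer header = ByteBuffer.allocate(16);
        int n = IOUtils.read(channel, header); // helper defined above
        if (n < header.capacity()) {
            throw new EOFException("truncated header: got " + n + " of 16 bytes");
        }
        header.flip(); // prepare for reading the header fields
        return header;
    }
}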
From source file:com.hortonworks.hbase.replication.bridge.HBaseServer.java
/**
 * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}.
 * If the amount of data is large, it reads from the channel in smaller chunks.
 * This avoids the JDK creating many direct buffers as the size of the
 * ByteBuffer increases. There should not be any performance degradation.
 *
 * @param channel readable byte channel to read from
 * @param buffer buffer to read into
 * @return number of bytes read
 * @throws java.io.IOException e
 * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
 */
protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer)
            : channelIO(channel, null, buffer);
    if (count > 0) {
        rpcMetrics.receivedBytes.inc(count);
    }
    return count;
}
From source file:com.github.hrpc.rpc.Server.java
/**
 * This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}.
 * If the amount of data is large, it reads from the channel in smaller chunks.
 * This avoids the JDK creating many direct buffers as the size of the
 * ByteBuffer increases. There should not be any performance degradation.
 *
 * @see ReadableByteChannel#read(ByteBuffer)
 */
private int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer)
            : channelIO(channel, null, buffer);
    if (count > 0) {
        rpcMetrics.incrReceivedBytes(count);
    }
    return count;
}
From source file:com.tomagoyaky.jdwp.IOUtils.java
/**
 * Skips bytes from a ReadableByteChannel.
 * This implementation guarantees that it will read as many bytes
 * as possible before giving up.
 *
 * @param input ReadableByteChannel to skip
 * @param toSkip number of bytes to skip.
 * @return number of bytes actually skipped.
 * @throws IOException if there is a problem reading the ReadableByteChannel
 * @throws IllegalArgumentException if toSkip is negative
 * @since 2.2
 */
public static long skip(final ReadableByteChannel input, final long toSkip) throws IOException {
    if (toSkip < 0) {
        throw new IllegalArgumentException("Skip count must be non-negative, actual: " + toSkip);
    }
    final ByteBuffer skipByteBuffer = ByteBuffer.allocate((int) Math.min(toSkip, SKIP_BUFFER_SIZE));
    long remain = toSkip;
    while (remain > 0) {
        skipByteBuffer.position(0);
        skipByteBuffer.limit((int) Math.min(remain, SKIP_BUFFER_SIZE));
        final int n = input.read(skipByteBuffer);
        if (n == EOF) {
            break;
        }
        remain -= n;
    }
    return toSkip - remain;
}
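A brief usage sketch (the 128-byte preamble is hypothetical): skip() is handy for discarding a fixed-size prefix before reading payload data, and its return value reveals whether the channel ended early.

import java.io.EOFException;
import java.io.IOException;
import java.nio.channels.ReadableByteChannel;

public class SkipExample {
    static void skipPreamble(ReadableByteChannel channel) throws IOException {
        final long preambleLength = 128; // hypothetical fixed-size preamble
        final long skipped = IOUtils.skip(channel, preambleLength); // helper defined above
        if (skipped < preambleLength) {
            throw new EOFException("channel ended inside preamble after " + skipped + " bytes");
        }
    }
}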
From source file:org.apache.hadoop.hbase.ipc.RpcServer.java
/**
 * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}.
 * If the amount of data is large, it reads from the channel in smaller chunks.
 * This avoids the JDK creating many direct buffers as the size of the
 * ByteBuffer increases. There should not be any performance degradation.
 *
 * @param channel readable byte channel to read from
 * @param buffer buffer to read into
 * @return number of bytes read
 * @throws java.io.IOException e
 * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
 */
protected int channelRead(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ? channel.read(buffer)
            : channelIO(channel, null, buffer);
    if (count > 0) {
        metrics.receivedBytes(count);
    }
    return count;
}
From source file:org.apache.nifi.remote.util.SiteToSiteRestApiClient.java
public void openConnectionForSend(final String transactionUrl, final Peer peer) throws IOException {

    final CommunicationsSession commSession = peer.getCommunicationsSession();
    final String flowFilesPath = transactionUrl + "/flow-files";
    final HttpPost post = createPost(flowFilesPath);
    // Set uri so that it'll be used as transit uri.
    ((HttpCommunicationsSession) peer.getCommunicationsSession()).setDataTransferUrl(post.getURI().toString());

    post.setHeader("Content-Type", "application/octet-stream");
    post.setHeader("Accept", "text/plain");
    post.setHeader(HttpHeaders.PROTOCOL_VERSION, String.valueOf(transportProtocolVersionNegotiator.getVersion()));

    setHandshakeProperties(post);

    final CountDownLatch initConnectionLatch = new CountDownLatch(1);

    final URI requestUri = post.getURI();
    final PipedOutputStream outputStream = new PipedOutputStream();
    final PipedInputStream inputStream = new PipedInputStream(outputStream, DATA_PACKET_CHANNEL_READ_BUFFER_SIZE);
    final ReadableByteChannel dataPacketChannel = Channels.newChannel(inputStream);

    final HttpAsyncRequestProducer asyncRequestProducer = new HttpAsyncRequestProducer() {

        private final ByteBuffer buffer = ByteBuffer.allocate(DATA_PACKET_CHANNEL_READ_BUFFER_SIZE);

        private int totalRead = 0;
        private int totalProduced = 0;

        private boolean requestHasBeenReset = false;

        @Override
        public HttpHost getTarget() {
            return URIUtils.extractHost(requestUri);
        }

        @Override
        public HttpRequest generateRequest() throws IOException, HttpException {

            // Pass the output stream so that Site-to-Site client thread can send
            // data packet through this connection.
            logger.debug("sending data to {} has started...", flowFilesPath);
            ((HttpOutput) commSession.getOutput()).setOutputStream(outputStream);
            initConnectionLatch.countDown();

            final BasicHttpEntity entity = new BasicHttpEntity();
            entity.setChunked(true);
            entity.setContentType("application/octet-stream");
            post.setEntity(entity);
            return post;
        }

        private final AtomicBoolean bufferHasRemainingData = new AtomicBoolean(false);

        /**
         * If the proxy server requires authentication, the same POST request has to be sent again.
         * The first request will result in a 407, then the next one will be sent with auth headers and the actual data.
         * This method produces content only when it needs to be sent, to avoid producing the flow-file contents twice.
         * Whether we need to wait for auth is determined heuristically by the previous POST request which creates the transaction.
         * See {@link SiteToSiteRestApiClient#initiateTransactionForSend(HttpPost)} for further detail.
         */
        @Override
        public void produceContent(final ContentEncoder encoder, final IOControl ioControl) throws IOException {

            if (shouldCheckProxyAuth() && proxyAuthRequiresResend.get() && !requestHasBeenReset) {
                logger.debug("Need authentication with proxy server. Postpone producing content.");
                encoder.complete();
                return;
            }

            if (bufferHasRemainingData.get()) {
                // If there's a buffer remaining from last time, send it first.
                writeBuffer(encoder);
                if (bufferHasRemainingData.get()) {
                    return;
                }
            }

            int read;
            // This read() blocks until data becomes available,
            // or the corresponding outputStream is closed.
            if ((read = dataPacketChannel.read(buffer)) > -1) {

                logger.trace("Read {} bytes from dataPacketChannel. {}", read, flowFilesPath);
                totalRead += read;

                buffer.flip();
                writeBuffer(encoder);

            } else {

                final long totalWritten = commSession.getOutput().getBytesWritten();
                logger.debug("sending data to {} has reached to its end. produced {} bytes by reading {} bytes from channel. {} bytes written in this transaction.",
                        flowFilesPath, totalProduced, totalRead, totalWritten);

                if (totalRead != totalWritten || totalProduced != totalWritten) {
                    final String msg = "Sending data to %s has reached to its end, but produced : read : wrote byte sizes (%d : %d : %d) were not equal. Something went wrong.";
                    throw new RuntimeException(String.format(msg, flowFilesPath, totalProduced, totalRead, totalWritten));
                }
                transferDataLatch.countDown();
                encoder.complete();
                dataPacketChannel.close();
            }
        }

        private void writeBuffer(ContentEncoder encoder) throws IOException {
            while (buffer.hasRemaining()) {
                final int written = encoder.write(buffer);
                logger.trace("written {} bytes to encoder.", written);
                if (written == 0) {
                    logger.trace("Buffer still has remaining. {}", buffer);
                    bufferHasRemainingData.set(true);
                    return;
                }
                totalProduced += written;
            }
            bufferHasRemainingData.set(false);
            buffer.clear();
        }

        @Override
        public void requestCompleted(final HttpContext context) {
            logger.debug("Sending data to {} completed.", flowFilesPath);
            debugProxyAuthState(context);
        }

        @Override
        public void failed(final Exception ex) {
            final String msg = String.format("Failed to send data to %s due to %s", flowFilesPath, ex.toString());
            logger.error(msg, ex);
            eventReporter.reportEvent(Severity.WARNING, EVENT_CATEGORY, msg);
        }

        @Override
        public boolean isRepeatable() {
            // In order to pass authentication, the request has to be repeatable.
            return true;
        }

        @Override
        public void resetRequest() throws IOException {
            logger.debug("Sending data request to {} has been reset...", flowFilesPath);
            requestHasBeenReset = true;
        }

        @Override
        public void close() throws IOException {
            logger.debug("Closing sending data request to {}", flowFilesPath);
            closeSilently(outputStream);
            closeSilently(dataPacketChannel);
            stopExtendingTtl();
        }
    };

    postResult = getHttpAsyncClient().execute(asyncRequestProducer, new BasicAsyncResponseConsumer(), null);

    try {
        // Need to wait until the post request has actually started so that we can write to its output stream.
        if (!initConnectionLatch.await(connectTimeoutMillis, TimeUnit.MILLISECONDS)) {
            throw new IOException("Awaiting initConnectionLatch has been timeout.");
        }

        // Started.
        transferDataLatch = new CountDownLatch(1);

        startExtendingTtl(transactionUrl, dataPacketChannel, null);

    } catch (final InterruptedException e) {
        throw new IOException("Awaiting initConnectionLatch has been interrupted.", e);
    }
}
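The producer above hinges on a piped pair: the Site-to-Site client thread writes flow-file bytes into the PipedOutputStream while the HTTP request producer drains the matching PipedInputStream through dataPacketChannel, so read() blocks until data arrives and returns -1 once the writer closes its end. A stripped-down, self-contained sketch of that pattern (buffer sizes and the payload are illustrative):

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.charset.StandardCharsets;

public class PipedChannelSketch {
    public static void main(String[] args) throws Exception {
        final PipedOutputStream out = new PipedOutputStream();
        final PipedInputStream in = new PipedInputStream(out, 16 * 1024);
        final ReadableByteChannel channel = Channels.newChannel(in);

        // Writer thread: stands in for the client pushing data packets.
        Thread writer = new Thread(() -> {
            try {
                out.write("hello, peer".getBytes(StandardCharsets.UTF_8));
                out.close(); // closing the writer end unblocks the reader with EOF
            } catch (IOException ignored) {
            }
        });
        writer.start();

        // Reader side: each read() blocks until bytes are available or the pipe is closed.
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        int read;
        while ((read = channel.read(buffer)) != -1) {
            System.out.println("read " + read + " bytes");
            buffer.clear();
        }
        writer.join();
        channel.close();
    }
}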