List of usage examples for java.nio.ByteBuffer.flip()
public final Buffer flip()
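flip() sets the buffer's limit to the current position and resets the position to zero, switching the buffer from filling (writing) to draining (reading). Before the real-world examples below, here is a minimal standalone sketch of that write-flip-read cycle (the class name and payload are illustrative):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FlipExample {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(64);
        buffer.put("hello".getBytes(StandardCharsets.US_ASCII)); // write: position advances to 5
        buffer.flip();                                           // limit = 5, position = 0
        byte[] out = new byte[buffer.remaining()];               // remaining() == 5
        buffer.get(out);                                         // read back exactly the bytes written
        System.out.println(new String(out, StandardCharsets.US_ASCII));
        buffer.clear();                                          // position = 0, limit = capacity: ready to refill
    }
}

After draining, clear() or compact() reopens the buffer for writing, as the examples below do.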
From source file:edu.hawaii.soest.kilonalu.tchain.TChainSource.java
/**
 * A method that executes the streaming of data from the source to the RBNB
 * server after all configuration of settings, connections to hosts, and
 * thread initializing occurs. This method contains the detailed code for
 * streaming the data and interpreting the stream.
 */
protected boolean execute() {
    logger.debug("TChainSource.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;
    SocketChannel socket = getSocketConnection();

    // while data are being sent, read them into the buffer
    try {
        // create four byte placeholders used to evaluate up to a four-byte
        // window. The FIFO layout looks like:
        //         -------------------------
        // in ---> | One | Two |Three|Four | ---> out
        //         -------------------------
        byte byteOne = 0x00,   // set initial placeholder values
             byteTwo = 0x00,
             byteThree = 0x00,
             byteFour = 0x00;

        // Create a buffer that will store the sample bytes as they are read
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // add a channel of data that will be pushed to the server.
        // Each sample will be sent to the Data Turbine as an rbnb frame.
        ChannelMap rbnbChannelMap = new ChannelMap();

        // while there are bytes to read from the socket ...
        while (socket.read(buffer) != -1 || buffer.position() > 0) {
            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();
                logger.debug("char: " + (char) byteOne + "\t"
                    + "b1: " + new String(Hex.encodeHex(new byte[] { byteOne })) + "\t"
                    + "b2: " + new String(Hex.encodeHex(new byte[] { byteTwo })) + "\t"
                    + "b3: " + new String(Hex.encodeHex(new byte[] { byteThree })) + "\t"
                    + "b4: " + new String(Hex.encodeHex(new byte[] { byteFour })) + "\t"
                    + "sample pos: " + sampleBuffer.position() + "\t"
                    + "sample rem: " + sampleBuffer.remaining() + "\t"
                    + "sample cnt: " + sampleByteCount + "\t"
                    + "buffer pos: " + buffer.position() + "\t"
                    + "buffer rem: " + buffer.remaining() + "\t"
                    + "state: " + state);

                // Use a State Machine to process the byte stream.
                // Start building an rbnb frame for the entire sample, first by
                // inserting a timestamp into the channelMap. This time is merely
                // the time of insert into the data turbine, not the time of
                // observation of the measurements. That time should be parsed out
                // of the sample in the Sink client code.
                switch (state) {
                case 0: // sample line ending is '\r\n' (carriage return, newline)
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == this.firstDelimiterByte && byteTwo == this.secondDelimiterByte) {
                        // we've found the end of a sample, move on
                        state = 1;
                        break;
                    } else {
                        break;
                    }

                case 1: // read the rest of the bytes to the next EOL characters
                    // sample line is terminated by record delimiter bytes (usually \r\n or \n)
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == this.firstDelimiterByte && byteTwo == this.secondDelimiterByte) {
                        // rewind the sample to overwrite the line ending so we can add
                        // in the timestamp (then add the line ending)
                        sampleBuffer.position(sampleBuffer.position() - 1);
                        --sampleByteCount;

                        // add the field delimiter to the end of the sample
                        byte[] delimiterAsBytes = getFieldDelimiter().getBytes("US-ASCII");
                        for (byte delim : delimiterAsBytes) {
                            sampleBuffer.put(delim);
                            sampleByteCount++;
                        }

                        // then add a timestamp to the end of the sample
                        DATE_FORMAT.setTimeZone(TZ);
                        byte[] sampleDateAsBytes = DATE_FORMAT.format(new Date()).getBytes("US-ASCII");
                        for (byte b : sampleDateAsBytes) {
                            sampleBuffer.put(b);
                            sampleByteCount++;
                        }

                        // add the last two bytes found (usually \r\n) to the sample buffer
                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                            sampleByteCount++;
                            sampleBuffer.put(byteTwo);
                            sampleByteCount++;
                        } else {
                            sampleBuffer.compact();
                            sampleBuffer.put(byteOne);
                            sampleByteCount++;
                            sampleBuffer.put(byteTwo);
                            sampleByteCount++;
                        }

                        // extract just the length of the sample bytes out of the
                        // sample buffer, and place it in the channel map as a
                        // byte array. Then, send it to the data turbine.
                        byte[] sampleArray = new byte[sampleByteCount];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);

                        // send the sample to the data turbine
                        rbnbChannelMap.PutTimeAuto("server");
                        String sampleString = new String(sampleArray, "US-ASCII");
                        int channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
                        rbnbChannelMap.PutMime(channelIndex, "text/plain");
                        rbnbChannelMap.PutDataAsString(channelIndex, sampleString);
                        getSource().Flush(rbnbChannelMap);
                        logger.info("Sample: " + sampleString.substring(0, sampleString.length() - 2)
                            + " sent data to the DataTurbine. ");

                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        rbnbChannelMap.Clear();
                        logger.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");
                        //state = 0;

                    } else { // not 0x0D0A
                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found
                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                        } else {
                            sampleBuffer.compact();
                            logger.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);
                        }
                        break;
                    } // end if for 0x0D0A EOL
                } // end switch statement

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;
            } // end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();
        } // end while (more socket bytes to read)

        socket.close();

    } catch (IOException e) {
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        e.printStackTrace();
        return !failed;
    } catch (SAPIException sapie) {
        // In the event of an RBNB communication exception, log the exception,
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        sapie.printStackTrace();
        return !failed;
    }

    return !failed;
}
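The example above is an instance of the standard channel-read loop: read() fills the buffer, flip() switches it to draining for the parser, and compact() preserves any unconsumed bytes before the next read(). A minimal sketch of that loop on its own, with the parsing reduced to a placeholder (ReadLoopSketch and process() are illustrative names, not part of the source above):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public class ReadLoopSketch {
    // Drain a channel with the read/flip/process/compact cycle.
    static void drain(ReadableByteChannel channel, ByteBuffer buffer) throws IOException {
        while (channel.read(buffer) != -1 || buffer.position() > 0) {
            buffer.flip();                 // switch to draining what was just read
            while (buffer.hasRemaining()) {
                process(buffer.get());     // a real parser may stop mid-record
            }
            buffer.compact();              // keep any unread bytes, reopen for filling
        }
    }

    static void process(byte b) {
        // placeholder for per-byte handling, e.g. the state machine above
    }
}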
From source file:org.alfresco.contentstore.ChecksumTest.java
@Test
public void test11() throws IOException {
    try (InputStream in = getClass().getClassLoader().getResourceAsStream("marbles-uncompressed.tif");
         InputStream in1 = getClass().getClassLoader().getResourceAsStream("marbles-uncompressed1.tif")) {
        ByteBuffer buf1 = ByteBuffer.allocate(8192);
        ByteBuffer buf2 = ByteBuffer.allocate(8192);
        ReadableByteChannel channel1 = Channels.newChannel(in);
        ReadableByteChannel channel2 = Channels.newChannel(in1);

        int numRead1 = -1;
        int numRead2 = -1;
        int total = 0;
        int same = 0;
        int i = 0;

        do {
            total = 0;
            same = 0;

            numRead1 = channel1.read(buf1);
            numRead2 = channel2.read(buf2);
            i += 8192;

            buf1.flip();
            buf2.flip();

            if (numRead1 > 0 && numRead2 > 0) {
                if (numRead1 <= numRead2) {
                    while (buf1.hasRemaining()) {
                        total++;
                        byte b1 = buf1.get();
                        byte b2 = buf2.get();
                        if (b1 == b2) {
                            same++;
                        }
                    }
                } else {
                    while (buf2.hasRemaining()) {
                        total++;
                        byte b1 = buf1.get();
                        byte b2 = buf2.get();
                        if (b1 == b2) {
                            same++;
                        }
                    }
                }
            }

            buf1.clear();
            buf2.clear();
        } while (numRead1 > 0 && numRead2 > 0 && same < total);

        System.out.println(i + ", " + numRead1 + ", " + numRead2 + ", " + total + ", " + same + ", "
            + (double) same / total);
    }
}
From source file:io.neba.core.logviewer.Tail.java
@Override
public void run() {
    SeekableByteChannel channel = null;
    try {
        channel = newByteChannel(this.file.toPath(), READ);
        long availableInByte = this.file.length();
        long startingFromInByte = max(availableInByte - this.bytesToTail, 0);
        channel.position(startingFromInByte);

        long position = startingFromInByte;
        long totalBytesRead = 0L;

        // Read up to this amount of data from the file at once.
        ByteBuffer readBuffer = allocate(4096);

        while (!this.stopped) {
            // The file might be temporarily gone during rotation. Wait, then decide
            // whether the file is considered gone permanently or whether a rotation has occurred.
            if (!this.file.exists()) {
                sleep(AWAIT_FILE_ROTATION_MILLIS);
            }
            if (!this.file.exists()) {
                this.remoteEndpoint.sendString("file not found");
                return;
            }

            if (position > this.file.length()) {
                this.remoteEndpoint.sendString("file rotated");
                position = 0;
                closeQuietly(channel);
                channel = newByteChannel(this.file.toPath(), READ);
            }

            int read = channel.read(readBuffer);
            if (read == -1) {
                if (mode == TAIL) {
                    // EOF, we are done.
                    return;
                }
                // If we are in follow mode, reaching the end of the file might signal
                // a file rotation. Sleep and re-try.
                sleep(TAIL_CHECK_INTERVAL_MILLIS);
                continue;
            }

            totalBytesRead += read;
            position = channel.position();

            readBuffer.flip();
            this.remoteEndpoint.sendBytes(readBuffer);
            readBuffer.clear();

            if (mode == TAIL && totalBytesRead >= this.bytesToTail) {
                return;
            }
        }
    } catch (IOException e) {
        this.logger.error("Unable to tail " + this.file.getAbsolutePath() + ".", e);
    } catch (InterruptedException e) {
        if (!this.stopped) {
            this.logger.error("Stopped tailing " + this.file.getAbsolutePath() + ", got interrupted.", e);
        }
    } finally {
        closeQuietly(channel);
    }
}
From source file:org.apache.nifi.processor.util.listen.dispatcher.DatagramChannelDispatcher.java
@Override
public void run() {
    final ByteBuffer buffer = bufferPool.poll();
    while (!stopped) {
        try {
            int selected = selector.select();
            // if stopped the selector could already be closed which would result in a ClosedSelectorException
            if (selected > 0 && !stopped) {
                Iterator<SelectionKey> selectorKeys = selector.selectedKeys().iterator();
                // if stopped we don't want to modify the keys because close() may still be in progress
                while (selectorKeys.hasNext() && !stopped) {
                    SelectionKey key = selectorKeys.next();
                    selectorKeys.remove();
                    if (!key.isValid()) {
                        continue;
                    }

                    DatagramChannel channel = (DatagramChannel) key.channel();
                    SocketAddress socketAddress;
                    buffer.clear();

                    while (!stopped && (socketAddress = channel.receive(buffer)) != null) {
                        String sender = "";
                        if (socketAddress instanceof InetSocketAddress) {
                            sender = ((InetSocketAddress) socketAddress).getAddress().toString();
                        }

                        // create a byte array from the buffer
                        buffer.flip();
                        byte[] bytes = new byte[buffer.limit()];
                        buffer.get(bytes, 0, buffer.limit());

                        final Map<String, String> metadata = EventFactoryUtil.createMapWithSender(sender);
                        final E event = eventFactory.create(bytes, metadata, null);
                        events.offer(event);
                        buffer.clear();
                    }
                }
            }
        } catch (InterruptedException e) {
            stopped = true;
            Thread.currentThread().interrupt();
        } catch (IOException e) {
            logger.error("Error reading from DatagramChannel", e);
        }
    }

    if (buffer != null) {
        try {
            bufferPool.put(buffer);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
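The dispatcher above shows the datagram variant of the pattern: clear() before receive() so the whole buffer is available for one packet, flip() afterwards so limit() marks the datagram length, then copy exactly that many bytes. A standalone sketch of a single blocking receive, assuming a local UDP socket (the class name and port are illustrative):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;

public class DatagramReceiveSketch {
    public static void main(String[] args) throws IOException {
        try (DatagramChannel channel = DatagramChannel.open()) {
            channel.bind(new InetSocketAddress(9999));      // illustrative port
            ByteBuffer buffer = ByteBuffer.allocate(2048);
            buffer.clear();                                 // open the full buffer for one datagram
            SocketAddress sender = channel.receive(buffer); // blocks until a packet arrives
            buffer.flip();                                  // limit = datagram length
            byte[] payload = new byte[buffer.remaining()];
            buffer.get(payload);                            // copy exactly the received bytes
            System.out.println(payload.length + " bytes from " + sender);
        }
    }
}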
From source file:com.koda.integ.hbase.storage.FileExtStorage.java
@Override
public void flush() throws IOException {
    // TODO this method flushes only the internal buffer
    // and does not touch the internal flusher queue
    LOG.info("Flushing internal buffer to the storage");
    long start = System.currentTimeMillis();
    writeLock.writeLock().lock();
    try {
        ByteBuffer buf = activeBuffer.get();
        if (bufferOffset.get() == 0) {
            // skip flush
            LOG.info("Skipping flush");
            return;
        }
        if (buf != null) {
            if (buf.position() != 0)
                buf.flip();
            while (buf.hasRemaining()) {
                currentForWrite.getChannel().write(buf);
            }
            buf.clear();
            bufferOffset.set(0);
            // we advance to the next file
        } else {
            LOG.warn("Active buffer is NULL");
        }
    } catch (Exception e) {
        LOG.error(e);
    } finally {
        writeLock.writeLock().unlock();
        // Close file
        currentForWrite.close();
    }
    LOG.info("Flushing completed in " + (System.currentTimeMillis() - start) + "ms");
}
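This flush illustrates the complementary write-side pattern: flip() the filled buffer, then loop write() until hasRemaining() is false, since a single write() call may consume only part of the buffer. The same idea in isolation (WriteDrainSketch is an illustrative name):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

public class WriteDrainSketch {
    // Drain a filled buffer to a channel, then reset it for the next fill.
    static void writeFully(WritableByteChannel channel, ByteBuffer buffer) throws IOException {
        buffer.flip();                 // limit = bytes staged, position = 0
        while (buffer.hasRemaining()) {
            channel.write(buffer);     // write() is not guaranteed to consume everything
        }
        buffer.clear();                // reopen for the next fill
    }
}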
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
/**
 * Read external with codec.
 *
 * @param blockName the block name
 * @return the cacheable
 * @throws IOException Signals that an I/O exception has occurred.
 */
@SuppressWarnings("unused")
private Cacheable readExternalWithCodec(String blockName, boolean repeat, boolean caching) throws IOException {
    if (overflowExtEnabled == false)
        return null;
    // Check if we already have this block in the external storage cache
    try {
        // We use a 16-byte hash for the external storage cache
        byte[] hashed = Utils.hash128(blockName);
        StorageHandle handle = storage.newStorageHandle();
        byte[] data = (byte[]) extStorageCache.get(hashed);
        if (data == null) {
            if (repeat == false)
                extRefStats.miss(caching);
            return null;
        } else {
            extRefStats.hit(caching);
        }

        // Initialize handle
        handle.fromBytes(data);
        ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
        SerDe serde = extStorageCache.getSerDe();
        Codec codec = extStorageCache.getCompressionCodec();

        buffer.clear();
        StorageHandle newHandle = storage.getData(handle, buffer);
        if (buffer.position() > 0)
            buffer.flip();
        int size = buffer.getInt();
        if (size == 0) {
            // BIGBASE-45
            // Remove the reference from the reference cache:
            // the reference is in the L3-RAM cache but no object was found
            // in the L3-DISK cache. Remove only if the handle is invalid.
            if (storage.isValid(handle) == false) {
                extStorageCache.remove(hashed);
            }
            return null;
        }

        // Skip key
        int keySize = buffer.getInt();
        buffer.position(8 + keySize);
        boolean inMemory = buffer.get() == (byte) 1;
        buffer.limit(size + 4);
        Cacheable obj = (Cacheable) serde.readCompressed(buffer/*, codec*/);
        offHeapCache.put(blockName, obj);
        if (newHandle.equals(handle) == false) {
            extStorageCache.put(hashed, newHandle.toBytes());
        }
        return obj;
    } catch (Throwable e) {
        fatalExternalReads.incrementAndGet();
        throw new IOException(e);
    }
}
From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager.java
private ByteBuffer readFileToByteBuffer(File source) throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(128);
    FileChannel fileChannel = new FileInputStream(source).getChannel();
    fileChannel.read(buffer);
    fileChannel.close();
    buffer.flip();
    return buffer;
}
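Note that this test helper issues a single read() into a fixed 128-byte buffer, which is only safe for files that fit in one read. A hedged variant that sizes the buffer to the file and loops until the buffer is full or EOF (readFully is an illustrative name, not part of the original test):

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.StandardOpenOption;

public class ReadFileSketch {
    static ByteBuffer readFully(File source) throws IOException {
        try (FileChannel channel = FileChannel.open(source.toPath(), StandardOpenOption.READ)) {
            // assumes the file fits in an int-sized buffer
            ByteBuffer buffer = ByteBuffer.allocate((int) channel.size());
            while (buffer.hasRemaining() && channel.read(buffer) != -1) {
                // keep reading until the buffer is full or EOF
            }
            buffer.flip(); // limit = bytes read, position = 0: ready for consumers
            return buffer;
        }
    }
}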
From source file:org.carbondata.processing.restructure.SchemaRestructurer.java
private static ByteBuffer getMemberByteBufferWithoutDefaultValue(String defaultValue) {
    int minValue = 1;
    int rowLength = 8;
    boolean enableEncoding = Boolean.valueOf(
        CarbonProperties.getInstance().getProperty(CarbonCommonConstants.ENABLE_BASE64_ENCODING,
            CarbonCommonConstants.ENABLE_BASE64_ENCODING_DEFAULT));
    ByteBuffer buffer = null;
    byte[] data = null;

    if (enableEncoding) {
        try {
            data = Base64.encodeBase64(CarbonCommonConstants.MEMBER_DEFAULT_VAL.getBytes("UTF-8"));
        } catch (UnsupportedEncodingException e) {
            data = Base64.encodeBase64(CarbonCommonConstants.MEMBER_DEFAULT_VAL.getBytes());
        }
    } else {
        try {
            data = CarbonCommonConstants.MEMBER_DEFAULT_VAL.getBytes("UTF-8");
        } catch (UnsupportedEncodingException e) {
            data = CarbonCommonConstants.MEMBER_DEFAULT_VAL.getBytes();
        }
    }

    rowLength += 4;
    rowLength += data.length;

    if (null == defaultValue) {
        buffer = ByteBuffer.allocate(rowLength);
        buffer.putInt(minValue);
        buffer.putInt(data.length);
        buffer.put(data);
        buffer.putInt(minValue);
    } else {
        byte[] data1 = null;
        if (enableEncoding) {
            try {
                data1 = Base64.encodeBase64(defaultValue.getBytes("UTF-8"));
            } catch (UnsupportedEncodingException e) {
                data1 = Base64.encodeBase64(defaultValue.getBytes());
            }
        } else {
            try {
                data1 = defaultValue.getBytes("UTF-8");
            } catch (UnsupportedEncodingException e) {
                data1 = defaultValue.getBytes();
            }
        }
        rowLength += 4;
        rowLength += data1.length;
        buffer = ByteBuffer.allocate(rowLength);
        buffer.putInt(minValue);
        buffer.putInt(data.length);
        buffer.put(data);
        buffer.putInt(data1.length);
        buffer.put(data1);
        buffer.putInt(2);
    }

    buffer.flip();
    return buffer;
}
From source file:org.apache.bookkeeper.bookie.Bookie.java
/**
 * Fences a ledger. From this point on, clients will be unable to
 * write to this ledger. Only recoveryAddEntry will be
 * able to add entries to the ledger.
 * This method is idempotent. Once a ledger is fenced, it can
 * never be unfenced. Fencing a fenced ledger has no effect.
 */
public Future<Boolean> fenceLedger(long ledgerId, byte[] masterKey) throws IOException, BookieException {
    LedgerDescriptor handle = handles.getHandle(ledgerId, masterKey);
    boolean success;
    synchronized (handle) {
        success = handle.setFenced();
    }
    if (success) {
        // fenced for the first time, we should add the key to the journal
        // to ensure we can rebuild
        ByteBuffer bb = ByteBuffer.allocate(8 + 8);
        bb.putLong(ledgerId);
        bb.putLong(METAENTRY_ID_FENCE_KEY);
        bb.flip();

        FutureWriteCallback fwc = new FutureWriteCallback();
        LOG.debug("record fenced state for ledger {} in journal.", ledgerId);
        journal.logAddEntry(bb, fwc, null);
        return fwc.getResult();
    } else {
        // already fenced
        return SUCCESS_FUTURE;
    }
}
From source file:org.apache.bookkeeper.bookie.Bookie.java
/**
 * Retrieve the ledger descriptor for the ledger which the entry should be added to.
 * The LedgerDescriptor returned from this method should be eventually freed with
 * #putHandle().
 *
 * @throws BookieException if masterKey does not match the master key of the ledger
 */
private LedgerDescriptor getLedgerForEntry(ByteBuffer entry, byte[] masterKey)
        throws IOException, BookieException {
    long ledgerId = entry.getLong();
    LedgerDescriptor l = handles.getHandle(ledgerId, masterKey);
    if (!masterKeyCache.containsKey(ledgerId)) {
        // new handle, we should add the key to the journal to ensure we can rebuild
        ByteBuffer bb = ByteBuffer.allocate(8 + 8 + 4 + masterKey.length);
        bb.putLong(ledgerId);
        bb.putLong(METAENTRY_ID_LEDGER_KEY);
        bb.putInt(masterKey.length);
        bb.put(masterKey);
        bb.flip();

        if (null == masterKeyCache.putIfAbsent(ledgerId, masterKey)) {
            journal.logAddEntry(bb, new NopWriteCallback(), null);
        }
    }
    return l;
}