Usage examples for java.nio.ByteBuffer.limit()
public final int limit() — returns this buffer's limit. The companion setter, limit(int newLimit), sets the limit; several of the examples below use that form as well.
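Before the examples, a minimal standalone sketch (not from any of the sources below) of what limit() reports: a fresh buffer's limit equals its capacity, and flip() moves the limit to the end of whatever was written:

    import java.nio.ByteBuffer;

    public class LimitDemo {
        public static void main(String[] args) {
            ByteBuffer buf = ByteBuffer.allocate(16);
            System.out.println(buf.limit());  // 16: a fresh buffer's limit equals its capacity
            buf.put((byte) 1).put((byte) 2);
            buf.flip();                       // limit = old position (2), position = 0
            System.out.println(buf.limit());  // 2: the number of readable bytes
        }
    }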
From source file: org.apache.hadoop.hive.ql.exec.persistence.AnalysisBuffer.java

    public Row next() {
        if (currentseekrowid > lastrowid || currentseekrowid < firstrowid)
            return null;
        if (!fileused || currentseekrowid >= membuffer.firstrowid) {
            return membuffer.getByRowid(currentseekrowid++);
        } else {
            currentseekrowid++;
            ByteBuffer buf = indexedFile.next();
            byte[] bytes = Arrays.copyOf(buf.array(), buf.limit());
            return deserialize(bytes, 0, bytes.length);
        }
    }
From source file: org.apache.arrow.vector.util.Text.java

    /**
     * Set to contain the contents of a string.
     */
    public void set(String string) {
        try {
            ByteBuffer bb = encode(string, true);
            bytes = bb.array();
            length = bb.limit();
        } catch (CharacterCodingException e) {
            throw new RuntimeException("Should not have happened", e);
        }
    }
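The example above relies on the encoder returning a buffer that is already flipped: limit() marks the end of the encoded bytes, while the backing array may be longer. A standalone sketch of the same point using the standard Charset API (Text's encode(String, boolean) helper is Arrow's own, so StandardCharsets stands in for it here):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class EncodedLengthDemo {
        public static void main(String[] args) {
            ByteBuffer bb = StandardCharsets.UTF_8.encode("héllo");
            // limit(), not the backing array's length, is the byte length of the encoded string
            byte[] bytes = new byte[bb.limit()];
            bb.get(bytes);
            System.out.println(bytes.length); // 6: 'é' encodes to two bytes in UTF-8
        }
    }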
From source file: org.apache.hadoop.hive.ql.exec.persistence.AnalysisBuffer.java

    public Row removeFirst(boolean res) {
        Row row = null;
        if (fileused) {
            if (res) {
                ByteBuffer buf = indexedFile.get(firstrowid);
                row = deserialize(buf.array(), 0, buf.limit());
            }
            firstrowid++;
            if (firstrowid == membuffer.firstrowid) {
                fileused = false;
            }
        } else {
            row = membuffer.removeFirst();
            firstrowid++;
        }
        return row;
    }
From source file: eu.stratosphere.runtime.io.network.netty.InboundEnvelopeDecoder.java

    /**
     * Copies min(src.readableBytes(), dst.remaining()) bytes from Netty's ByteBuf
     * to the Java NIO ByteBuffer.
     */
    private void copy(ByteBuf src, ByteBuffer dst) {
        // This branch is necessary, because an exception is thrown if the
        // destination buffer has more remaining (writable) bytes than are
        // currently readable from the Netty ByteBuf source.
        if (src.isReadable()) {
            if (src.readableBytes() < dst.remaining()) {
                int oldLimit = dst.limit();
                dst.limit(dst.position() + src.readableBytes());
                src.readBytes(dst);
                dst.limit(oldLimit);
            } else {
                src.readBytes(dst);
            }
        }
    }
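The save/shrink/restore idiom above is not Netty-specific; the same trick bounds any bulk transfer between NIO buffers. A minimal sketch using only java.nio (the helper name copyAtMost is illustrative):

    import java.nio.ByteBuffer;

    public class BoundedCopy {
        /** Copies at most maxBytes from src to dst by temporarily shrinking src's limit. */
        static void copyAtMost(ByteBuffer src, ByteBuffer dst, int maxBytes) {
            int n = Math.min(maxBytes, Math.min(src.remaining(), dst.remaining()));
            int oldLimit = src.limit();
            src.limit(src.position() + n); // expose only n bytes to the bulk put
            try {
                dst.put(src);
            } finally {
                src.limit(oldLimit);       // always restore the original limit
            }
        }

        public static void main(String[] args) {
            ByteBuffer src = ByteBuffer.wrap(new byte[] {1, 2, 3, 4, 5});
            ByteBuffer dst = ByteBuffer.allocate(2);
            copyAtMost(src, dst, 4);            // copies only 2 bytes: dst is the bottleneck
            System.out.println(dst.position()); // 2
        }
    }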
From source file: org.commoncrawl.util.S3InputStream.java

    @Override
    public boolean contentAvailable(NIOHttpConnection theConnection, int itemId, String itemKey,
            NIOBufferList contentBuffer) {
        ByteBuffer buffer = null;
        IOException exception = null;
        //int receivedBytes = 0;
        try {
            while ((buffer = contentBuffer.read()) != null) {
                if (buffer.position() != 0) {
                    buffer = buffer.slice();
                }
                //receivedBytes += buffer.remaining();
                buffer.position(buffer.limit());
                _bufferQueue.write(buffer);
            }
            _bufferQueue.flush();
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
            exception = e;
        }
        if (_bufferQueue.available() >= MAX_BUFFER_SIZE) {
            LOG.info("*** PAUSING DOWNLOADS FOR:" + theConnection.getURL());
            theConnection.disableReads();
            pausedConnection.set(theConnection);
        }
        //long nanoTimeStart = System.nanoTime();
        _writeLock.lock();
        //long nanoTimeEnd = System.nanoTime();
        //System.out.println("Received: " + receivedBytes + " for URI:" + uri + " Lock took:" + (nanoTimeEnd - nanoTimeStart));
        try {
            Condition writeCondition = _writeEvent.getAndSet(null);
            if (exception != null) {
                _eofCondition.set(true);
                _exception.set(exception);
            }
            if (writeCondition != null) {
                writeCondition.signal();
            }
        } finally {
            _writeLock.unlock();
        }
        return true;
    }
From source file: cn.iie.haiep.hbase.value.Bytes.java

    /**
     * Converts the given byte buffer, from its array offset to its limit, to
     * a string. The position and the mark are ignored.
     *
     * @param buf a byte buffer
     * @return a string representation of the buffer's binary contents
     */
    public static String toStringBinary(ByteBuffer buf) {
        if (buf == null)
            return "null";
        return toStringBinary(buf.array(), buf.arrayOffset(), buf.limit());
    }
From source file: net.sf.jinsim.response.ResponseFactory.java

    public InSimResponse getPacketData(ByteBuffer buffer) throws UnhandledPacketTypeException,
            BufferUnderflowException, InstantiationException, IllegalAccessException {
        InSimResponse insimResponse = null;
        /*
        if (buffer.limit() == 91) {
            // is OutGauge message
            insimResponse = new OutGaugeResponse();
        } else if (buffer.limit() == 63) {
            // is OutSim message
            log.debug("OutSim is not processed");
        } else
        */
        if (buffer.limit() >= 3) {
            int packetId = buffer.get() & 0xFF;
            PacketType packetType = PacketType.getPacket(packetId);
            /*
            if (log.isDebugEnabled()) {
                log.debug("Packet is of type " + packetType);
            }
            */
            Class<? extends InSimResponse> insimResponseClass = (Class<? extends InSimResponse>) registeredTypes
                    .get(packetType);
            if (insimResponseClass == null) {
                buffer.position(buffer.limit());
                throw new UnhandledPacketTypeException(packetId + ": is unknown");
            }
            insimResponse = (InSimResponse) insimResponseClass.newInstance();
        } else {
            if (log.isDebugEnabled()) {
                String bufferBytes = "";
                for (int i = 0; i < buffer.limit(); i++) {
                    bufferBytes += buffer.get() + ", ";
                }
                log.debug("unknown packet: " + bufferBytes);
            } else {
                buffer.position(buffer.limit());
            }
        }
        if (insimResponse == null) {
            throw new UnhandledPacketTypeException("Cannot identify response packet");
        }
        try {
            insimResponse.construct(buffer);
        } catch (BufferUnderflowException ex) {
            log.error(ex);
        }
        /*
        if (log.isDebugEnabled()) {
            log.debug("InSimResponse {packet size= " + (buffer.limit() + 1) + "}: " + insimResponse);
        }
        */
        return insimResponse;
    }
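Two limit() idioms appear in this example: limit() as the total packet size, and buffer.position(buffer.limit()) to discard whatever is left unread so the buffer reads as fully consumed. A standalone sketch of the drain idiom (values are illustrative):

    import java.nio.ByteBuffer;

    public class DrainDemo {
        public static void main(String[] args) {
            ByteBuffer packet = ByteBuffer.wrap(new byte[] {9, 1, 2, 3});
            packet.get();                            // consume the packet id
            packet.position(packet.limit());         // skip the rest of the packet
            System.out.println(packet.remaining());  // 0: nothing left to read
        }
    }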
From source file: bamboo.openhash.fileshare.FileShare.java

    public void write() {
        logger.debug("write");
        if (is != null) {
            while (ready.size() < MAX_BUFFER) {
                ByteBuffer bb = ByteBuffer.wrap(new byte[1024]);
                bb.putInt(0);
                int len = 0;
                try {
                    len = is.read(bb.array(), 4, bb.limit() - 4);
                } catch (IOException e) {
                    is = null;
                    break;
                }
                if (len == -1) {
                    is = null;
                    break;
                }
                logger.debug("position=" + bb.position() + " read " + len + " bytes");
                // We're going to flip this later, so set the position
                // where we want the limit to end up.
                bb.position(len + 4);
                wblocks.elementAt(0).addLast(bb);
                logger.debug("read a block");
                if (wblocks.elementAt(0).size() == BRANCHING)
                    make_parents(false);
            }
            if (is == null) {
                make_parents(true);
                // There should now be only one non-empty level, and it
                // should have exactly one block in it.
                for (int l = 0; l < wblocks.size(); ++l) {
                    if (!wblocks.elementAt(l).isEmpty()) {
                        ByteBuffer bb = wblocks.elementAt(l).removeFirst();
                        bb.flip();
                        md.update(secret);
                        md.update(bb.array(), 0, bb.limit());
                        byte[] dig = md.digest();
                        StringBuffer sb = new StringBuffer(100);
                        bytes_to_sbuf(dig, 0, dig.length, false, sb);
                        logger.info("root digest is 0x" + sb.toString());
                        ready.addLast(new Pair<byte[], ByteBuffer>(dig, bb));
                        break;
                    }
                }
            }
        }

        // Do put.
        if (ready.isEmpty()) {
            if (outstanding == 0) {
                logger.info("all puts finished successfully");
                System.exit(0);
            }
        } else {
            Pair<byte[], ByteBuffer> head = ready.removeFirst();
            outstanding++;
            bamboo_put_args put = new bamboo_put_args();
            put.application = APPLICATION;
            // GatewayClient will fill in put.client_library
            put.value = new bamboo_value();
            if (head.second.limit() == head.second.array().length)
                put.value.value = head.second.array();
            else {
                put.value.value = new byte[head.second.limit()];
                head.second.get(put.value.value);
            }
            put.key = new bamboo_key();
            put.key.value = head.first;
            put.ttl_sec = 3600; // TODO
            StringBuffer sb = new StringBuffer(100);
            bytes_to_sbuf(head.first, 0, head.first.length, false, sb);
            logger.debug("putting block size=" + put.value.value.length + " key=0x" + sb.toString());
            client.put(put, curry(put_done_cb, put));
        }
    }
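The comment in the read loop above ("set the position where we want the limit to end up") works because flip() sets the limit to the current position and resets the position to zero. A standalone illustration (the offsets mirror the 4-byte header used above):

    import java.nio.ByteBuffer;

    public class FlipDemo {
        public static void main(String[] args) {
            ByteBuffer bb = ByteBuffer.allocate(1024);
            bb.putInt(0);                   // 4-byte header, position is now 4
            // pretend 100 payload bytes were written after the header
            bb.position(4 + 100);
            bb.flip();                      // limit = 104, position = 0
            System.out.println(bb.limit()); // 104: exactly header + payload
        }
    }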
From source file: org.inquidia.kettle.plugins.tokenreplacement.TokenReplacement.java

    public synchronized boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {
        meta = (TokenReplacementMeta) smi;
        data = (TokenReplacementData) sdi;

        boolean result = true;
        Object[] r = getRow(); // This also waits for a row to be finished.

        if (first && r != null) {
            first = false;
            data.inputRowMeta = getInputRowMeta();
            data.outputRowMeta = getInputRowMeta().clone();
            if (meta.getOutputType().equalsIgnoreCase("field")) {
                meta.getFields(data.outputRowMeta, getStepname(), null, null, this, repository, metaStore);
            }
            if (meta.getOutputType().equalsIgnoreCase("file") && !meta.isOutputFileNameInField()) {
                if (meta.getOutputFileName() != null) {
                    String filename = meta.buildFilename(meta.getOutputFileName(), getTransMeta(), getCopy(),
                            getPartitionID(), data.splitnr);
                    openNewOutputFile(filename);
                } else {
                    throw new KettleException("Output file name cannot be null.");
                }
            }
        }

        if (r == null) { // no more input to be expected...
            closeAllOutputFiles();
            setOutputDone();
            return false;
        }

        if (meta.getOutputType().equalsIgnoreCase("file") && !meta.isOutputFileNameInField()
                && meta.getSplitEvery() > 0 && data.rowNumber % meta.getSplitEvery() == 0) {
            if (data.rowNumber > 0) {
                closeAllOutputFiles();
                data.splitnr++;
                String filename = meta.buildFilename(meta.getOutputFileName(), getTransMeta(), getCopy(),
                        getPartitionID(), data.splitnr);
                openNewOutputFile(filename);
            }
        }

        String outputFilename = "";
        if (meta.getOutputType().equalsIgnoreCase("file") && !meta.isOutputFileNameInField()) {
            outputFilename = meta.buildFilename(meta.getOutputFileName(), getTransMeta(), getCopy(),
                    getPartitionID(), data.splitnr);
        } else if (meta.getOutputType().equalsIgnoreCase("file") && meta.isOutputFileNameInField()) {
            String filenameValue = data.inputRowMeta.getString(r,
                    environmentSubstitute(meta.getOutputFileNameField()), "");
            if (!Const.isEmpty(filenameValue)) {
                outputFilename = filenameValue;
            } else {
                throw new KettleException("Filename cannot be empty.");
            }
        }

        // Create token resolver
        TokenResolver resolver = new TokenResolver();

        for (TokenReplacementField field : meta.getTokenReplacementFields()) {
            if (data.inputRowMeta.indexOfValue(field.getName()) >= 0) {
                String fieldValue = environmentSubstitute(data.inputRowMeta.getString(r, field.getName(), null));
                if (fieldValue == null && !BooleanUtils.toBoolean(
                        Const.getEnvironmentVariable("KETTLE_EMPTY_STRING_DIFFERS_FROM_NULL", "N"))) {
                    fieldValue = Const.nullToEmpty(fieldValue);
                }
                resolver.addToken(field.getTokenName(), fieldValue);
            } else {
                throw new KettleValueException("Field " + field.getName() + " not found on input stream.");
            }
        }

        Reader reader;
        String inputFilename = "";

        if (meta.getInputType().equalsIgnoreCase("text")) {
            reader = new TokenReplacingReader(resolver, new StringReader(meta.getInputText()),
                    environmentSubstitute(meta.getTokenStartString()),
                    environmentSubstitute(meta.getTokenEndString()));
        } else if (meta.getInputType().equalsIgnoreCase("field")) {
            if (data.inputRowMeta.indexOfValue(meta.getInputFieldName()) >= 0) {
                String inputString = data.inputRowMeta.getString(r, meta.getInputFieldName(), "");
                reader = new TokenReplacingReader(resolver, new StringReader(inputString),
                        environmentSubstitute(meta.getTokenStartString()),
                        environmentSubstitute(meta.getTokenEndString()));
            } else {
                throw new KettleValueException(
                        "Input field " + meta.getInputFieldName() + " not found on input stream.");
            }
        } else if (meta.getInputType().equalsIgnoreCase("file")) {
            if (meta.isInputFileNameInField()) {
                if (data.inputRowMeta.indexOfValue(environmentSubstitute(meta.getInputFileNameField())) >= 0) {
                    inputFilename = data.inputRowMeta.getString(r,
                            environmentSubstitute(meta.getInputFileNameField()), "");
                } else {
                    throw new KettleValueException("Input filename field "
                            + environmentSubstitute(meta.getInputFileNameField()) + " not found on input stream.");
                }
            } else {
                inputFilename = environmentSubstitute(meta.getInputFileName());
            }
            if (Const.isEmpty(inputFilename)) {
                throw new KettleValueException("Input filename cannot be empty");
            }
            FileObject file = KettleVFS.getFileObject(inputFilename, getTransMeta());
            reader = new TokenReplacingReader(resolver,
                    new InputStreamReader(KettleVFS.getInputStream(inputFilename, getTransMeta())),
                    environmentSubstitute(meta.getTokenStartString()),
                    environmentSubstitute(meta.getTokenEndString()));

            if (meta.isAddInputFileNameToResult()) {
                ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL,
                        KettleVFS.getFileObject(inputFilename, getTransMeta()), getTransMeta().getName(),
                        getStepname());
                resultFile.setComment(BaseMessages.getString(PKG, "TokenReplacement.AddInputResultFile"));
                addResultFile(resultFile);
            }
        } else {
            throw new KettleException("Unsupported input type " + meta.getInputType());
        }

        Writer stringWriter = null;
        OutputStream bufferedWriter = null;

        if (meta.getOutputType().equalsIgnoreCase("field")) {
            stringWriter = new StringBufferWriter(new StringBuffer(5000));
        } else if (meta.getOutputType().equalsIgnoreCase("file")) {
            if (inputFilename.equals(outputFilename)) {
                throw new KettleException("Input and output filenames must not be the same " + inputFilename);
            }
            int fileIndex = data.openFiles.indexOf(outputFilename);
            if (fileIndex < 0) {
                openNewOutputFile(outputFilename);
                fileIndex = data.openFiles.indexOf(outputFilename);
            }
            bufferedWriter = data.openBufferedWriters.get(fileIndex);
        } else {
            throw new KettleException("Unsupported output type " + meta.getOutputType());
        }

        String output = "";
        try {
            char[] cbuf = new char[5000];
            StringBuffer sb = new StringBuffer();
            int length = 0;
            while ((length = reader.read(cbuf)) > 0) {
                if (meta.getOutputType().equalsIgnoreCase("field")) {
                    stringWriter.write(cbuf, 0, length);
                } else if (meta.getOutputType().equalsIgnoreCase("file")) {
                    CharBuffer cBuffer = CharBuffer.wrap(cbuf, 0, length);
                    ByteBuffer bBuffer = Charset.forName(meta.getOutputFileEncoding()).encode(cBuffer);
                    byte[] bytes = new byte[bBuffer.limit()];
                    bBuffer.get(bytes);
                    bufferedWriter.write(bytes);
                }
                // No else. Anything else will be thrown to a Kettle exception prior to getting here.
                cbuf = new char[5000];
            }
            if (meta.getOutputType().equalsIgnoreCase("field")) {
                output += stringWriter.toString();
            } else if (meta.getOutputType().equalsIgnoreCase("file")) {
                bufferedWriter.write(meta.getOutputFileFormatString().getBytes());
            }
        } catch (IOException ex) {
            throw new KettleException(ex.getMessage(), ex);
        } finally {
            try {
                reader.close();
                if (stringWriter != null) {
                    stringWriter.close();
                }
                reader = null;
                stringWriter = null;
            } catch (IOException ex) {
                throw new KettleException(ex.getMessage(), ex);
            }
        }

        if (meta.getOutputType().equalsIgnoreCase("field")) {
            r = RowDataUtil.addValueData(r, data.outputRowMeta.size() - 1, output);
        } else if (meta.getOutputType().equalsIgnoreCase("file")) {
            incrementLinesWritten();
        }

        putRow(data.outputRowMeta, r); // in case we want it to go further...
        data.rowNumber++;

        if (checkFeedback(getLinesOutput())) {
            logBasic("linenr " + getLinesOutput());
        }

        return result;
    }
From source file: org.apache.hadoop.hdfs.BlockReaderLocalLegacy.java

    /**
     * Utility method used by read(ByteBuffer) to partially copy a ByteBuffer into
     * another.
     */
    private void writeSlice(ByteBuffer from, ByteBuffer to, int length) {
        int oldLimit = from.limit();
        from.limit(from.position() + length);
        try {
            to.put(from);
        } finally {
            from.limit(oldLimit);
        }
    }