List of usage examples for java.nio.CharBuffer.clear()
public final Buffer clear()
Clears this buffer: the position is set to zero, the limit is set to the capacity, and any mark is discarded. The buffer's contents are not erased; only the indices are reset so the buffer can be filled (or decoded into) again.
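A minimal sketch of what clear() does to a CharBuffer's indices (class and variable names are illustrative):

import java.nio.CharBuffer;

public class CharBufferClearDemo {
    public static void main(String[] args) {
        CharBuffer buf = CharBuffer.allocate(8);
        buf.put("abc");                      // position = 3, limit = 8
        buf.clear();                         // position = 0, limit = 8, mark discarded
        System.out.println(buf.position());  // 0
        System.out.println(buf.limit());     // 8
        System.out.println(buf.get());       // 'a' -- clear() did not erase the data
    }
}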
From source file:fi.johannes.kata.ocr.utils.files.CFileOperations.java
public static void createChunksByRowsChars(String inputFile, String outputFolder, int bufferSize, int rows)
        throws FileNotFoundException, IOException {
    File f = new File(inputFile);
    String filename = f.getName();
    BufferedReader bw = new BufferedReader(new FileReader(f));
    CharBuffer buffer = CharBuffer.allocate(bufferSize);
    int j = 0;
    for (int i = 0; i <= rows; i++) {
        String lineStr = bw.readLine();
        if (lineStr != null) {
            char[] line = lineStr.toCharArray();
            if (i == rows) {
                // a full chunk of 'rows' lines has been buffered: write it out,
                // then clear() the buffer so it can be refilled for the next chunk
                String outputfile = outputFolder + j + "-" + filename;
                writeChunk(buffer, outputfile);
                buffer.clear();
                j++;
                i = 0;
            }
            buffer.put(line);
            buffer.put(System.getProperty("line.separator"));
        } else {
            break;
        }
    }
}
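A hypothetical call to the method above (the file and folder names are made up). Note that bufferSize must be large enough to hold a full chunk of rows lines plus line separators, otherwise CharBuffer.put() will throw a BufferOverflowException:

// assumes input.txt exists and /tmp/chunks/ is a writable directory
CFileOperations.createChunksByRowsChars("input.txt", "/tmp/chunks/", 64 * 1024, 100);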
From source file:ch.rasc.edsutil.optimizer.WebResourceProcessor.java
private static String inputStream2String(InputStream is, Charset cs) throws IOException {
    StringBuilder to = new StringBuilder();
    try (Reader from = new InputStreamReader(is, cs.newDecoder())) {
        CharBuffer buf = CharBuffer.allocate(0x800);
        while (from.read(buf) != -1) {
            buf.flip();
            to.append(buf);
            buf.clear();
        }
        return to.toString();
    }
}
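A hypothetical use of the helper above to read a UTF-8 stream into a String. Each read() fills the buffer, flip() exposes the freshly read characters for appending, and clear() resets the buffer for the next read. The file name is an assumption, the usual java.io/java.nio imports are assumed, and since the method is private the call would have to come from within the same class:

try (InputStream is = new FileInputStream("page.html")) {
    String content = inputStream2String(is, StandardCharsets.UTF_8);
    System.out.println(content.length());
}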
From source file:ChannelToWriter.java
/**
 * Read bytes from the specified channel, decode them using the specified
 * Charset, and write the resulting characters to the specified writer
 */
public static void copy(ReadableByteChannel channel, Writer writer, Charset charset) throws IOException {
    // Get and configure the CharsetDecoder we'll use
    CharsetDecoder decoder = charset.newDecoder();
    decoder.onMalformedInput(CodingErrorAction.IGNORE);
    decoder.onUnmappableCharacter(CodingErrorAction.IGNORE);
    // Get the buffers we'll use, and the backing array for the CharBuffer.
    ByteBuffer bytes = ByteBuffer.allocateDirect(2 * 1024);
    CharBuffer chars = CharBuffer.allocate(2 * 1024);
    char[] array = chars.array();
    while (channel.read(bytes) != -1) { // Read from channel until EOF
        bytes.flip(); // Switch to drain mode for decoding
        // Decode the byte buffer into the char buffer.
        // Pass false to indicate that we're not done.
        decoder.decode(bytes, chars, false);
        // Put the char buffer into drain mode, and write its contents
        // to the Writer, reading them from the backing array.
        chars.flip();
        writer.write(array, chars.position(), chars.remaining());
        // Discard all bytes we decoded, and put the byte buffer back into
        // fill mode. Since all characters were output, clear that buffer.
        bytes.compact(); // Discard decoded bytes
        chars.clear(); // Clear the character buffer
    }
    // At this point there may still be some bytes in the buffer to decode,
    // so put the buffer into drain mode, call decode() a final time, and
    // finish with a flush().
    bytes.flip();
    decoder.decode(bytes, chars, true); // True means final call
    decoder.flush(chars); // Flush any buffered chars
    // Write these final chars (if any) to the writer.
    chars.flip();
    writer.write(array, chars.position(), chars.remaining());
    writer.flush();
}
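A hypothetical invocation of the copy() helper above, streaming a file channel through a UTF-8 decoder into a writer. The file names are assumptions and the usual java.io/java.nio imports are assumed:

try (FileInputStream in = new FileInputStream("input.utf8.txt");
        Writer out = new OutputStreamWriter(new FileOutputStream("copy.txt"), StandardCharsets.UTF_8)) {
    ChannelToWriter.copy(in.getChannel(), out, StandardCharsets.UTF_8);
}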
From source file:de.undercouch.bson4jackson.io.StaticBuffers.java
/**
 * Creates or re-uses a {@link CharBuffer} that has a minimum size. Calling
 * this method multiple times with the same key will always return the
 * same buffer, as long as it has the minimum size and is marked to be
 * re-used. Buffers that are allowed to be re-used should be released using
 * {@link #releaseCharBuffer(Key, CharBuffer)}.
 * @param key the buffer's identifier
 * @param minSize the minimum size
 * @return the {@link CharBuffer} instance
 */
public CharBuffer charBuffer(Key key, int minSize) {
    minSize = Math.max(minSize, GLOBAL_MIN_SIZE);
    CharBuffer r = _charBuffers[key.ordinal()];
    if (r == null || r.capacity() < minSize) {
        r = CharBuffer.allocate(minSize);
    } else {
        _charBuffers[key.ordinal()] = null;
        r.clear();
    }
    return r;
}
From source file:com.gamesalutes.utils.ByteUtils.java
/**
 * Extends the size of <code>buf</code> to at least meet <code>minCap</code>.
 * If <code>buf</code> is too small, then a new buffer is allocated and
 * any existing contents in <code>buf</code> will be transferred. The position
 * of the new buffer will be that of the old buffer if it was not <code>null</code>, and
 * the previous mark will be discarded if one was set.
 *
 * @param buf the input <code>CharBuffer</code>
 * @param minCap the minimum capacity
 * @return a <code>CharBuffer</code> that can meet <code>minCap</code>
 */
public static CharBuffer growBuffer(CharBuffer buf, int minCap) {
    int myLimit = buf != null ? buf.limit() : 0;
    // limit can accommodate capacity requirements
    if (buf != null && myLimit >= minCap)
        return buf;
    int myCap = buf != null ? buf.capacity() : 0;
    // capacity can accommodate but limit is too small
    if (buf != null && myCap >= minCap) {
        buf.limit(myCap);
        return buf;
    } else // if(myCap < minCap)
    {
        CharBuffer newBuffer = null;
        if (myCap == 0)
            myCap = 1;
        while (myCap < minCap)
            myCap <<= 1;
        // if(buf != null && buf.isDirect())
        //     newBuffer = CharBuffer.allocateDirect(myCap);
        // else
        newBuffer = CharBuffer.allocate(myCap);
        // copy contents of original buffer
        if (buf != null) {
            int pos = buf.position();
            buf.clear();
            newBuffer.put(buf);
            newBuffer.position(pos);
        }
        return newBuffer;
    }
}
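A hypothetical use of growBuffer(), enlarging a buffer before appending more data than it can currently hold (the sizes and text are illustrative). Note how the method relies on clear() to reset the old buffer's position and limit so its full contents can be copied into the larger buffer:

CharBuffer buf = CharBuffer.allocate(16);
buf.put("0123456789");                  // 10 chars buffered, only 6 remaining
buf = ByteUtils.growBuffer(buf, 64);    // new 64-char buffer, position still 10
buf.put(" plus more text that would not fit before");
buf.flip();
System.out.println(buf);                // 0123456789 plus more text that would not fit before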
From source file:com.google.flatbuffers.Table.java
/**
 * Create a Java `String` from UTF-8 data stored inside the FlatBuffer.
 *
 * This allocates a new string and converts to wide chars upon each access,
 * which is not very efficient. Instead, each FlatBuffer string also comes with an
 * accessor based on __vector_as_bytebuffer below, which is much more efficient,
 * assuming your Java program can handle UTF-8 data directly.
 *
 * @param offset An `int` index into the Table's ByteBuffer.
 * @return Returns a `String` from the data stored inside the FlatBuffer at `offset`.
 */
protected String __string(int offset) {
    CharsetDecoder decoder = UTF8_DECODER.get();
    decoder.reset();
    offset += bb.getInt(offset);
    ByteBuffer src = bb.duplicate().order(ByteOrder.LITTLE_ENDIAN);
    int length = src.getInt(offset);
    src.position(offset + SIZEOF_INT);
    src.limit(offset + SIZEOF_INT + length);
    int required = (int) ((float) length * decoder.maxCharsPerByte());
    CharBuffer dst = CHAR_BUFFER.get();
    if (dst == null || dst.capacity() < required) {
        dst = CharBuffer.allocate(required);
        CHAR_BUFFER.set(dst);
    }
    dst.clear();
    try {
        CoderResult cr = decoder.decode(src, dst, true);
        if (!cr.isUnderflow()) {
            cr.throwException();
        }
    } catch (CharacterCodingException x) {
        throw new Error(x);
    }
    return dst.flip().toString();
}
From source file:com.asakusafw.runtime.io.csv.CsvParser.java
private int getNextCharacter() throws IOException {
    CharBuffer buf = readerBuffer;
    if (buf.remaining() == 0) {
        buf.clear();
        int read = reader.read(buf);
        buf.flip();
        assert read != 0;
        if (read < 0) {
            return EOF;
        }
    }
    return buf.get();
}
From source file:com.asakusafw.runtime.io.csv.CsvParser.java
private void emit(int c) throws IOException {
    assert c >= 0;
    CharBuffer buf = lineBuffer;
    if (buf.remaining() == 0) {
        if (buf.capacity() == BUFFER_LIMIT) {
            throw new IOException(
                    MessageFormat.format("Line is too large (near {0}:{1}, size={2}, record-number={3})",
                            path, currentPhysicalHeadLine, BUFFER_LIMIT, currentRecordNumber));
        }
        CharBuffer newBuf = CharBuffer.allocate(Math.min(buf.capacity() * 2, BUFFER_LIMIT));
        newBuf.clear();
        buf.flip();
        newBuf.put(buf);
        buf = newBuf;
        lineBuffer = newBuf;
    }
    buf.put((char) c);
}
From source file:org.auraframework.util.text.Hash.java
/**
 * Consumes and closes a reader to generate its contents' hash.
 *
 * @param reader the reader for pulling content. Must be at the beginning of the file.
 */
public void setHash(Reader reader) throws IOException, IllegalStateException {
    try {
        MessageDigest digest = MessageDigest.getInstance("MD5");
        Charset utf8 = Charset.forName("UTF-8");
        CharBuffer cbuffer = CharBuffer.allocate(2048);
        while (reader.read(cbuffer) >= 0) {
            cbuffer.flip();
            ByteBuffer bytes = utf8.encode(cbuffer);
            digest.update(bytes);
            cbuffer.clear();
        }
        setHash(digest.digest());
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("MD5 is a required MessageDigest algorithm, but is not registered here.");
    } finally {
        reader.close();
    }
}
From source file:org.omnaest.utils.structure.container.ByteArrayContainer.java
/**
 * Copies the content from a {@link Readable} using the given encoding
 *
 * @param readable
 * @param encoding
 * @return this
 */
public ByteArrayContainer copyFrom(Readable readable, String encoding) {
    this.isContentInvalid = false;
    if (readable != null) {
        encoding = StringUtils.defaultString(encoding, ENCODING_UTF8);
        try {
            final StringBuffer stringBuffer = new StringBuffer();
            final CharBuffer charBuffer = CharBuffer.wrap(new char[1000]);
            for (int read = 0; read >= 0;) {
                charBuffer.clear();
                read = readable.read(charBuffer);
                charBuffer.flip();
                if (read > 0) {
                    stringBuffer.append(charBuffer, 0, read);
                }
            }
            this.copyFrom(stringBuffer, encoding);
        } catch (IOException e) {
            this.isContentInvalid = true;
            this.handleException(e);
        }
    }
    return this;
}