Example usage for java.nio ByteBuffer flip

Introduction

This page collects example usages of java.nio.ByteBuffer.flip() from real-world source files.

Prototype

public final Buffer flip() 

Document

Flips this buffer. The limit is set to the current position and then the position is set to zero. If the mark is defined then it is discarded.
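
A minimal sketch of the flip() contract, independent of the sources below: after writing into a buffer, flip() switches it from filling to draining.

public static void flipDemo() {
    ByteBuffer buf = ByteBuffer.allocate(16);
    buf.put((byte) 1).put((byte) 2).put((byte) 3);
    // After the puts: position = 3, limit = 16 (the capacity)
    buf.flip();
    // After flip(): position = 0, limit = 3 -- ready to be drained
    while (buf.hasRemaining()) {
        System.out.println(buf.get());
    }
}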

Usage

From source file:com.glaf.core.util.FileUtils.java

public static byte[] getBytes(InputStream inputStream) {
    if (inputStream == null) {
        return null;
    }
    ByteArrayOutputStream output = null;
    try {
        ByteBuffer buffer = ByteBuffer.allocate(8192);
        ReadableByteChannel readChannel = Channels.newChannel(inputStream);
        output = new ByteArrayOutputStream(32 * 1024);
        WritableByteChannel writeChannel = Channels.newChannel(output);
        // Fill/drain cycle: read() fills the buffer, flip() bounds it to the
        // bytes just read so write() can drain it, and compact() moves any
        // unwritten bytes back to the front. The loop keeps running after
        // end-of-stream until the buffer has been fully drained.
        while ((readChannel.read(buffer)) > 0 || buffer.position() != 0) {
            buffer.flip();
            writeChannel.write(buffer);
            buffer.compact();
        }
        return output.toByteArray();
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    } finally {
        if (output != null) {
            try {
                output.close();
            } catch (IOException ex) {
                // ignore failures when closing
            }
        }
    }
}

From source file:com.ah.ui.actions.home.clientManagement.service.CertificateGenSV.java

public static void writeFile(String pathName, byte[] bytes) throws IOException {
    File fl = new File(pathName);
    if (existFile(pathName)) {
        fl.delete();
    }
    ByteBuffer bb = ByteBuffer.allocate(bytes.length);
    bb.put(bytes);
    bb.flip(); // make the buffered bytes readable for the channel write
    FileChannel fileChannel = null;
    try {
        fileChannel = new FileOutputStream(pathName, false).getChannel();
        fileChannel.write(bb);
    } finally {
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (IOException ioe) {
                ioe.printStackTrace();
            }
        }
    }
}
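
As an aside, on Java 7 and later the same result is available without an explicit ByteBuffer via java.nio.file.Files and java.nio.file.Paths; a sketch, not part of the original source:

// Creates the file if absent, truncates it otherwise, then writes all bytes
Files.write(Paths.get(pathName), bytes);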

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock.java

/**
 * Verifies the block's checksum. This is an I/O intensive operation.
 * Throws a ChecksumException if the block fails verification.
 */
private static void verifyChecksum(long length, FileInputStream metaIn, FileChannel blockChannel,
        String blockFileName) throws IOException, ChecksumException {
    // Verify the checksum from the block's meta file
    // Get the DataChecksum from the meta file header
    BlockMetadataHeader header = BlockMetadataHeader.readHeader(
            new DataInputStream(new BufferedInputStream(metaIn, BlockMetadataHeader.getHeaderSize())));
    FileChannel metaChannel = null;
    try {
        metaChannel = metaIn.getChannel();
        if (metaChannel == null) {
            throw new IOException("Block InputStream meta file has no FileChannel.");
        }
        DataChecksum checksum = header.getChecksum();
        final int bytesPerChecksum = checksum.getBytesPerChecksum();
        final int checksumSize = checksum.getChecksumSize();
        final int numChunks = (8 * 1024 * 1024) / bytesPerChecksum;
        ByteBuffer blockBuf = ByteBuffer.allocate(numChunks * bytesPerChecksum);
        ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks * checksumSize);
        // Verify the checksum
        int bytesVerified = 0;
        while (bytesVerified < length) {
            Preconditions.checkState(bytesVerified % bytesPerChecksum == 0,
                    "Unexpected partial chunk before EOF");
            int bytesRead = fillBuffer(blockChannel, blockBuf);
            if (bytesRead == -1) {
                throw new IOException("checksum verification failed: premature EOF");
            }
            blockBuf.flip(); // switch the data buffer from filling to draining
            // Number of read chunks, including partial chunk at end
            int chunks = (bytesRead + bytesPerChecksum - 1) / bytesPerChecksum;
            checksumBuf.limit(chunks * checksumSize);
            fillBuffer(metaChannel, checksumBuf);
            checksumBuf.flip();
            checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName, bytesVerified);
            // Success
            bytesVerified += bytesRead;
            blockBuf.clear();
            checksumBuf.clear();
        }
    } finally {
        IOUtils.closeQuietly(metaChannel);
    }
}

From source file:Main.java

/**
 * Decodes the specified URL as per RFC 3986, i.e. transforms
 * percent-encoded octets to characters by decoding with the UTF-8 character
 * set. This function is primarily intended for usage with
 * {@link URL} which unfortunately does not enforce proper URLs. As
 * such, this method will leniently accept invalid characters or malformed
 * percent-encoded octets and simply pass them literally through to the
 * result string. Except for rare edge cases, this will make unencoded URLs
 * pass through unaltered.
 *
 * @param url The URL to decode, may be {@code null}.
 * @return The decoded URL or {@code null} if the input was
 * {@code null}.
 */
static String decodeUrl(String url) {
    String decoded = url;
    if (url != null && url.indexOf('%') >= 0) {
        int n = url.length();
        StringBuffer buffer = new StringBuffer();
        ByteBuffer bytes = ByteBuffer.allocate(n);
        for (int i = 0; i < n;) {
            if (url.charAt(i) == '%') {
                try {
                    do {
                        byte octet = (byte) Integer.parseInt(url.substring(i + 1, i + 3), 16);
                        bytes.put(octet);
                        i += 3;
                    } while (i < n && url.charAt(i) == '%');
                    continue;
                } catch (RuntimeException e) {
                    // malformed percent-encoded octet, fall through and
                    // append characters literally
                } finally {
                    if (bytes.position() > 0) {
                        bytes.flip();
                        buffer.append(UTF8.decode(bytes).toString());
                        bytes.clear();
                    }
                }
            }
            buffer.append(url.charAt(i++));
        }
        decoded = buffer.toString();
    }
    return decoded;
}
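
A few illustrative calls, consistent with the lenient behavior described above (inputs are hypothetical):

decodeUrl("caf%C3%A9");  // "café" -- %C3%A9 is the UTF-8 encoding of 'é'
decodeUrl("a%2");        // "a%2"  -- malformed octet passes through literally
decodeUrl(null);         // null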

From source file:com.ettrema.zsync.Upload.java

/**
 * Returns the next String terminated by one of the specified delimiters or the end of the InputStream.<p/>
 *
 * This method simply reads from an InputStream one byte at a time, up to maxsearch bytes, until it reads a byte equal to one of the delimiters
 * or reaches the end of the stream. It uses the CHARSET encoding to translate the bytes read into a String, which it returns with delimiter excluded, 
 * or it throws a ParseException if maxsearch bytes are read without reaching a delimiter or the end of the stream.<p/>
 * 
 * A non-buffering method is used because a buffering reader would likely pull in part of the binary data
 * from the InputStream. An alternative is to use a BufferedReader with a given buffer size and use
 * mark and reset to get back binary data pulled into the buffer.
 * 
 * @param in The InputStream to read from
 * @param delimiters A list of byte values, each of which indicates the end of a token
 * @param maxsearch The maximum number of bytes to search for a delimiter
 * @return The String containing the CHARSET decoded String with delimiter excluded
 * @throws IOException
 * @throws ParseException If a delimiter byte is not found within maxsearch reads
 */
public static String readToken(InputStream in, byte[] delimiters, int maxsearch)
        throws ParseException, IOException {

    if (maxsearch <= 0) {
        throw new RuntimeException("readToken: Invalid maxsearch " + maxsearch);
    }

    ByteBuffer bytes = ByteBuffer.allocate(maxsearch);
    int nextByte;

    try {

        // Read as an int so that end-of-stream (-1) is distinguishable from
        // byte values >= 0x80, which would also be negative after a byte cast.
        read: while ((nextByte = in.read()) != -1) {

            for (byte delimiter : delimiters) {
                if ((byte) nextByte == delimiter) {
                    break read;
                }
            }
            bytes.put((byte) nextByte);
        }

        bytes.flip();
        return Charset.forName(CHARSET).decode(bytes).toString();

    } catch (BufferOverflowException ex) {

        throw new ParseException("Could not find delimiter within " + maxsearch + " bytes.", 0);
    }
}
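
A hypothetical call site, reading a header line terminated by a newline (CHARSET is a constant defined elsewhere in the class):

String line = readToken(in, new byte[] { '\n' }, 4096);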

From source file:com.ah.ui.actions.home.clientManagement.service.CertificateGenSV.java

public static byte[] readFromFile(File file) throws IOException {
    FileChannel fileChannel = new FileInputStream(file).getChannel();
    try {
        ByteBuffer bb = ByteBuffer.allocate((int) fileChannel.size());
        // A single read() may return fewer bytes than requested, so loop
        // until the buffer holds the whole file.
        while (bb.hasRemaining() && fileChannel.read(bb) != -1) {
            // keep reading
        }
        bb.flip();
        byte[] bytes;

        if (bb.hasArray()) {
            bytes = bb.array();
        } else {
            bytes = new byte[bb.limit()];
            bb.get(bytes);
        }

        return bytes;
    } finally {
        fileChannel.close();
    }
}
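
As with writeFile above, Java 7+ offers a buffer-free alternative; a sketch, not from the original source:

byte[] bytes = Files.readAllBytes(file.toPath()); // java.nio.file.Files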

From source file:com.fujitsu.dc.test.jersey.bar.BarInstallTestUtils.java

/**
 * Reads a bar file.
 * @param barFile the bar file to read
 * @return the contents of the bar file as a byte array
 */
public static byte[] readBarFile(File barFile) {
    InputStream is = ClassLoader.getSystemResourceAsStream(barFile.getPath());
    ByteBuffer buff = ByteBuffer.allocate(READ_BUFFER_SIZE);
    log.debug(String.valueOf(buff.capacity()));
    try {
        byte[] bbuf = new byte[SIZE_KB];
        int size;
        while ((size = is.read(bbuf)) != -1) {
            buff.put(bbuf, 0, size);
        }
    } catch (IOException e) {
        throw new RuntimeException("failed to load bar file:" + barFile.getPath(), e);
    } finally {
        try {
            if (is != null) {
                is.close();
            }
        } catch (IOException e) {
            throw new RuntimeException("failed to close bar file:" + barFile.getPath(), e);
        }
    }
    int size = buff.position();
    buff.flip();
    byte[] retv = new byte[size];
    buff.get(retv, 0, size);
    return retv;
}

From source file:org.apache.kylin.engine.mr.common.CubeStatsWriter.java

public static void writeCuboidStatistics(Configuration conf, Path outputPath, //
        Map<Long, HLLCounter> cuboidHLLMap, int samplingPercentage, int mapperNumber, double mapperOverlapRatio)
        throws IOException {
    Path seqFilePath = new Path(outputPath, BatchConstants.CFG_STATISTICS_CUBOID_ESTIMATION_FILENAME);

    List<Long> allCuboids = new ArrayList<Long>();
    allCuboids.addAll(cuboidHLLMap.keySet());
    Collections.sort(allCuboids);

    ByteBuffer valueBuf = ByteBuffer.allocate(BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);
    SequenceFile.Writer writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(seqFilePath),
            SequenceFile.Writer.keyClass(LongWritable.class),
            SequenceFile.Writer.valueClass(BytesWritable.class));
    try {
        // mapper overlap ratio at key -1
        writer.append(new LongWritable(-1), new BytesWritable(Bytes.toBytes(mapperOverlapRatio)));

        // mapper number at key -2
        writer.append(new LongWritable(-2), new BytesWritable(Bytes.toBytes(mapperNumber)));

        // sampling percentage at key 0
        writer.append(new LongWritable(0L), new BytesWritable(Bytes.toBytes(samplingPercentage)));

        for (long i : allCuboids) {
            valueBuf.clear();
            cuboidHLLMap.get(i).writeRegisters(valueBuf);
            valueBuf.flip(); // bound the buffer to the registers just written
            writer.append(new LongWritable(i), new BytesWritable(valueBuf.array(), valueBuf.limit()));
        }
    } finally {
        IOUtils.closeQuietly(writer);
    }
}

From source file:com.turn.ttorrent.common.TorrentCreator.java

/**
 * Returns the concatenation of the SHA-1 hashes of the given files' pieces.
 *
 * <p>
 * Hashes the given files piece by piece using the given piece length and
 * returns the concatenation of these hashes as a byte array.
 * </p>
 *
 * <p>
 * This is used for creating Torrent meta-info structures from a file.
 * </p>
 *
 * @param executor The executor used to run the per-piece hashing tasks.
 * @param files The files to hash, in order.
 * @param nbytes The total number of bytes across all files.
 * @param pieceLength The piece length, in bytes.
 */
public /* for testing */ static byte[] hashFiles(Executor executor, List<File> files, long nbytes,
        int pieceLength) throws InterruptedException, IOException {
    int npieces = (int) Math.ceil((double) nbytes / pieceLength);
    byte[] out = new byte[Torrent.PIECE_HASH_SIZE * npieces];
    CountDownLatch latch = new CountDownLatch(npieces);

    ByteBuffer buffer = ByteBuffer.allocate(pieceLength);

    long start = System.nanoTime();
    int piece = 0;
    for (File file : files) {
        logger.info("Hashing data from {} ({} pieces)...",
                new Object[] { file.getName(), (int) Math.ceil((double) file.length() / pieceLength) });

        FileInputStream fis = FileUtils.openInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    // A full piece is buffered: flip it for the hasher and
                    // start a fresh buffer for the next piece.
                    buffer.flip();
                    executor.execute(new ChunkHasher(out, piece, latch, buffer));
                    buffer = ByteBuffer.allocate(pieceLength);
                    piece++;
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.flip();
        executor.execute(new ChunkHasher(out, piece, latch, buffer));
        piece++;
    }

    // Wait for hashing tasks to complete.
    latch.await();
    long elapsed = System.nanoTime() - start;

    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.",
            new Object[] { files.size(), nbytes, piece, npieces, String.format("%.1f", elapsed / 1e6) });

    return out;
}

From source file:it.unimi.di.big.mg4j.index.DiskBasedIndex.java

/** Commodity method for loading from a channel a big list of binary longs with specified endianness into a {@linkplain LongBigArrays long big array}.
 *
 * @param channel the channel.
 * @param length the expected number of longs, used as the initial capacity of the returned list.
 * @param byteOrder the endianness of the longs.
 * @return a big list of longs containing the longs returned by <code>channel</code>.
 */
public static LongBigArrayBigList loadLongBigList(final ReadableByteChannel channel, final long length,
        final ByteOrder byteOrder) throws IOException {
    final ByteBuffer byteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE).order(byteOrder);

    LongBigArrayBigList list = new LongBigArrayBigList(length);

    // flip() exposes the bytes just read; compact() carries any trailing
    // partial long over to the next read, since a channel read need not
    // end on an 8-byte boundary.
    while (channel.read(byteBuffer) > 0) {
        byteBuffer.flip();
        while (byteBuffer.remaining() >= 8)
            list.add(byteBuffer.getLong());
        byteBuffer.compact();
    }

    return list;
}
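
A hypothetical call site, sizing the list from the file itself (the path and the use of the DiskBasedIndex class are illustrative only):

try (FileChannel channel = FileChannel.open(Paths.get("longs.bin"))) {
    long numLongs = channel.size() / Long.BYTES;
    LongBigArrayBigList longs = DiskBasedIndex.loadLongBigList(channel, numLongs, ByteOrder.BIG_ENDIAN);
}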