Example usage for java.nio.ByteBuffer.limit(int)

Introduction

This page collects example usages of java.nio.ByteBuffer.limit(int) from open-source projects.

Prototype

public final Buffer limit(int newLimit) 

Document

Sets the limit of this buffer.
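
For orientation, a minimal standalone sketch (not taken from any of the projects below) showing how the limit interacts with position and capacity:

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);   // position=0, limit=8, capacity=8
        buf.limit(4);                              // shrink the usable window to bytes 0..3
        System.out.println(buf.remaining());       // prints 4
        buf.put(new byte[4]);                      // fills the buffer up to the limit
        // buf.put((byte) 0);                      // would now throw BufferOverflowException
    }
}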

Usage

From source file:com.emc.ecs.smart.SmartUploader.java

private String computeFileMD5() throws IOException {
    l4j.debug("Computing File MD5 with NIO");
    fileChannel.position(0);
    MessageDigest md5;
    try {
        md5 = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
        // Should never happen
        throw new RuntimeException("Could not load MD5", e);
    }

    long start = System.currentTimeMillis();
    // Force a 2MB buffer for better performance.
    ByteBuffer buf = ByteBuffer.allocateDirect(SMALL_SEGMENT);
    int c;
    long position = 0;
    buf.clear();
    while ((c = fileChannel.read(buf)) != -1) {
        buf.rewind();   // position back to 0
        buf.limit(c);   // limit = bytes just read (same net effect as flip())
        md5.update(buf);
        buf.clear();
        position += c;
        System.out.printf("\rLocal MD5 computation: %d / %d (%d %%)", position, fileSize,
                position * 100L / fileSize);
    }
    long duration = System.currentTimeMillis() - start;
    printRate(duration);

    return MD5Utils.toHexString(md5.digest());
}
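
A note on the loop above: because the buffer is cleared before each read, the rewind()/limit(c) pair leaves the buffer in the same state a single flip() call would. A minimal sketch of the equivalent loop, assuming the same fileChannel, md5 and SMALL_SEGMENT as in the method above:

ByteBuffer buf = ByteBuffer.allocateDirect(SMALL_SEGMENT);
while (fileChannel.read(buf) != -1) {
    buf.flip();        // limit = bytes just read, position = 0
    md5.update(buf);   // digests the bytes between position and limit
    buf.clear();       // position = 0, limit = capacity, ready for the next read
}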

From source file:org.apache.hadoop.hdfs.tools.offlineEditsViewer.TestOfflineEditsViewer.java

/**
 * Compare two files, ignoring trailing zeros at the end; for the edits log the
 * trailing zeros make no difference. Returns false if the files are not the
 * same.
 *
 * @param filenameSmall first file to compare (doesn't have to be smaller)
 * @param filenameLarge second file to compare (doesn't have to be larger)
 */
private boolean filesEqualIgnoreTrailingZeros(String filenameSmall, String filenameLarge) throws IOException {

    ByteBuffer small = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameSmall));
    ByteBuffer large = ByteBuffer.wrap(DFSTestUtil.loadFile(filenameLarge));
    // OEV outputs with the latest layout version, so tweak the old file's
    // contents to have the latest version so that checked-in binary files
    // don't require frequent updates
    small.put(3, (byte) NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);

    // swap so that 'small' really is the smaller of the two buffers
    if (small.capacity() > large.capacity()) {
        ByteBuffer tmpByteBuffer = small;
        small = large;
        large = tmpByteBuffer;
        String tmpFilename = filenameSmall;
        filenameSmall = filenameLarge;
        filenameLarge = tmpFilename;
    }

    // compare from 0 to capacity of small
    // the rest of the large should be all zeros
    small.position(0);
    small.limit(small.capacity());
    large.position(0);
    large.limit(small.capacity());

    // compares position to limit
    if (!small.equals(large)) {
        return false;
    }

    // everything after limit should be 0xFF
    int i = large.limit();
    large.clear();
    for (; i < large.capacity(); i++) {
        if (large.get(i) != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
            return false;
        }
    }

    return true;
}
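
The explicit position() and limit() calls before the equals() check matter because ByteBuffer.equals() compares only the remaining elements, i.e. the bytes between position and limit. A small standalone illustration (not part of the test above):

ByteBuffer a = ByteBuffer.wrap(new byte[] { 1, 2, 3, 9 });
ByteBuffer b = ByteBuffer.wrap(new byte[] { 1, 2, 3 });

a.position(0);
a.limit(3);      // only bytes 0..2 of 'a' take part in the comparison
b.position(0);
b.limit(3);

System.out.println(a.equals(b));   // true: the trailing 9 lies beyond a's limit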

From source file:com.linkedin.databus.core.DbusEventV1.java

@Override
public ByteBuffer value() {
    ByteBuffer value = _buf.asReadOnlyBuffer().order(_buf.order());
    value.position(_position + LongKeyOffset + keyLength());
    value = value.slice().order(_buf.order());
    int valueSize = valueLength();
    value.limit(valueSize);
    value.rewind();
    return value;
}
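
The position()/slice()/limit()/rewind() sequence above carves a read-only window out of a larger backing buffer; note that asReadOnlyBuffer() and slice() reset the byte order to big-endian, which is why the method re-applies order() after each call. A standalone sketch of the same windowing pattern, with made-up offsets:

byte[] raw = "headerPAYLOADtrailer".getBytes();   // 6-byte header, 7-byte payload, trailer
ByteBuffer view = ByteBuffer.wrap(raw).asReadOnlyBuffer();
view.position(6);                  // skip the header
view = view.slice();               // the slice starts at the old position
view.limit(7);                     // expose only the payload
view.rewind();                     // position back to 0 within the slice

byte[] payload = new byte[view.remaining()];
view.get(payload);                 // payload now holds the bytes of "PAYLOAD"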

From source file:com.linkedin.databus.core.DbusEventV2.java

public DbusEventInternalWritable convertToV1() throws KeyTypeNotImplementedException {
    DbusEventKey key;
    DbusEventFactory eventV1Factory = new DbusEventV1Factory();

    // to create new event we need to get data from ByteBuffer of the current event
    ByteBuffer curValue = value();

    // create the key
    if (isKeyNumber()) {
        key = new DbusEventKey(key());
    } else if (isKeyString()) {
        key = new DbusEventKey(keyBytes());
    } else {
        String msg = "Conversion not supported for this key type:" + getKeyType(getKeyTypeAttribute());
        LOG.error(msg);
        throw new KeyTypeNotImplementedException(msg);
    }

    // validate schemaId - for V1 it should be an array of zero bytes
    byte[] schemaId = schemaId();
    if (schemaId == null) {
        schemaId = DbusEventInternalWritable.emptyMd5;
    }

    boolean autocommit = true; // will generate CRC for the event - should always be true
    DbusEventInfo dbusEventInfo = new DbusEventInfo(getOpcode(), sequence(), getPartitionId(), getPartitionId(),
            timestampInNanos(), (short) getSourceId(), schemaId, null, isTraceEnabled(), autocommit);
    if (curValue != null) {
        dbusEventInfo.setValueByteBuffer(curValue); // to make it more efficient we should copy directly from the buffer
    }

    // allocate the buffer
    int newEventSize = eventV1Factory.computeEventLength(key, dbusEventInfo);
    ByteBuffer serializationBuffer = ByteBuffer.allocate(newEventSize);
    serializationBuffer.order(_buf.order());
    int size = DbusEventV1.serializeEvent(key, serializationBuffer, dbusEventInfo);

    if (size != newEventSize)
        throw new DatabusRuntimeException("event size doesn't match after conversion from V2 to V1");
    serializationBuffer.limit(size); // set the limit to the end of the event
    // construct the event from the buffer at the position
    return new DbusEventV1(serializationBuffer, 0);
}

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java

/**
 * Read external.
 *
 * @param blockName the block name
 * @return the cacheable
 * @throws IOException Signals that an I/O exception has occurred.
 */
@SuppressWarnings("unused")
private Cacheable readExternal(String blockName) throws IOException {
    if (overflowExtEnabled == false)
        return null;
    // Check if we already have this block in the external storage cache
    try {
        StorageHandle handle = (StorageHandle) extStorageCache.get(blockName);
        if (handle == null)
            return null;
        ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();

        buffer.clear();

        StorageHandle newHandle = storage.getData(handle, buffer);
        int size = buffer.getInt(0);
        if (size == 0)
            return null;
        boolean inMemory = buffer.get(4) == (byte) 1;
        buffer.position(5);
        buffer.limit(size + 4);
        if (deserializer.get() == null)
            return null;
        CacheableDeserializer<Cacheable> deserializer = this.deserializer.get();
        Cacheable obj = deserializer.deserialize(buffer);
        if (inMemory) {
            permGenCache.put(blockName, obj);
        } else {
            tenGenCache.put(blockName, obj);
        }

        if (newHandle.equals(handle) == false) {
            extStorageCache.put(blockName, newHandle);
        }

        return obj;

    } catch (NativeMemoryException e) {
        throw new IOException(e);
    }

}

From source file:com.turn.ttorrent.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:org.commoncrawl.hadoop.io.deprecated.ArcFileReader.java

/**
 * blocking call to retrieve the next ArcFileItem contained within an ARC file
 *
 * @param itemOut the ArcFileItem to populate
 * @throws IOException
 */
public void getNextItem(ArcFileItem itemOut) throws IOException {

    // preserve incoming arc file name ...
    String arcFileName = itemOut.getArcFileName();
    // reset item
    itemOut.clear();
    // restore arc file name
    itemOut.setArcFileName(arcFileName);

    // read content
    _crc.reset();
    // and reset inflater
    resetInflater();

    // set the arc file stream position up front
    itemOut.setArcFilePos(getARCFileStreamPos());

    ArcFileBuilder builder = new ArcFileBuilder(itemOut);

    // read header line buffer
    for (;;) {

        byte scanBuffer[] = new byte[_blockSize];
        ByteBuffer byteBuffer = ByteBuffer.wrap(scanBuffer);

        // read up to scan buffer size of data ...
        int readAmount = read(scanBuffer, 0, scanBuffer.length);

        if (readAmount != -1) {
            // update crc calculation
            _crc.update(scanBuffer, 0, readAmount);
            // and limit byte buffer ...
            byteBuffer.limit(readAmount);
            // and then feed the data into the builder
            builder.inputData(byteBuffer);
        } else {
            // validate crc and header length ...
            readTrailer();

            builder.finish();

            // set the compressed content size ...
            itemOut.setArcFileSize(getARCFileStreamPos() - itemOut.getArcFilePos());

            return;
        }
    }
}

From source file:com.bittorrent.mpetazzoni.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(Torrent.PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / Torrent.PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / Torrent.PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}

From source file:de.csdev.ebus.command.EBusCommandUtils.java

/**
 * @param commandMethod the command method to compose master data for
 * @param values optional values to encode, keyed by field name
 * @return the composed master data buffer
 * @throws EBusTypeException
 */
public static ByteBuffer composeMasterData(IEBusCommandMethod commandMethod, Map<String, Object> values)
        throws EBusTypeException {

    ByteBuffer buf = ByteBuffer.allocate(50);

    Map<Integer, IEBusComplexType<?>> complexTypes = new HashMap<Integer, IEBusComplexType<?>>();

    if (commandMethod.getMasterTypes() != null) {
        for (IEBusValue entry : commandMethod.getMasterTypes()) {

            IEBusType<?> type = entry.getType();
            byte[] b = null;

            // use the value from the values map if set
            if (values != null && values.containsKey(entry.getName())) {
                b = type.encode(values.get(entry.getName()));

            } else {
                if (type instanceof IEBusComplexType) {

                    // add the complex to the list for post processing
                    complexTypes.put(buf.position(), (IEBusComplexType<?>) type);

                    // add placeholder
                    b = new byte[entry.getType().getTypeLength()];

                } else {
                    b = type.encode(entry.getDefaultValue());

                }

            }

            if (b == null) {
                throw new RuntimeException("Encoded value is null! " + type.toString());
            }
            // buf.p
            buf.put(b);
            // len += type.getTypeLength();
        }
    }

    // replace the placeholders with the complex values
    if (!complexTypes.isEmpty()) {
        int orgPos = buf.position();
        for (Entry<Integer, IEBusComplexType<?>> entry : complexTypes.entrySet()) {
            // jump to position
            buf.position(entry.getKey());
            // put new value
            buf.put(entry.getValue().encodeComplex(buf));

        }
        buf.position(orgPos);

    }

    // set the limit to the end of the written data and reset the position to zero
    buf.limit(buf.position());
    buf.position(0);

    return buf;
}
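
The closing limit(buf.position()) / position(0) pair, which also appears in the torrent hashing examples on this page, trims the buffer to exactly the bytes that were written and rewinds it for the caller; it has the same net effect as buf.flip(). A minimal standalone sketch of the idiom, with made-up data:

ByteBuffer buf = ByteBuffer.allocate(50);
buf.put(new byte[] { 0x01, 0x02, 0x03 });   // compose some payload

buf.limit(buf.position());   // limit = number of bytes written
buf.position(0);             // rewind for the reader; same net effect as buf.flip()

byte[] out = new byte[buf.remaining()];     // exactly 3 bytes
buf.get(out);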

From source file:ga.rugal.jpt.common.tracker.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        LOG.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(), threads,
                (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    LOG.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    LOG.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}