Example usage for java.nio ByteBuffer rewind

List of usage examples for java.nio ByteBuffer rewind

Introduction

This page collects example usages of java.nio.ByteBuffer.rewind() from open-source projects.

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer. The position is set to zero and the mark is discarded.
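
A minimal standalone sketch (not taken from the projects below) showing what rewind() does: a relative put advances the position, and rewind() resets it to zero so the same bytes can be read back.

import java.nio.ByteBuffer;

public class RewindDemo {
    public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.allocate(8);
        bb.putLong(42L);                  // relative write; position is now 8
        bb.rewind();                      // position back to 0, mark discarded
        System.out.println(bb.getLong()); // prints 42
    }
}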

Usage

From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java

/**
 * It computes average while fetching sum and row count from all the
 * corresponding regions. Approach is to compute a global sum of region level
 * sum and rowcount and then compute the average.
 * @param table
 * @param scan
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
        final long transactionId, final TransactionalTable table, final ColumnInterpreter<R, S, P, Q, T> ci,
        final Scan scan) throws Throwable {
    byte[] currentBeginKey = scan.getStartRow();
    HRegionInfo currentRegion = table.getRegionLocation(currentBeginKey).getRegionInfo();
    com.google.protobuf.ByteString regionName = ByteString.copyFromUtf8(currentRegion.getRegionNameAsString());
    final TransactionalAggregateRequest requestArg = validateArgAndGetPB(regionName, transactionId, scan, ci,
            false);
    class AvgCallBack implements Batch.Callback<Pair<S, Long>> {
        S sum = null;
        Long rowCount = 0L;

        public Pair<S, Long> getAvgArgs() {
            return new Pair<S, Long>(sum, rowCount);
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, Pair<S, Long> result) {
            sum = ci.add(sum, result.getFirst());
            rowCount += result.getSecond();
        }
    }
    AvgCallBack avgCallBack = new AvgCallBack();
    table.coprocessorService(TrxRegionService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TrxRegionService, Pair<S, Long>>() {
                @Override
                public Pair<S, Long> call(TrxRegionService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TransactionalAggregateResponse> rpcCallback = new BlockingRpcCallback<TransactionalAggregateResponse>();
                    instance.getAvg(controller, requestArg, rpcCallback);
                    TransactionalAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    Pair<S, Long> pair = new Pair<S, Long>(null, 0L);
                    if (response.getFirstPartCount() == 0) {
                        return pair;
                    }
                    ByteString b = response.getFirstPart(0);
                    T t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                    S s = ci.getPromotedValueFromProto(t);
                    pair.setFirst(s);
                    ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
                    bb.rewind();
                    pair.setSecond(bb.getLong());
                    return pair;
                }
            }, avgCallBack);
    return avgCallBack.getAvgArgs();
}
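
A recurring detail in this example (and in getStdArgs below): the 8-byte row count arrives as a raw byte array, is copied into a fresh buffer with a relative put(), and must be rewound before getLong(), because put() leaves the position at the end of the written data. A minimal sketch of just that step, with a made-up byte array standing in for getBytesFromResponse(response.getSecondPart()):

byte[] countBytes = { 0, 0, 0, 0, 0, 0, 0, 7 };          // stand-in for the serialized row count
ByteBuffer bb = ByteBuffer.allocate(8).put(countBytes);  // relative put() moves the position to 8
bb.rewind();                                             // reset the position to 0
long rowCount = bb.getLong();                            // 7; without rewind() this throws BufferUnderflowException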

From source file:x10.x10rt.yarn.ApplicationMaster.java

private void processMessage(ByteBuffer msg, SocketChannel sc) {
    assert (msg.capacity() >= headerLength);

    msg.rewind(); // reset the buffer for reading from the beginning
    CTRL_MSG_TYPE type = CTRL_MSG_TYPE.values()[msg.getInt()];
    int destination = msg.getInt();
    final int source = msg.getInt();
    int datalen = msg.getInt();
    assert (datalen == msg.remaining());

    LOG.info("Processing message of type " + type + ", size " + (headerLength + datalen) + " from place "
            + source);
    /*      System.err.print("Message contents:");
          for (int i=0; i<msg.capacity(); i++)
             System.err.print(" "+Integer.toHexString(msg.get(i)).toUpperCase());
          System.err.println();
    */
    switch (type) {
    case HELLO: {
        // read the port information, and update the record for this place
        assert (datalen == 4 && source < numRequestedContainers.get() && source >= 0);
        final CommunicationLink linkInfo;
        LOG.info("Getting link for place " + source);
        synchronized (links) {
            linkInfo = links.get(source);
        }
        linkInfo.port = ((msg.get() & 0xFF) << 8) | (msg.get() & 0xFF); // unsigned short in network order
        linkInfo.sc = sc;

        // check if there are pending port requests for this place
        if (linkInfo.pendingPortRequests != null) {
            // prepare response message
            String linkString = linkInfo.node.getHost() + ":" + linkInfo.port;
            byte[] linkBytes = linkString.getBytes(); // matches existing code.  TODO: switch to UTF8
            ByteBuffer response = ByteBuffer.allocateDirect(headerLength + linkBytes.length)
                    .order(ByteOrder.nativeOrder());
            response.putInt(CTRL_MSG_TYPE.PORT_RESPONSE.ordinal());
            response.putInt(-1);
            response.putInt(source);
            response.putInt(linkBytes.length);
            response.put(linkBytes);
            // send response to each waiting place
            for (int place : linkInfo.pendingPortRequests) {
                response.putInt(4, place); // set the destination to the requesting place
                response.rewind();
                // TODO: this code may get stuck here if the receiver isn't reading properly
                try {
                    while (response.hasRemaining())
                        links.get(place).sc.write(response);
                } catch (IOException e) {
                    LOG.warn("Unable to send out port response for place " + place + " to place " + source, e);
                }
            }
            linkInfo.pendingPortRequests = null;
        }
        LOG.info("HELLO from place " + source + " at port " + linkInfo.port);

        if (pendingKills != null && pendingKills.containsKey(source)) {
            int delay = pendingKills.remove(source);
            LOG.info("Scheduling a takedown of place " + source + " in " + delay + " seconds");
            placeKiller.schedule(new Runnable() {
                @Override
                public void run() {
                    LOG.info("KILLING CONTAINER FOR PLACE " + source);
                    nodeManager.stopContainerAsync(linkInfo.container, linkInfo.node);
                }
            }, delay, TimeUnit.SECONDS);
        }
    }
        break;
    case GOODBYE: {
        try {
            CommunicationLink link = links.get(source);
            assert (link.pendingPortRequests == null);
            sc.close();
            link.port = PORT_DEAD;
        } catch (IOException e) {
            LOG.warn("Error closing socket channel", e);
        }
        LOG.info("GOODBYE to place " + source);
    }
        break;
    case PORT_REQUEST: {
        LOG.info("Got PORT_REQUEST from place " + source + " for place " + destination);
        // check to see if we know the requested information
        CommunicationLink linkInfo = links.get(destination);
        if (linkInfo.port != PORT_UNKNOWN) {
            String linkString;
            if (linkInfo.port == PORT_DEAD)
                linkString = DEAD;
            else
                linkString = linkInfo.node.getHost() + ":" + linkInfo.port;
            LOG.info("Telling place " + source + " that place " + destination + " is at " + linkString);
            byte[] linkBytes = linkString.getBytes(); // matches existing code.  TODO: switch to UTF8
            ByteBuffer response = ByteBuffer.allocateDirect(headerLength + linkBytes.length)
                    .order(ByteOrder.nativeOrder());
            response.putInt(CTRL_MSG_TYPE.PORT_RESPONSE.ordinal());
            response.putInt(source);
            response.putInt(destination);
            response.putInt(linkBytes.length);
            response.put(linkBytes);
            response.rewind();
            // TODO: this code may get stuck here if the receiver isn't reading properly
            try {
                while (response.hasRemaining())
                    sc.write(response);
            } catch (IOException e) {
                LOG.warn("Unable to send out port response for place " + destination + " to place " + source,
                        e);
            }
        } else { // port is not known.  remember we have a place asking for it when it becomes available
            if (linkInfo.pendingPortRequests == null)
                linkInfo.pendingPortRequests = new ArrayList<Integer>(2);
            linkInfo.pendingPortRequests.add(source);
            LOG.info("Stashing PORT_REQUEST from place " + source + " for place " + destination
                    + " until the answer is known");
        }
    }
        break;
    case LAUNCH_REQUEST: {
        assert (datalen == 8);
        int numPlacesRequested = (int) msg.getLong();

        int oldvalue = numRequestedContainers.getAndAdd((int) numPlacesRequested);

        // Send request for containers to RM
        for (int i = 0; i < numPlacesRequested; i++) {
            Resource capability = Resource.newInstance(memoryPerPlaceInMb, coresPerPlace);
            ContainerRequest request = new ContainerRequest(capability, null, null, Priority.newInstance(0));
            LOG.info("Adding a new container request " + request.toString());
            resourceManager.addContainerRequest(request);
            pendingRequests.add(request);
        }

        LOG.info("Requested an increase of " + numPlacesRequested + " places on top of the previous " + oldvalue
                + " places");
        msg.rewind();
        msg.putInt(CTRL_MSG_TYPE.LAUNCH_RESPONSE.ordinal());
        msg.rewind();
        try {
            while (msg.hasRemaining())
                sc.write(msg);
        } catch (IOException e) {
            LOG.warn("Unable to send out launch response to place " + source, e);
        }
    }
        break;
    default:
        LOG.warn("unknown message type " + type);
    }
    LOG.info("Finished processing message of size " + (headerLength + datalen) + " from place " + source);
}
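
In the response-sending branches above, rewind() before writing works in place of the more usual flip() only because each buffer is allocated to exactly the message size, so the limit already equals the capacity. A minimal sketch of that write loop, where channel stands in for an already-connected SocketChannel and the surrounding method declares IOException:

ByteBuffer response = ByteBuffer.allocate(8); // sized exactly to the payload
response.putInt(1);                           // message type
response.putInt(42);                          // payload
response.rewind();                            // position back to 0; limit is already at capacity
while (response.hasRemaining())
    channel.write(response);                  // channel is an assumed open SocketChannel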

From source file:jext2.DataBlockAccess.java

/**
 * Splice the allocated branch onto inode
 * @throws IOException
 */
@NotThreadSafe(useLock = true)
private void spliceBranch(long logicalBlock, int[] offsets, long[] blockNrs, LinkedList<Long> newBlockNrs)
        throws IoError {

    int existDepth = blockNrs.length;

    if (existDepth == 0) { /* add direct block */
        long[] directBlocks = inode.getBlock();
        directBlocks[offsets[0]] = newBlockNrs.getFirst().longValue();
    } else {
        ByteBuffer buf = blocks.read(blockNrs[existDepth - 1]);
        Ext2fsDataTypes.putLE32U(buf, newBlockNrs.getFirst().longValue(), offsets[existDepth] * 4);
        buf.rewind();
        blocks.write(blockNrs[existDepth - 1], buf);
    }

    lastAllocLogicalBlock = logicalBlock;
    lastAllocPhysicalBlock = newBlockNrs.getLast().intValue();

    inode.setBlocks(inode.getBlocks() + newBlockNrs.size() * (superblock.getBlocksize() / 512));
    inode.setModificationTime(new Date());
}

From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java

/**
 * It computes a global standard deviation for a given column and its value.
 * Standard deviation is square root of (average of squares -
 * average*average). From individual regions, it obtains sum, square sum and
 * number of rows. With these, the above values are computed to get the global
 * std.
 * @param table
 * @param scan
 * @return standard deviations
 * @throws Throwable
 */
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<List<S>, Long> getStdArgs(
        final long transactionId, final TransactionalTable table, final ColumnInterpreter<R, S, P, Q, T> ci,
        final Scan scan) throws Throwable {
    byte[] currentBeginKey = scan.getStartRow();
    HRegionInfo currentRegion = table.getRegionLocation(currentBeginKey).getRegionInfo();
    com.google.protobuf.ByteString regionName = ByteString.copyFromUtf8(currentRegion.getRegionNameAsString());
    final TransactionalAggregateRequest requestArg = validateArgAndGetPB(regionName, transactionId, scan, ci,
            false);
    class StdCallback implements Batch.Callback<Pair<List<S>, Long>> {
        long rowCountVal = 0L;
        S sumVal = null, sumSqVal = null;

        public Pair<List<S>, Long> getStdParams() {
            List<S> l = new ArrayList<S>();
            l.add(sumVal);
            l.add(sumSqVal);
            Pair<List<S>, Long> p = new Pair<List<S>, Long>(l, rowCountVal);
            return p;
        }

        @Override
        public synchronized void update(byte[] region, byte[] row, Pair<List<S>, Long> result) {
            if (result.getFirst().size() > 0) {
                sumVal = ci.add(sumVal, result.getFirst().get(0));
                sumSqVal = ci.add(sumSqVal, result.getFirst().get(1));
                rowCountVal += result.getSecond();
            }
        }
    }
    StdCallback stdCallback = new StdCallback();
    table.coprocessorService(TrxRegionService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TrxRegionService, Pair<List<S>, Long>>() {
                @Override
                public Pair<List<S>, Long> call(TrxRegionService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TransactionalAggregateResponse> rpcCallback = new BlockingRpcCallback<TransactionalAggregateResponse>();
                    instance.getStd(controller, requestArg, rpcCallback);
                    TransactionalAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    Pair<List<S>, Long> pair = new Pair<List<S>, Long>(new ArrayList<S>(), 0L);
                    if (response.getFirstPartCount() == 0) {
                        return pair;
                    }
                    List<S> list = new ArrayList<S>();
                    for (int i = 0; i < response.getFirstPartCount(); i++) {
                        ByteString b = response.getFirstPart(i);
                        T t = ProtobufUtil.getParsedGenericInstance(ci.getClass(), 4, b);
                        S s = ci.getPromotedValueFromProto(t);
                        list.add(s);
                    }
                    pair.setFirst(list);
                    ByteBuffer bb = ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart()));
                    bb.rewind();
                    pair.setSecond(bb.getLong());
                    return pair;
                }
            }, stdCallback);
    return stdCallback.getStdParams();
}
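
The Javadoc above describes the standard-deviation formula as the square root of the average of squares minus the square of the average. With made-up aggregate values, the final step works out as follows:

long rowCount = 4L;                               // hypothetical global row count
double sum = 10.0;                                // hypothetical global sum
double sumOfSquares = 30.0;                       // hypothetical global sum of squares
double avg = sum / rowCount;                      // 2.5
double avgOfSquares = sumOfSquares / rowCount;    // 7.5
double std = Math.sqrt(avgOfSquares - avg * avg); // sqrt(7.5 - 6.25) = ~1.118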

From source file:jext2.DataBlockAccess.java

/** Allocate and set up a chain of blocks.
 * @param  inode   owner
 * @param  num     depth of the chain (number of blocks to allocate)
 * @param  goal    goal block
 * @param  offsets offsets in the indirection chain
 * @param  blockNrs    chain of already allocated blocks
 * @throws NoSpaceLeftOnDevice
 *
 * This function allocates num blocks, zeros out all but the last one, links
 * them into a chain and writes them to disk.
 */
@NotThreadSafe(useLock = true)
private LinkedList<Long> allocBranch(int num, long goal, int[] offsets, long[] blockNrs)
        throws JExt2Exception, NoSpaceLeftOnDevice {

    int n = 0;
    LinkedList<Long> result = new LinkedList<Long>();

    try {
        long parent = allocateBlock(goal);
        result.addLast(parent);

        if (parent > 0) {
            for (n = 1; n < num; n++) {
                /* allocate the next block */
                long nr = allocateBlock(parent);
                if (nr > 0) {
                    result.addLast(nr);

                    ByteBuffer buf = ByteBuffer.allocate(superblock.getBlocksize());
                    Ext2fsDataTypes.putLE32U(buf, nr, offsets[n] * 4);
                    buf.rewind();
                    blocks.write(parent, buf);
                } else {
                    break;
                }

                parent = nr;
            }
        }
    } catch (NoSpaceLeftOnDevice e) {
        for (long nr : result) {
            freeBlocks(new long[] { nr });
        }
    }

    if (num == n) /* Allocation successful */
        return result;
    else /* Allocation failed */
        throw new NoSpaceLeftOnDevice();
}

From source file:org.apache.usergrid.persistence.Schema.java

public static ByteBuffer serializeEntityProperty(String entityType, String propertyName, Object propertyValue) {
    ByteBuffer bytes = null;
    if (PROPERTY_UUID.equals(propertyName)) {
        bytes = bytebuffer(uuid(propertyValue));
    } else if (PROPERTY_TYPE.equals(propertyName)) {
        bytes = bytebuffer(string(propertyValue));
    } else {
        bytes = Schema.serializePropertyValueToJsonBinary(toJsonNode(propertyValue));
        if (Schema.getDefaultSchema().isPropertyEncrypted(entityType, propertyName)) {
            bytes.rewind();
            bytes = encrypt(bytes);
        }
    }
    return bytes;
}

From source file:org.apache.hadoop.raid.IADecoder.java

void writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations, int[] validErasedLocations, long limit,
        byte[] outBuf, IAStreamFactory sf) throws IOException {

    int seq = 0;

    for (long read = 0; read < limit;) {

        boolean important = false;

        LOG.info("anchor Decode_stripe " + seq + " Data_reading " + System.nanoTime());
        //read packets

        ReadPackage rp = readFromInputs(inputs, validErasedLocations, sf, seq);
        ByteBuffer buf = rp.buf;
        validErasedLocations = rp.validErasedLocations;
        int bufOffset = encodedBufSize * (stripeSize + paritySize - validErasedLocations.length);
        LOG.info("anchor Decode_stripe " + seq + " Data_read " + System.nanoTime());
        buf.rewind();

        //last threadNum# packet checked
        if ((limit - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(bufOffset + 4, (byte) 1);
        } else {
            buf.put(bufOffset + 4, (byte) 0);
        }

        int toRead = (int) Math.min((long) bufSize, limit - read);

        //finding the best ring buffer
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }
        if (important) {
            chosen = (int) (((limit - read + bufSize - 1) / bufSize - 1) % threadNum);
        }

        DecodePackage dp = (new DecodePackage(erasedLocations, validErasedLocations, buf)).limit(limit)
                .outputBuffer(outBuf);
        //dispatch
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                q[chosen].put(dp);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                flag = true;
            }
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_pushed " + System.nanoTime());

        seq++;
        read += toRead;
    }

    //waiting for the end of the decode
    for (int i = 0; i < threadNum; i++) {
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                p[i].take();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                flag = true;
            }
        }
    }
}

From source file:org.carbondata.core.util.CarbonUtil.java

public static short[] getUnCompressColumnIndex(int totalLength, byte[] columnIndexData) {
    ByteBuffer buffer = ByteBuffer.wrap(columnIndexData);
    buffer.rewind();
    int indexDataLength = buffer.getInt();
    short[] indexData = new short[indexDataLength / 2];
    short[] indexMap = new short[(totalLength - indexDataLength - CarbonCommonConstants.INT_SIZE_IN_BYTE) / 2];
    int counter = 0;
    while (counter < indexData.length) {
        indexData[counter] = buffer.getShort();
        counter++;
    }
    counter = 0;
    while (buffer.hasRemaining()) {
        indexMap[counter++] = buffer.getShort();
    }
    return UnBlockIndexer.uncompressIndex(indexData, indexMap);
}
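
Note that in this example the rewind() immediately after ByteBuffer.wrap(...) is a defensive no-op: wrap() already produces a buffer whose position is zero and whose limit is the array length. A minimal sketch:

byte[] raw = { 0, 0, 0, 2, 0, 5 };        // hypothetical column index bytes
ByteBuffer buffer = ByteBuffer.wrap(raw); // position is already 0, limit is raw.length
buffer.rewind();                          // harmless; position stays at 0
int indexDataLength = buffer.getInt();    // reads 2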

From source file:com.sonymobile.android.media.internal.VUParser.java

protected boolean parseODSMData(IsoTrack odsmTrack) {
    int kObjectSize = 11;
    SampleTable sampleTable = odsmTrack.getSampleTable();
    if (sampleTable.getSampleCount() > 1) {
        // TODO: Should multiple entries be supported?
        return false;
    }
    mSinfList = new ArrayList<SinfData>(2);

    ByteBuffer stszData = sampleTable.getStszData();
    stszData.rewind();
    stszData.getInt(); // version and flags

    int dataSize = stszData.getInt();
    if (dataSize == 0) {
        stszData.getInt(); // sample_count
        dataSize = stszData.getInt();
    }

    byte[] data = new byte[dataSize];
    try {
        ByteBuffer stcoData = sampleTable.getStcoData();
        stcoData.rewind();

        stcoData.getInt(); // version and flags
        stcoData.getInt(); // entry_count

        long sampleOffset = 0;
        if (sampleTable.isUsingLongChunkOffsets()) {
            sampleOffset = stcoData.getLong();
        } else {
            sampleOffset = 0xFFFFFFFFL & stcoData.getInt();
        }

        mDataSource.readAt(sampleOffset, data, dataSize);
        ByteBuffer dataBuffer = ByteBuffer.wrap(data);
        byte updateTag = dataBuffer.get();
        if (updateTag != 1) {
            return false;
        }
        int size = 0;
        int sizePart = 0;
        do {
            sizePart = (dataBuffer.get() & 0xFF);
            size = ((size << 7) & 0xFFFFFF80) | (sizePart & 0x7F);
        } while (sizePart > 128);
        while (size >= kObjectSize) {
            byte descriptorTag = dataBuffer.get();
            if (descriptorTag != 17) {
                // not mp4 descriptor
                return false;
            }
            dataBuffer.get(); // ODLength
            dataBuffer.getShort(); // 10 bit ObjectDescriptorID, 1 bit
                                   // URL_FLAG and 5 bit reserved

            byte esTag = dataBuffer.get();
            if (esTag != 0x0F) {
                return false;
            }
            dataBuffer.get(); // ES Length
            short esTrackReferenceIndex = dataBuffer.getShort();
            byte ipmpDescriptorPointer = dataBuffer.get();
            if (ipmpDescriptorPointer != 0x0A) {
                // unexpected pointer
                return false;
            }
            dataBuffer.get(); // ipmpLength
            byte ipmpDescriptorId = dataBuffer.get();
            SinfData sinfData = new SinfData();
            sinfData.esIdReference = esTrackReferenceIndex;
            sinfData.ipmpDescriptorId = ipmpDescriptorId;
            mSinfList.add(sinfData);
            size -= kObjectSize;
        }
        dataBuffer.get(); // IPMP Descriptor Update Tag
        int sinfCount = mSinfList.size();
        size = 0;
        sizePart = 0;
        do {
            sizePart = (dataBuffer.get() & 0xFF);
            size = ((size << 7) & 0xFFFFFF80) | (sizePart & 0x7F);
        } while (sizePart > 128);
        while (size > 0) {
            dataBuffer.get(); // IPMP Descriptor Tag
            int ipmpByteCount = 1;
            int ipmpLength = 0;
            sizePart = 0;
            do {
                sizePart = (dataBuffer.get() & 0xFF);
                ipmpByteCount++;
                ipmpLength = ((ipmpLength << 7) & 0xFFFFFF80) | (sizePart & 0x7F);
            } while (sizePart > 128);
            ipmpByteCount += ipmpLength;
            byte ipmpDescriptorId = dataBuffer.get();
            dataBuffer.getShort(); // IPMPS Type
            byte[] ipmpData = new byte[ipmpLength - 3];
            dataBuffer.get(ipmpData);
            SinfData sinfData = null;
            for (int i = 0; i < sinfCount; i++) {
                sinfData = mSinfList.get(i);
                if (sinfData.ipmpDescriptorId == ipmpDescriptorId) {
                    sinfData.ipmpData = new byte[ipmpData.length];
                    for (int j = 0; j < ipmpData.length; j++) {
                        sinfData.ipmpData[j] = ipmpData[j];
                    }
                    break;
                }
            }
            size -= ipmpByteCount;
        }
        int ipmpDataLength = 0;
        for (int i = 0; i < sinfCount; i++) {
            SinfData sinfData = mSinfList.get(i);
            ipmpDataLength += sinfData.ipmpData.length;
        }

        int ipmpMetaDataLength = 16 // MARLIN_SYSTEM_ID
                + 4 // size of all SINF data
                + 4 // size of SINF box id
                + 4 * sinfCount // trackIndex * sinfCount
                + 4 * sinfCount // ipmpLength * sinfCount
                + ipmpDataLength; // size of ipmpData
        byte[] ipmpMetaData = new byte[ipmpMetaDataLength];
        int offset = 16;

        for (int i = 0; i < offset; i++) {
            int hexVal = Integer.parseInt(Util.MARLIN_SYSTEM_ID.substring(i * 2, i * 2 + 2), 16);
            ipmpMetaData[i] = (byte) hexVal;
        }
        ipmpMetaData[offset++] = (byte) ((ipmpDataLength >> 24) & 0xFF);
        ipmpMetaData[offset++] = (byte) ((ipmpDataLength >> 16) & 0xFF);
        ipmpMetaData[offset++] = (byte) ((ipmpDataLength >> 8) & 0xFF);
        ipmpMetaData[offset++] = (byte) (ipmpDataLength & 0xFF);
        ipmpMetaData[offset++] = 0x73; // S
        ipmpMetaData[offset++] = 0x69; // I
        ipmpMetaData[offset++] = 0x6E; // N
        ipmpMetaData[offset++] = 0x66; // F

        int numTracks = mTracks.size();
        for (int i = 0; i < numTracks; i++) {
            IsoTrack track = (IsoTrack) mTracks.get(i);
            for (int j = 0; j < sinfCount; j++) {
                SinfData sinfData = mSinfList.get(j);
                if (sinfData.esIdReference == track.getTrackId()) {
                    track.getMetaData().addValue(MetaData.KEY_IPMP_DATA, sinfData.ipmpData);
                    // track index
                    ipmpMetaData[offset++] = (byte) ((i >> 24) & 0xFF);
                    ipmpMetaData[offset++] = (byte) ((i >> 16) & 0xFF);
                    ipmpMetaData[offset++] = (byte) ((i >> 8) & 0xFF);
                    ipmpMetaData[offset++] = (byte) (i & 0xFF);

                    // sinf data length
                    ipmpMetaData[offset++] = (byte) ((sinfData.ipmpData.length >> 24) & 0xFF);
                    ipmpMetaData[offset++] = (byte) ((sinfData.ipmpData.length >> 16) & 0xFF);
                    ipmpMetaData[offset++] = (byte) ((sinfData.ipmpData.length >> 8) & 0xFF);
                    ipmpMetaData[offset++] = (byte) (sinfData.ipmpData.length & 0xFF);

                    System.arraycopy(sinfData.ipmpData, 0, ipmpMetaData, offset, sinfData.ipmpData.length);

                    byte[] tempData = new byte[4 + sinfData.ipmpData.length];
                    tempData[0] = (byte) ((sinfData.ipmpData.length >> 24) & 0xFF);
                    tempData[1] = (byte) ((sinfData.ipmpData.length >> 16) & 0xFF);
                    tempData[2] = (byte) ((sinfData.ipmpData.length >> 8) & 0xFF);
                    tempData[3] = (byte) (sinfData.ipmpData.length & 0xFF);
                    System.arraycopy(sinfData.ipmpData, 0, tempData, 4, sinfData.ipmpData.length);

                    // Create JSON for this track
                    String jsonData = null;
                    try {
                        jsonData = Util.getJSONIPMPData(tempData);
                    } catch (JSONException e) {
                        if (LOGS_ENABLED)
                            Log.e(TAG, "Exception when creating JSON object" + e);
                        return false;
                    }
                    track.getMediaFormat().setString(KEY_DRM_UUID, Util.MARLIN_SYSTEM_ID);
                    track.getMediaFormat().setString(KEY_MARLIN_JSON, jsonData);

                    offset += sinfData.ipmpData.length;
                    break;
                }
            }
        }

        mIpmpMetaData = ipmpMetaData;

        addMetaDataValue(KEY_IPMP_DATA, mIpmpMetaData);

        mCurrentTrack.getMetaData().addValue(KEY_MIME_TYPE, MimeType.OCTET_STREAM);

    } catch (IOException e) {
        if (LOGS_ENABLED)
            Log.e(TAG, "IOException when parsing ODSM data");
    }
    return true;
}

From source file:org.carbondata.core.util.CarbonUtil.java

public static int[] getUnCompressColumnIndex(int totalLength, byte[] columnIndexData,
        NumberCompressor numberCompressor) {
    byte[] indexData = null;
    byte[] indexMap = null;
    try {
        ByteBuffer buffer = ByteBuffer.wrap(columnIndexData);
        buffer.rewind();
        int indexDataLength = buffer.getInt();
        indexData = new byte[indexDataLength];
        indexMap = new byte[totalLength - indexDataLength - CarbonCommonConstants.INT_SIZE_IN_BYTE];
        buffer.get(indexData);
        buffer.get(indexMap);
    } catch (Exception e) {
        LOGGER.error("Error while compressColumn Index ");
    }
    return UnBlockIndexer.uncompressIndex(numberCompressor.unCompress(indexData),
            numberCompressor.unCompress(indexMap));
}