Example usage for java.io DataOutputStream writeInt

Introduction

On this page you can find example usage of java.io.DataOutputStream.writeInt.

Prototype

public final void writeInt(int v) throws IOException 

Document

Writes an int to the underlying output stream as four bytes, high byte first.
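
Because writeInt emits the four bytes high byte first (big-endian), the value can be read back unchanged with DataInputStream.readInt. A minimal, self-contained sketch (not taken from any of the projects below) showing the byte layout:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteIntDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);
        dos.writeInt(0x12345678);

        // Four bytes, high byte first: 12 34 56 78
        for (byte b : baos.toByteArray()) {
            System.out.printf("%02x ", b);
        }
        System.out.println();

        // readInt is the exact inverse.
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
        System.out.println(dis.readInt() == 0x12345678); // prints: true
    }
}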

Usage

From source file: org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex.java

@Test
public void testSecondaryIndexBinarySearch() throws IOException {
    int numTotalKeys = 99;
    assertTrue(numTotalKeys % 2 == 1); // Ensure no one made this even.

    // We only add odd-index keys into the array that we will binary-search.
    int numSearchedKeys = (numTotalKeys - 1) / 2;

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);

    dos.writeInt(numSearchedKeys);
    int curAllEntriesSize = 0;
    int numEntriesAdded = 0;

    // Only odd-index elements of this array are used to keep the secondary
    // index entries of the corresponding keys.
    int[] secondaryIndexEntries = new int[numTotalKeys];

    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i * 2);
        KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("val"));
        //KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length);
        keys.add(cell.getKey());
        String msgPrefix = "Key #" + i + " (" + Bytes.toStringBinary(k) + "): ";
        StringBuilder padding = new StringBuilder();
        while (msgPrefix.length() + padding.length() < 70)
            padding.append(' ');
        msgPrefix += padding;
        if (i % 2 == 1) {
            dos.writeInt(curAllEntriesSize);
            secondaryIndexEntries[i] = curAllEntriesSize;
            LOG.info(msgPrefix + "secondary index entry #" + ((i - 1) / 2) + ", offset " + curAllEntriesSize);
            curAllEntriesSize += cell.getKey().length + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD;
            ++numEntriesAdded;
        } else {
            secondaryIndexEntries[i] = -1;
            LOG.info(msgPrefix + "not in the searched array");
        }
    }

    // Make sure the keys are increasing.
    for (int i = 0; i < keys.size() - 1; ++i)
        assertTrue(KeyValue.COMPARATOR.compare(new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length),
                new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0);

    dos.writeInt(curAllEntriesSize);
    assertEquals(numSearchedKeys, numEntriesAdded);
    int secondaryIndexOffset = dos.size();
    assertEquals(Bytes.SIZEOF_INT * (numSearchedKeys + 2), secondaryIndexOffset);

    for (int i = 1; i <= numTotalKeys - 1; i += 2) {
        assertEquals(dos.size(), secondaryIndexOffset + secondaryIndexEntries[i]);
        long dummyFileOffset = getDummyFileOffset(i);
        int dummyOnDiskSize = getDummyOnDiskSize(i);
        LOG.debug("Storing file offset=" + dummyFileOffset + " and onDiskSize=" + dummyOnDiskSize
                + " at offset " + dos.size());
        dos.writeLong(dummyFileOffset);
        dos.writeInt(dummyOnDiskSize);
        LOG.debug("Stored key " + ((i - 1) / 2) + " at offset " + dos.size());
        dos.write(keys.get(i));
    }

    dos.writeInt(curAllEntriesSize);

    ByteBuffer nonRootIndex = ByteBuffer.wrap(baos.toByteArray());
    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] searchKey = keys.get(i);
        byte[] arrayHoldingKey = new byte[searchKey.length + searchKey.length / 2];

        // To make things a bit more interesting, store the key we are looking
        // for at a non-zero offset in a new array.
        System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, searchKey.length);

        KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue(arrayHoldingKey, searchKey.length / 2,
                searchKey.length);
        int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell, nonRootIndex, KeyValue.COMPARATOR);
        String lookupFailureMsg = "Failed to look up key #" + i + " (" + Bytes.toStringBinary(searchKey) + ")";

        int expectedResult;
        int referenceItem;

        if (i % 2 == 1) {
            // This key is in the array we search as the element (i - 1) / 2. Make
            // sure we find it.
            expectedResult = (i - 1) / 2;
            referenceItem = i;
        } else {
            // This key is not in the searched array; it falls between two of its
            // elements, before the first, or after the last. The expected result is
            // the index of the previous key in the searched array, or -1 for i = 0.
            expectedResult = i / 2 - 1;
            referenceItem = i - 1;
        }

        assertEquals(lookupFailureMsg, expectedResult, searchResult);

        // Now test that we can get the offset and the on-disk size using a
        // higher-level API function.
        boolean locateBlockResult = (BlockIndexReader.locateNonRootIndexEntry(nonRootIndex, cell,
                KeyValue.COMPARATOR) != -1);

        if (i == 0) {
            assertFalse(locateBlockResult);
        } else {
            assertTrue(locateBlockResult);
            String errorMsg = "i=" + i + ", position=" + nonRootIndex.position();
            assertEquals(errorMsg, getDummyFileOffset(referenceItem), nonRootIndex.getLong());
            assertEquals(errorMsg, getDummyOnDiskSize(referenceItem), nonRootIndex.getInt());
        }
    }

}

From source file: ClipboardExample.java

public void javaToNative(Object object, TransferData transferData) {
    if (!checkMyType(object) || !isSupportedType(transferData)) {
        DND.error(DND.ERROR_INVALID_DATA);
    }
    MyType[] myTypes = (MyType[]) object;
    try {
        // write data to a byte array and then ask super to convert to
        // pMedium
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream writeOut = new DataOutputStream(out);
        for (int i = 0, length = myTypes.length; i < length; i++) {
            byte[] buffer = myTypes[i].firstName.getBytes();
            writeOut.writeInt(buffer.length);
            writeOut.write(buffer);
            buffer = myTypes[i].lastName.getBytes();
            writeOut.writeInt(buffer.length);
            writeOut.write(buffer);
        }
        byte[] buffer = out.toByteArray();
        writeOut.close();
        super.javaToNative(buffer, transferData);
    } catch (IOException e) {
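        // Ignored: writes to an in-memory ByteArrayOutputStream cannot fail.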
    }
}
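
The stream built above is a sequence of length-prefixed records: each writeInt stores the number of string bytes that follow. A minimal sketch of the matching nativeToJava read side, assuming MyType exposes firstName and lastName fields (the decoder is not shown in the excerpt above):

public Object nativeToJava(TransferData transferData) {
    if (!isSupportedType(transferData))
        return null;
    byte[] buffer = (byte[]) super.nativeToJava(transferData);
    if (buffer == null)
        return null;
    List<MyType> result = new ArrayList<MyType>();
    try {
        DataInputStream readIn = new DataInputStream(new ByteArrayInputStream(buffer));
        while (readIn.available() > 0) {
            MyType datum = new MyType();
            int size = readIn.readInt(); // length prefix written by writeInt
            byte[] bytes = new byte[size];
            readIn.readFully(bytes);
            datum.firstName = new String(bytes);
            size = readIn.readInt();
            bytes = new byte[size];
            readIn.readFully(bytes);
            datum.lastName = new String(bytes);
            result.add(datum);
        }
        readIn.close();
    } catch (IOException e) {
        return null;
    }
    return result.toArray(new MyType[result.size()]);
}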

From source file: org.prorefactor.refactor.PUB.java

private void writeSymbol(DataOutputStream out, Symbol symbol) throws IOException {
    out.writeInt(symbol.getProgressType());
    out.writeUTF(symbol.fullName()); // We write caseAsDefined
    if (symbol instanceof Primative) {
        Primative primative = (Primative) symbol;
        int dataType = primative.getDataType().getTokenType();
        out.writeInt(dataType);
        if (dataType == TokenTypes.CLASS)
            out.writeUTF(primative.getClassName());
    } else
        out.writeInt(0);
}

From source file: org.apache.hadoop.hbase.ipc.BlockingRpcConnection.java

BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException {
    super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId,
            rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor);
    this.rpcClient = rpcClient;
    if (remoteId.getAddress().isUnresolved()) {
        throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName());
    }

    this.connectionHeaderPreamble = getConnectionHeaderPreamble();
    ConnectionHeader header = getConnectionHeader();
    ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + header.getSerializedSize());
    DataOutputStream dos = new DataOutputStream(baos);
    dos.writeInt(header.getSerializedSize());
    header.writeTo(dos);
    assert baos.size() == 4 + header.getSerializedSize();
    this.connectionHeaderWithLength = baos.getBuffer();

    UserGroupInformation ticket = remoteId.ticket.getUGI();
    this.threadName = "IPC Client (" + this.rpcClient.socketFactory.hashCode() + ") connection to "
            + remoteId.getAddress().toString()
            + ((ticket == null) ? " from an unknown user" : (" from " + ticket.getUserName()));

    if (this.rpcClient.conf.getBoolean(BlockingRpcClient.SPECIFIC_WRITE_THREAD, false)) {
        callSender = new CallSender(threadName, this.rpcClient.conf);
        callSender.start();
    } else {
        callSender = null;
    }
}
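
The writeInt here is simple length-prefixed framing: the four-byte size tells the peer how many header bytes follow. A hypothetical helper (not part of HBase) that consumes one such frame:

// Reads one "int length, then payload" frame like the one built above.
static byte[] readLengthPrefixedFrame(InputStream in) throws IOException {
    DataInputStream din = new DataInputStream(in);
    int length = din.readInt(); // the four bytes written by dos.writeInt(...)
    byte[] payload = new byte[length];
    din.readFully(payload); // the serialized ConnectionHeader
    return payload;
}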

From source file: com.igormaznitsa.jhexed.hexmap.HexFieldLayer.java

public void write(final OutputStream out) throws IOException {
    final DataOutputStream dout = out instanceof DataOutputStream ? (DataOutputStream) out
            : new DataOutputStream(out);
    dout.writeUTF(this.name);
    dout.writeUTF(this.comments);

    dout.writeShort(this.values.size());
    for (int i = 0; i < this.values.size(); i++) {
        this.values.get(i).write(dout);
    }

    dout.writeInt(this.columns);
    dout.writeInt(this.rows);
    dout.writeBoolean(this.visible);

    final byte[] packed = Utils.packByteArray(this.array);
    dout.writeInt(packed.length);
    dout.write(packed);
    dout.flush();
}

From source file: org.prorefactor.refactor.PUB.java

private void writeStrings(DataOutputStream out) throws IOException {
    int size = stringTable.size();
    out.writeInt(size);
    for (int i = 0; i < size; i++) {
        out.writeUTF((String) stringTable.get(new Integer(i)));
    }
}
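
Reading the table back mirrors the write exactly: readInt recovers the entry count, then readUTF runs that many times. A hypothetical counterpart (readStrings is not part of the PUB source shown here):

private Map<Integer, String> readStrings(DataInputStream in) throws IOException {
    int size = in.readInt();
    Map<Integer, String> table = new HashMap<Integer, String>();
    for (int i = 0; i < size; i++) {
        table.put(Integer.valueOf(i), in.readUTF());
    }
    return table;
}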

From source file: org.apache.hadoop.hive.accumulo.mr.TestHiveAccumuloTableInputFormat.java

private byte[] parseIntBytes(String s) throws IOException {
    int val = Integer.parseInt(s);
    ByteArrayOutputStream baos = new ByteArrayOutputStream(4);
    DataOutputStream out = new DataOutputStream(baos);
    out.writeInt(val);
    out.close();
    return baos.toByteArray();
}
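
A quick sanity check (hypothetical usage, not from the test class): the four bytes produced here round-trip through DataInputStream.readInt.

byte[] bytes = parseIntBytes("42");
DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
assertEquals(42, in.readInt());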

From source file: com.project.qrypto.keymanagement.KeyManager.java

/**
 * Saves the Keystore. Uses either internal or external memory depending on settings.
 * @param context the context to use
 *
 * @throws IOException if the output stream is invalid or the write is interrupted
 * @throws InvalidCipherTextException if the key is bad or the data is bad
 */
public void commit(Context context) throws IOException, InvalidCipherTextException {
    //Commit Preferences
    Editor edit = context.getSharedPreferences(PREFERENCES_NAME, Context.MODE_PRIVATE).edit();
    edit.putBoolean(USE_INTERNAL_STORAGE, internalStorage);
    edit.putBoolean(PASSWORD_PROTECTED, passwordProtected);
    edit.putBoolean(SETUP_COMPLETE, true);
    edit.commit();

    //Commit Key Data
    DataOutputStream writer = null;
    ByteArrayOutputStream output = null;

    //Create the proper streams
    if (passwordProtected) {
        output = new ByteArrayOutputStream();
        writer = new DataOutputStream(output);
    } else {
        writer = new DataOutputStream(getAssociatedOutFileStream(context));
    }

    //Write everything out to the stream
    writer.writeUTF("RCYTHR1"); //Special indicator to determine if we decrypt properly
    writer.writeInt(lookup.size());
    for (Entry<String, Key> entry : lookup.entrySet()) {
        writer.writeUTF(entry.getKey());
        entry.getValue().writeData(writer);
    }
    writer.writeByte(1); //Prevent null padding from causing too much truncation.

    writer.flush();

    //If we're password protecting we still need to encrypt and output to file
    if (passwordProtected) {
        OutputStream finalOut = getAssociatedOutFileStream(context);
        finalOut.write(AES.handle(true, output.toByteArray(), keyStoreKey));
        finalOut.close();
    }

    writer.close();
}

From source file: edu.umn.cs.spatialHadoop.nasa.StockQuadTree.java

/**
 * Merges multiple trees of the same spatial resolution into one tree that
 * covers a longer time span (lower temporal resolution) at the same spatial
 * resolution.
 * @param inTrees the input trees to merge, all of the same spatial resolution
 * @param outTree the stream to which the merged tree is written
 * @throws IOException
 */
public static void merge(DataInputStream[] inTrees, DataOutputStream outTree) throws IOException {
    // Write the spatial resolution of the output, which must match that of all input trees
    int resolution = inTrees[0].readInt();
    short fillValue = inTrees[0].readShort();
    for (int iTree = 1; iTree < inTrees.length; iTree++) {
        int iResolution = inTrees[iTree].readInt();
        int iFillValue = inTrees[iTree].readShort();
        if (resolution != iResolution || fillValue != iFillValue)
            throw new RuntimeException("Tree #0 has a resolution of " + resolution
                    + " not compatible with resolution " + iResolution + " of Tree #" + iTree);
    }
    outTree.writeInt(resolution);
    outTree.writeShort(fillValue);

    // Sum up the cardinality of all input trees
    int cardinality = 0;
    int[] cardinalities = new int[inTrees.length];
    for (int iTree = 0; iTree < inTrees.length; iTree++)
        cardinality += (cardinalities[iTree] = inTrees[iTree].readInt());
    outTree.writeInt(cardinality);

    // Write timestamps of all trees
    for (int iTree = 0; iTree < inTrees.length; iTree++) {
        outTree.writeLong(inTrees[iTree].readLong());
    }

    // Merge sorted values in all input trees
    byte[] buffer = new byte[1024 * 1024];
    int size = resolution * resolution;
    while (size-- > 0) {
        for (int iTree = 0; iTree < inTrees.length; iTree++) {
            int sizeToRead = ValueSize * cardinalities[iTree]; // sizeof(short) * c
            while (sizeToRead > 0) {
                int bytesRead = inTrees[iTree].read(buffer, 0, Math.min(sizeToRead, buffer.length));
                outTree.write(buffer, 0, bytesRead);
                sizeToRead -= bytesRead;
            }
        }
    }

    // Merge aggregate values of all nodes
    Node treeNode = new Node();
    StockQuadTree stockQuadTree = getOrCreateStockQuadTree(resolution);
    int numOfNodes = stockQuadTree.nodesID.length;
    for (int iNode = 0; iNode < numOfNodes; iNode++) {
        Node outputNode = new Node();
        for (int iTree = 0; iTree < inTrees.length; iTree++) {
            treeNode.readFields(inTrees[iTree]);
            outputNode.accumulate(treeNode);
        }
        outputNode.write(outTree);
    }
}

From source file: org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore.java

@Override
public void storeToken(TimelineDelegationTokenIdentifier tokenId, Long renewDate) throws IOException {
    DataOutputStream ds = null;
    WriteBatch batch = null;
    try {
        byte[] k = createTokenEntryKey(tokenId.getSequenceNumber());
        if (db.get(k) != null) {
            throw new IOException(tokenId + " already exists");
        }
        byte[] v = buildTokenData(tokenId, renewDate);
        ByteArrayOutputStream bs = new ByteArrayOutputStream();
        ds = new DataOutputStream(bs);
        ds.writeInt(tokenId.getSequenceNumber());
        batch = db.createWriteBatch();
        batch.put(k, v);
        batch.put(LATEST_SEQUENCE_NUMBER_KEY, bs.toByteArray());
        db.write(batch);
    } catch (DBException e) {
        throw new IOException(e);
    } finally {
        IOUtils.cleanup(LOG, ds);
        IOUtils.cleanup(LOG, batch);
    }
}