Example usage for java.io DataOutputStream writeInt

Introduction

On this page you can find example usages of java.io.DataOutputStream.writeInt, collected from open-source projects.

Prototype

public final void writeInt(int v) throws IOException 

Document

Writes an int to the underlying output stream as four bytes, high byte first.
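
Before the usage examples, a minimal self-contained sketch (the class name WriteIntDemo is ours, not from the JDK) that shows the four-byte, big-endian layout described above:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteIntDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer);

        out.writeInt(0x12345678);
        out.flush();

        // Prints "12 34 56 78": the high byte is written first
        for (byte b : buffer.toByteArray()) {
            System.out.printf("%02x ", b);
        }
    }
}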

Usage

From source file:ipc.Server.java

/**
 * Setup response for the IPC Call.
 * 
 * @param response buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param status {@link Status} of the IPC call
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponse(ByteArrayOutputStream response, Call call, Status status, Writable rv,
        String errorClass, String error) throws IOException {
    response.reset();
    DataOutputStream out = new DataOutputStream(response);
    out.writeInt(call.id); // write call id
    out.writeInt(status.state); // write status

    if (status == Status.SUCCESS) {
        rv.write(out);
    } else {
        WritableUtils.writeString(out, errorClass);
        WritableUtils.writeString(out, error);
    }
    wrapWithSasl(response, call);
    call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}
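
A hedged sketch of how a client might read this framing back; the method name readResponse and the error handling are our illustrative assumptions, not the actual ipc.Client code:

private void readResponse(DataInputStream in) throws IOException {
    int callId = in.readInt(); // matches out.writeInt(call.id)
    int state = in.readInt(); // matches out.writeInt(status.state)
    if (state == Status.SUCCESS.state) {
        // deserialize the return value (a Writable) from the remaining bytes
    } else {
        String errorClass = WritableUtils.readString(in); // matches writeString(out, errorClass)
        String error = WritableUtils.readString(in); // matches writeString(out, error)
        // surface the remote error to the waiting caller
    }
}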

From source file:ipc.Server.java

private void wrapWithSasl(ByteArrayOutputStream response, Call call) throws IOException {
    if (call.connection.useSasl) {
        byte[] token = response.toByteArray();
        // synchronization may be needed since there can be multiple Handler
        // threads using saslServer to wrap responses.
        synchronized (call.connection.saslServer) {
            token = call.connection.saslServer.wrap(token, 0, token.length);
        }
        if (LOG.isDebugEnabled())
            LOG.debug("Adding saslServer wrapped token of size " + token.length + " as call response.");
        response.reset();
        DataOutputStream saslOut = new DataOutputStream(response);
        saslOut.writeInt(token.length);
        saslOut.write(token, 0, token.length);
    }
}
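
The length-prefixed token above implies a matching read on the other side. A sketch, assuming a javax.security.sasl.SaslClient and a method name readSaslToken of our own invention:

private byte[] readSaslToken(DataInputStream in, SaslClient saslClient) throws IOException {
    int length = in.readInt(); // matches saslOut.writeInt(token.length)
    byte[] token = new byte[length];
    in.readFully(token); // matches saslOut.write(token, 0, token.length)
    return saslClient.unwrap(token, 0, token.length); // undo the server-side wrap
}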

From source file:org.apache.jmeter.protocol.mqtt.client.MqttPublisher.java

public byte[] createRandomPayload(String Seed, String min, String max, String type_random, String useTimeStamp,
        String useNumSeq, String type_value, String format, String charset)
        throws IOException, NumberFormatException {
    ByteArrayOutputStream b = new ByteArrayOutputStream();
    DataOutputStream d = new DataOutputStream(b);
    // flags     
    byte flags = 0x00;
    if ("TRUE".equals(useTimeStamp))
        flags |= 0x80;
    if ("TRUE".equals(useNumSeq))
        flags |= 0x40;
    if (MQTTPublisherGui.INT.equals(type_value))
        flags |= 0x20;
    if (MQTTPublisherGui.LONG.equals(type_value))
        flags |= 0x10;
    if (MQTTPublisherGui.FLOAT.equals(type_value))
        flags |= 0x08;
    if (MQTTPublisherGui.DOUBLE.equals(type_value))
        flags |= 0x04;
    if (MQTTPublisherGui.STRING.equals(type_value))
        flags |= 0x02;
    if (!"TEXT".equals(type_value)) {
        d.writeByte(flags);
    }
    // TimeStamp
    if ("TRUE".equals(useTimeStamp)) {
        Date date = new java.util.Date();
        d.writeLong(date.getTime());
    }
    // Number Sequence
    if ("TRUE".equals(useNumSeq)) {
        d.writeInt(numSeq++);
    }
    // Value

    if (MQTTPublisherGui.PSEUDO.equals(type_random)) {
        generator.setSeed(Long.parseLong(Seed));
        if (MQTTPublisherGui.INT.equals(type_value)) {
            d.writeInt(
                    generator.nextInt(Integer.parseInt(max) - Integer.parseInt(min)) + Integer.parseInt(min));
        } else if (MQTTPublisherGui.LONG.equals(type_value)) {
            long Max = Long.parseLong(max);
            long Min = Long.parseLong(min);
            d.writeLong((Math.abs(generator.nextLong() % (Max - Min)) + Min));
        } else if (MQTTPublisherGui.DOUBLE.equals(type_value)) {
            double Max = Double.parseDouble(max);
            double Min = Double.parseDouble(min);
            d.writeDouble((Min + (Max - Min) * generator.nextDouble()));
        } else if (MQTTPublisherGui.FLOAT.equals(type_value)) {
            float Max = Float.parseFloat(max);
            float Min = Float.parseFloat(min);
            d.writeFloat((Min + (Max - Min) * generator.nextFloat()));
        }
    } else if (MQTTPublisherGui.SECURE.equals(type_random)) {

        secureGenerator.setSeed(Long.parseLong(Seed));
        if (MQTTPublisherGui.INT.equals(type_value)) {
            d.writeInt(secureGenerator.nextInt(Integer.parseInt(max) - Integer.parseInt(min))
                    + Integer.parseInt(min));
        } else if (MQTTPublisherGui.LONG.equals(type_value)) {
            long Max = Long.parseLong(max);
            long Min = Long.parseLong(min);
            d.writeLong((Math.abs(secureGenerator.nextLong() % (Max - Min)) + Min));
        } else if (MQTTPublisherGui.DOUBLE.equals(type_value)) {
            double Max = Double.parseDouble(max);
            double Min = Double.parseDouble(min);
            d.writeDouble((Min + (Max - Min) * secureGenerator.nextDouble()));
        } else if (MQTTPublisherGui.FLOAT.equals(type_value)) {
            float Max = Float.parseFloat(max);
            float Min = Float.parseFloat(min);
            d.writeFloat((Min + (Max - Min) * secureGenerator.nextFloat()));
        }
    }

    // Format: Encoding        
    if (MQTTPublisherGui.BINARY.equals(format)) {
        BinaryCodec encoder = new BinaryCodec();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.BASE64.equals(format)) {
        return Base64.encodeBase64(b.toByteArray());
    } else if (MQTTPublisherGui.BINHEX.equals(format)) {
        Hex encoder = new Hex();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.PLAIN_TEXT.equals(format)) {
        String s = new String(b.toByteArray(), charset);
        return s.getBytes();

    } else
        return b.toByteArray();

}
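
A consumer would read the fields back in the same order. A hypothetical decoder sketch for the raw payload (before the BINARY/BASE64/BINHEX encoding step); payload is an assumed byte[], and the bit masks mirror the writer above:

DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
byte flags = in.readByte();
if ((flags & 0x80) != 0) {
    long timestamp = in.readLong(); // written when useTimeStamp is TRUE
}
if ((flags & 0x40) != 0) {
    int sequence = in.readInt(); // written when useNumSeq is TRUE
}
if ((flags & 0x20) != 0) {
    int intValue = in.readInt(); // INT payload
} else if ((flags & 0x10) != 0) {
    long longValue = in.readLong(); // LONG payload
} else if ((flags & 0x08) != 0) {
    float floatValue = in.readFloat(); // FLOAT payload
} else if ((flags & 0x04) != 0) {
    double doubleValue = in.readDouble(); // DOUBLE payload
}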

From source file:com.basho.riak.pbc.RiakClient.java

public ByteString[] store(RiakObject[] values, RequestMeta meta) throws IOException {

    RiakConnection c = getConnection();
    try {
        BulkReader reader = new BulkReader(c, values.length);
        Thread worker = new Thread(reader);
        worker.start();

        DataOutputStream dout = c.getOutputStream();

        for (int i = 0; i < values.length; i++) {
            RiakObject value = values[i];

            RiakKvPB.RpbPutReq.Builder builder = RiakKvPB.RpbPutReq.newBuilder().setBucket(value.getBucketBS())
                    .setKey(value.getKeyBS()).setContent(value.buildContent());

            if (value.getVclock() != null) {
                builder.setVclock(value.getVclock());
            }

            builder.setReturnBody(true);

            if (meta != null) {

                if (meta.writeQuorum != null) {
                    builder.setW(meta.writeQuorum.intValue());
                }

                if (meta.durableWriteQuorum != null) {
                    builder.setDw(meta.durableWriteQuorum.intValue());
                }
            }

            RpbPutReq req = builder.build();

            int len = req.getSerializedSize();
            dout.writeInt(len + 1);
            dout.write(MSG_PutReq);
            req.writeTo(dout);
        }

        dout.flush();

        try {
            worker.join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        return reader.vclocks;
    } finally {
        release(c);
    }
}
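
Each request above is framed as (length, one-byte message code, protobuf body). A sketch of the matching read side; MSG_PutResp and the parse call are our assumptions about the protocol, not code from RiakClient:

private RiakKvPB.RpbPutResp readPutResp(DataInputStream din) throws IOException {
    int len = din.readInt(); // matches dout.writeInt(len + 1)
    int code = din.read(); // matches dout.write(MSG_PutReq)
    if (code != MSG_PutResp) {
        throw new IOException("unexpected message code " + code);
    }
    byte[] body = new byte[len - 1]; // the rest of the frame is the protobuf message
    din.readFully(body);
    return RiakKvPB.RpbPutResp.parseFrom(body);
}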

From source file:org.apache.hadoop.ipc.Server.java

/**
 * Setup response for the IPC Call.
 * 
 * @param response buffer to serialize the response into
 * @param call {@link Call} to which we are setting up the response
 * @param status {@link Status} of the IPC call
 * @param rv return value for the IPC Call, if the call was successful
 * @param errorClass error class, if the call failed
 * @param error error message, if the call failed
 * @throws IOException
 */
private void setupResponse(ByteArrayOutputStream response, Call call, Status status, Writable rv,
        String errorClass, String error) throws IOException {
    response.reset();
    DataOutputStream out = new DataOutputStream(response);
    out.writeInt(call.id); // write call id
    out.writeInt(status.state); // write status

    if (status == Status.SUCCESS) {
        rv.write(out);
    } else {
        WritableUtils.writeString(out, errorClass);
        WritableUtils.writeString(out, error);
    }
    if (call.connection.useWrap) {
        wrapWithSasl(response, call);
    }
    call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}

From source file:voldemort.store.readonly.disk.HadoopStoreWriterPerBucket.java

@Override
public void write(BytesWritable key, Iterator<BytesWritable> iterator, Reporter reporter) throws IOException {

    // Read chunk id
    int chunkId = ReadOnlyUtils.chunk(key.get(), getNumChunks());

    // Write key and position
    this.indexFileStream[chunkId].write(key.get(), 0, key.getSize());
    this.indexFileSizeInBytes[chunkId] += key.getSize();
    this.indexFileStream[chunkId].writeInt(this.position[chunkId]);
    this.indexFileSizeInBytes[chunkId] += ByteUtils.SIZE_OF_INT;

    // Run key through checksum digest
    if (this.checkSumDigestIndex[chunkId] != null) {
        this.checkSumDigestIndex[chunkId].update(key.get(), 0, key.getSize());
        this.checkSumDigestIndex[chunkId].update(this.position[chunkId]);
    }

    short numTuples = 0;
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream valueStream = new DataOutputStream(stream);

    while (iterator.hasNext()) {
        BytesWritable writable = iterator.next();
        byte[] valueBytes = writable.get();
        int offsetTillNow = 0;

        // Read node Id
        if (this.nodeId == -1)
            this.nodeId = ByteUtils.readInt(valueBytes, offsetTillNow);
        offsetTillNow += ByteUtils.SIZE_OF_INT;

        // Read partition id
        if (this.partitionId == -1)
            this.partitionId = ByteUtils.readInt(valueBytes, offsetTillNow);
        offsetTillNow += ByteUtils.SIZE_OF_INT;

        // Read replica type
        if (getSaveKeys()) {
            if (this.replicaType == -1)
                this.replicaType = (int) ByteUtils.readBytes(valueBytes, offsetTillNow, ByteUtils.SIZE_OF_BYTE);
            offsetTillNow += ByteUtils.SIZE_OF_BYTE;
        }

        int valueLength = writable.getSize() - offsetTillNow;
        if (getSaveKeys()) {
            // Write (key_length, value_length, key, value)
            valueStream.write(valueBytes, offsetTillNow, valueLength);
        } else {
            // Write (value_length + value)
            valueStream.writeInt(valueLength);
            valueStream.write(valueBytes, offsetTillNow, valueLength);
        }

        numTuples++;

        // If we have multiple values for this md5, that is a collision:
        // either the data itself has duplicates, there are trillions of
        // keys, or someone is attempting something malicious. (Collisions
        // are expected when we save keys.)
        if (!getSaveKeys() && numTuples > 1)
            throw new VoldemortException("Duplicate keys detected for md5 sum "
                    + ByteUtils.toHexString(ByteUtils.copy(key.get(), 0, key.getSize())));

    }

    if (numTuples < 0) {
        // Overflow
        throw new VoldemortException("Found too many collisions: chunk " + chunkId + " has exceeded "
                + Short.MAX_VALUE + " collisions.");
    } else if (numTuples > 1) {
        // Update number of collisions + max keys per collision
        reporter.incrCounter(CollisionCounter.NUM_COLLISIONS, 1);

        long numCollisions = reporter.getCounter(CollisionCounter.MAX_COLLISIONS).getCounter();
        if (numTuples > numCollisions) {
            reporter.incrCounter(CollisionCounter.MAX_COLLISIONS, numTuples - numCollisions);
        }
    }

    // Flush the value
    valueStream.flush();
    byte[] value = stream.toByteArray();

    // Start writing to file now
    // First, if save keys flag set the number of keys
    if (getSaveKeys()) {

        this.valueFileStream[chunkId].writeShort(numTuples);
        this.valueFileSizeInBytes[chunkId] += ByteUtils.SIZE_OF_SHORT;
        this.position[chunkId] += ByteUtils.SIZE_OF_SHORT;

        if (this.checkSumDigestValue[chunkId] != null) {
            this.checkSumDigestValue[chunkId].update(numTuples);
        }
    }

    this.valueFileStream[chunkId].write(value);
    this.valueFileSizeInBytes[chunkId] += value.length;
    this.position[chunkId] += value.length;

    if (this.checkSumDigestValue[chunkId] != null) {
        this.checkSumDigestValue[chunkId].update(value);
    }

    if (this.position[chunkId] < 0)
        throw new VoldemortException("Chunk overflow exception: chunk " + chunkId + " has exceeded "
                + Integer.MAX_VALUE + " bytes.");

}
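
For the non-saveKeys layout, each tuple is written as (value_length, value); a reader sketch, where valueIn is a hypothetical DataInputStream over the value file:

int valueLength = valueIn.readInt(); // matches valueStream.writeInt(valueLength)
byte[] valueBytes = new byte[valueLength];
valueIn.readFully(valueBytes);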

From source file:org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageDecompressor.java

/**
 * Process image file.
 */
private void go() throws IOException {
    long start = System.currentTimeMillis();
    System.out.println("Decompressing image file: " + inputFile + " to " + outputFile);
    DataInputStream in = null;
    DataOutputStream out = null;

    try {
        // setup in
        PositionTrackingInputStream ptis = new PositionTrackingInputStream(
                new FileInputStream(new File(inputFile)));
        in = new DataInputStream(ptis);

        // read header information
        int imgVersion = in.readInt();
        if (!LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
            System.out.println("Image is not compressed. No output will be produced.");
            return;
        }
        int namespaceId = in.readInt();
        long numFiles = in.readLong();
        long genstamp = in.readLong();

        long imgTxId = -1;
        if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
            imgTxId = in.readLong();
        }
        FSImageCompression compression = FSImageCompression.readCompressionHeader(new Configuration(), in);
        if (compression.isNoOpCompression()) {
            System.out.println("Image is not compressed. No output will be produced.");
            return;
        }
        in = BufferedByteInputStream.wrapInputStream(compression.unwrapInputStream(in),
                FSImage.LOAD_SAVE_BUFFER_SIZE, FSImage.LOAD_SAVE_CHUNK_SIZE);
        System.out.println("Starting decompression.");

        // setup output
        out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(outputFile)));

        // write back the uncompressed information
        out.writeInt(imgVersion);
        out.writeInt(namespaceId);
        out.writeLong(numFiles);
        out.writeLong(genstamp);
        if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
            out.writeLong(imgTxId);
        }
        // no compression
        out.writeBoolean(false);

        // copy the data
        long size = new File(inputFile).length();
        // read in 1MB chunks
        byte[] block = new byte[1024 * 1024];
        while (true) {
            int bytesRead = in.read(block);
            if (bytesRead <= 0)
                break;
            out.write(block, 0, bytesRead);
            printProgress(ptis.getPos(), size);
        }

        out.close();

        long stop = System.currentTimeMillis();
        System.out.println("Input file : " + inputFile + " size: " + size);
        System.out.println("Output file: " + outputFile + " size: " + new File(outputFile).length());
        System.out.println("Decompression completed in " + (stop - start) + " ms.");
    } finally {
        if (in != null)
            in.close();
        if (out != null)
            out.close();
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore.java

private void addStoreOrUpdateOps(ArrayList<Op> opList, RMDelegationTokenIdentifier rmDTIdentifier,
        Long renewDate, boolean isUpdate) throws Exception {
    // store RM delegation token
    String nodeCreatePath = getNodePath(delegationTokensRootPath,
            DELEGATION_TOKEN_PREFIX + rmDTIdentifier.getSequenceNumber());
    ByteArrayOutputStream seqOs = new ByteArrayOutputStream();
    DataOutputStream seqOut = new DataOutputStream(seqOs);
    RMDelegationTokenIdentifierData identifierData = new RMDelegationTokenIdentifierData(rmDTIdentifier,
            renewDate);
    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug((isUpdate ? "Storing " : "Updating ") + "RMDelegationToken_"
                    + rmDTIdentifier.getSequenceNumber());
        }

        if (isUpdate) {
            opList.add(Op.setData(nodeCreatePath, identifierData.toByteArray(), -1));
        } else {
            opList.add(Op.create(nodeCreatePath, identifierData.toByteArray(), zkAcl, CreateMode.PERSISTENT));
            // Update Sequence number only while storing DT
            seqOut.writeInt(rmDTIdentifier.getSequenceNumber());
            if (LOG.isDebugEnabled()) {
                LOG.debug((isUpdate ? "Storing " : "Updating ") + dtSequenceNumberPath + ". SequenceNumber: "
                        + rmDTIdentifier.getSequenceNumber());
            }
            opList.add(Op.setData(dtSequenceNumberPath, seqOs.toByteArray(), -1));
        }
    } finally {
        seqOs.close();
    }
}
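
The sequence number stored above is just the four big-endian bytes produced by writeInt, so reading it back is symmetric; zkData is a hypothetical byte[] fetched from ZooKeeper:

DataInputStream seqIn = new DataInputStream(new ByteArrayInputStream(zkData));
int sequenceNumber = seqIn.readInt(); // matches seqOut.writeInt(rmDTIdentifier.getSequenceNumber())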

From source file:com.trigger_context.Main_Service.java

public void senderSync(DataInputStream in, DataOutputStream out, String folder) {
    String tfolder = folder + (folder.charAt(folder.length() - 1) == '/' ? "" : "/");
    File f = new File(folder);
    File file[] = f.listFiles();
    // noti(file.toString(),"");
    String md5 = null;
    HashMap<String, File> hm = new HashMap<String, File>();

    HashSet<String> A = new HashSet<String>();
    for (File element : file) {
        hm.put(md5 = calculateMD5(element), element);
        A.add(md5);
    }
    // noti(hm.toString(),"");
    int numB = 0;
    try {
        numB = in.readInt();
    } catch (IOException e) {
        noti("error reading 1st int in sendersync", "");
        e.printStackTrace();
    }
    HashSet<String> B = new HashSet<String>();
    for (int i = 0; i < numB; i++) {
        try {
            B.add(in.readUTF());
        } catch (IOException e1) {
            noti("error in readins md5", "");
            e1.printStackTrace();
        }
    }
    HashSet<String> aMb = new HashSet<String>(A);
    aMb.removeAll(B);
    int l1 = aMb.size();
    try {
        out.writeInt(l1);
    } catch (IOException e) {
        noti("error in writing 1st int", "");
        e.printStackTrace();
    }
    Iterator<String> itr = aMb.iterator();
    while (itr.hasNext()) {
        f = hm.get(itr.next());
        sendFile(out, f.getPath());
    }
    HashSet<String> bMa = new HashSet<String>(B);
    bMa.removeAll(A);
    int l2 = bMa.size();
    try {
        out.writeInt(l2);
    } catch (IOException e) {
        noti("error in writing 2nd int", "");
        e.printStackTrace();
    }
    itr = bMa.iterator();
    while (itr.hasNext()) {
        md5 = itr.next();
        try {
            out.writeUTF(md5);
        } catch (IOException e) {
            noti("error in sending md5", "");
            e.printStackTrace();
        }
        recvFile(in, folder);
    }
}
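
senderSync implies a peer running the mirror-image protocol. A hedged sketch of that receiver side; receiverSync is our name, and calculateMD5, sendFile, and recvFile are the same helpers used above:

public void receiverSync(DataInputStream in, DataOutputStream out, String folder) throws IOException {
    File[] files = new File(folder).listFiles();
    HashMap<String, File> byMd5 = new HashMap<String, File>();
    for (File element : files) {
        byMd5.put(calculateMD5(element), element);
    }

    out.writeInt(byMd5.size()); // matches in.readInt() in senderSync
    for (String md5 : byMd5.keySet()) {
        out.writeUTF(md5);
    }

    int incoming = in.readInt(); // files the sender has that we lack
    for (int i = 0; i < incoming; i++) {
        recvFile(in, folder);
    }

    int requested = in.readInt(); // files we have that the sender lacks
    for (int i = 0; i < requested; i++) {
        sendFile(out, byMd5.get(in.readUTF()).getPath()); // sender names each file by md5
    }
}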

From source file:org.apache.qpid.server.store.derby.DerbyMessageStore.java

private byte[] convertStringMapToBytes(final Map<String, String> arguments) throws AMQStoreException {
    byte[] argumentBytes;
    if (arguments == null) {
        argumentBytes = new byte[0];
    } else {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);

        try {
            dos.writeInt(arguments.size());
            for (Map.Entry<String, String> arg : arguments.entrySet()) {
                dos.writeUTF(arg.getKey());
                dos.writeUTF(arg.getValue());
            }
        } catch (IOException e) {
            // This should never happen
            throw new AMQStoreException(e.getMessage(), e);
        }
        argumentBytes = bos.toByteArray();
    }
    return argumentBytes;
}
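
A matching deserializer sketch for this byte format; the method name convertBytesToStringMap is our assumption and does not come from DerbyMessageStore:

private Map<String, String> convertBytesToStringMap(final byte[] argumentBytes) throws IOException {
    Map<String, String> arguments = new HashMap<String, String>();
    if (argumentBytes != null && argumentBytes.length > 0) {
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(argumentBytes));
        int size = dis.readInt(); // matches dos.writeInt(arguments.size())
        for (int i = 0; i < size; i++) {
            arguments.put(dis.readUTF(), dis.readUTF()); // matches the writeUTF pairs
        }
    }
    return arguments;
}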