Example usage for java.io DataOutputStream writeLong

Introduction

On this page you can find example usages of java.io.DataOutputStream#writeLong, collected from open-source projects.

Prototype

public final void writeLong(long v) throws IOException 

Document

Writes a long to the underlying output stream as eight bytes, high byte first.
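
To illustrate the round trip, here is a minimal, self-contained sketch (not taken from the projects below): it writes a long with writeLong and reads the same eight big-endian bytes back with DataInputStream#readLong.

import java.io.*;

public class WriteLongDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        dos.writeLong(1234567890123L); // written as eight bytes, high byte first
        dos.close();

        byte[] bytes = bos.toByteArray(); // bytes.length == 8
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
        System.out.println(dis.readLong()); // prints 1234567890123
        dis.close();
    }
}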

Usage

From source file: net.mybox.mybox.ClientStatus.java

private synchronized boolean updateLastSync() {
    lastSync = (new Date()).getTime();

    try {
        FileOutputStream fos = new FileOutputStream(lastSyncFile);
        DataOutputStream dos = new DataOutputStream(fos);
        dos.writeLong(lastSync);
        dos.close();
    } catch (Exception e) {
        return false;
    }

    return true;
}
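
The timestamp persisted above can be recovered with the matching DataInputStream#readLong. A minimal sketch of such a reader, assuming the same lastSyncFile field (the project's actual reading code is not shown here):

private synchronized long readLastSync() {
    try (DataInputStream dis = new DataInputStream(new FileInputStream(lastSyncFile))) {
        return dis.readLong(); // the eight bytes written by updateLastSync()
    } catch (Exception e) {
        return -1; // hypothetical sentinel meaning "never synced"
    }
}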

From source file: org.apache.hadoop.io.TestArrayOutputStream.java

private void runComparison(ArrayOutputStream aos, DataOutputStream dos, ByteArrayOutputStream bos)
        throws IOException {
    Random r = new Random();
    // byte
    int b = r.nextInt(128);
    aos.write(b);
    dos.write(b);

    // byte[]
    byte[] bytes = new byte[10];
    r.nextBytes(bytes);
    aos.write(bytes, 0, 10);
    dos.write(bytes, 0, 10);

    // Byte
    aos.writeByte(b);
    dos.writeByte(b);

    // boolean
    boolean bool = r.nextBoolean();
    aos.writeBoolean(bool);
    dos.writeBoolean(bool);

    // short
    short s = (short) r.nextInt();
    aos.writeShort(s);
    dos.writeShort(s);

    // char
    int c = r.nextInt();
    aos.writeChar(c);
    dos.writeChar(c);

    // int
    int i = r.nextInt();
    aos.writeInt(i);
    dos.writeInt(i);

    // long
    long l = r.nextLong();
    aos.writeLong(l);
    dos.writeLong(l);

    // float
    float f = r.nextFloat();
    aos.writeFloat(f);
    dos.writeFloat(f);

    // double
    double d = r.nextDouble();
    aos.writeDouble(d);
    dos.writeDouble(d);

    // strings
    String str = RandomStringUtils.random(20);
    aos.writeBytes(str);
    aos.writeChars(str);
    aos.writeUTF(str);
    dos.writeBytes(str);
    dos.writeChars(str);
    dos.writeUTF(str);

    byte[] expected = bos.toByteArray();
    assertEquals(expected.length, aos.size());

    byte[] actual = new byte[aos.size()];
    System.arraycopy(aos.getBytes(), 0, actual, 0, actual.length);
    // serialized bytes should be the same
    assertTrue(Arrays.equals(expected, actual));
}

From source file: org.apache.jmeter.protocol.mqtt.client.MqttPublisher.java

private byte[] createBigVolume(String useTimeStamp, String useNumberSeq, String format, String charset,
        String sizeArray) throws IOException, NumberFormatException {
    ByteArrayOutputStream b = new ByteArrayOutputStream();
    DataOutputStream d = new DataOutputStream(b);
    // flags     
    byte flags = 0x00;
    if ("TRUE".equals(useTimeStamp))
        flags |= 0x80;
    if ("TRUE".equals(useNumberSeq))
        flags |= 0x40;
    d.writeByte(flags);
    // TimeStamp
    if ("TRUE".equals(useTimeStamp)) {
        Date date = new java.util.Date();
        d.writeLong(date.getTime());
    }
    // Number Sequence
    if ("TRUE".equals(useNumberSeq)) {
        d.writeInt(numSeq++);

    }
    int size = Integer.parseInt(sizeArray);
    byte[] content = new byte[size];

    for (int i = 0; i < size; i++) {
        content[i] = (byte) (i % 10);
    }
    d.write(content);
    // Format: Encoding        
    if (MQTTPublisherGui.BINARY.equals(format)) {
        BinaryCodec encoder = new BinaryCodec();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.BASE64.equals(format)) {
        return Base64.encodeBase64(b.toByteArray());
    } else if (MQTTPublisherGui.BINHEX.equals(format)) {
        Hex encoder = new Hex();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.PLAIN_TEXT.equals(format)) {
        String s = new String(b.toByteArray(), charset);
        return s.getBytes();
    } else {
        return b.toByteArray();
    }
}
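
The payload built above starts with a flags byte (0x80 = timestamp present, 0x40 = sequence number present), optionally followed by an eight-byte timestamp written with writeLong and a four-byte sequence number. A minimal decoding sketch, assuming a hypothetical payload byte array holding the raw (un-encoded) bytes:

DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
byte flags = in.readByte();
if ((flags & 0x80) != 0) {
    long timestampMillis = in.readLong(); // written with d.writeLong(date.getTime())
    System.out.println("sent at " + new java.util.Date(timestampMillis));
}
if ((flags & 0x40) != 0) {
    System.out.println("sequence number: " + in.readInt());
}
// the remaining bytes are the generated content (the i % 10 pattern)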

From source file: org.apache.hadoop.hdfs.server.datanode.BlockSender.java

/**
 * sendBlock() is used to read a block and its metadata and stream the data to
 * either a client or to another datanode.
 *
 * @param out  stream to which the block is written
 * @param baseStream optional. if non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel,
 *        long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including CRC.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, BlockTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {

            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // ensure a minimum buffer size.
            maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO) + bytesPerChecksum - 1)
                    / bytesPerChecksum;

            // packet buffer has to be able to do a normal transfer in the case
            // of recomputing checksum
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            out.writeInt(0); // mark the end of block        
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }
    } catch (RuntimeException e) {
        LOG.error("unexpected exception sending block", e);

        throw new IOException("unexpected runtime exception", e);
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }

    blockReadFully = (initialOffset == 0 && offset >= blockLength);

    return totalRead;
}

From source file: org.apache.hadoop.hdfs.server.datanode.BlockSender.java

/**
 * sendBlock() is used to read a block and its metadata and stream the data to
 * either a client or to another datanode.
 *
 * @param out  stream to which the block is written
 * @param baseStream optional. if non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel,
 *        long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including CRC.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, DataTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;

    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;

    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket;
        int pktSize = PacketHeader.PKT_HEADER_LEN;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {

            FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();

            // blockInPosition also indicates sendChunks() uses transferTo.
            blockInPosition = fileChannel.position();
            streamForSendChunks = baseStream;

            // ensure a minimum buffer size.
            maxChunksPerPacket = (Math.max(HdfsConstants.IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO)
                    + bytesPerChecksum - 1) / bytesPerChecksum;

            // allocate smaller buffer while using transferTo(). 
            pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1,
                    (HdfsConstants.IO_FILE_BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
        }

        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            // send an empty packet to mark the end of the block
            sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks);
            out.flush();
        } catch (IOException e) { //socket error
            throw ioeToSocketException(e);
        }

        sentEntireByteRange = true;
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }

    return totalRead;
}

From source file: org.hyperic.hq.agent.server.AgentDListProvider.java

private synchronized void flush(boolean toShutdown) throws AgentStorageException {
    if (shutdown.get() && !toShutdown) {
        return;
    }
    final long start = System.currentTimeMillis();
    BufferedOutputStream bOs = null;
    FileOutputStream fOs = null;
    DataOutputStream dOs = null;
    if (!keyValDirty.get()) {
        return;
    }
    Entry<EncVal, EncVal> curr = null;
    try {
        fOs = new FileOutputStream(keyValFile);
        bOs = new BufferedOutputStream(fOs);
        dOs = new DataOutputStream(bOs);
        synchronized (keyVals) {
            dOs.writeLong(keyVals.size());
            for (Entry<EncVal, EncVal> entry : keyVals.entrySet()) {
                curr = entry;
                String encKey = entry.getKey().getEnc();
                String encVal = entry.getValue().getEnc();
                dOs.writeUTF(encKey);
                dOs.writeUTF(encVal);
            }
        }
    } catch (UTFDataFormatException e) {
        if (curr != null) {
            log.error("error writing key=" + curr.getKey().getVal() + ", value=" + curr.getValue().getVal(), e);
        } else {
            log.error(e, e);
        }
    } catch (IOException e) {
        log.error("Error flushing data", e);
        AgentStorageException toThrow = new AgentStorageException("Error flushing data: " + e);
        toThrow.initCause(e);
        throw toThrow;
    } finally {
        close(dOs);
        close(bOs);
        // clear the dirty flag (note: this runs in the finally block, even if the write failed)
        keyValDirty.set(false);
        close(fOs);
    }

    // After successful flush, update backup copy
    try {
        synchronized (keyVals) {
            FileUtil.copyFile(this.keyValFile, this.keyValFileBackup);
        }
    } catch (FileNotFoundException e) {
        log.warn(e);
        log.debug(e, e);
    } catch (IOException e) {
        log.error("Error backing up keyvals", e);
        AgentStorageException toThrow = new AgentStorageException("Error backing up keyvals: " + e);
        toThrow.initCause(e);
        throw toThrow;
    }
    agentStatsCollector.addStat(System.currentTimeMillis() - start,
            AgentStatsCollector.DISK_LIST_KEYVALS_FLUSH_TIME);
}

From source file: io.hops.metadata.util.RMUtilities.java

/**
 * Create a DelegationToken and the respective sequence number for that token
 * in NDB. These operations take place in the same transaction.
 *
 * @param rmDTIdentifier
 * @param renewDate
 * @param latestSequenceNumber
 * @throws IOException
 */
public static void setTokenAndSequenceNumber(final RMDelegationTokenIdentifier rmDTIdentifier,
        final Long renewDate, final int latestSequenceNumber) throws IOException {
    LightWeightRequestHandler setTokenAndSequenceNumberHandler = new LightWeightRequestHandler(
            YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            connector.beginTransaction();
            connector.writeLock();
            DelegationTokenDataAccess DA = (DelegationTokenDataAccess) RMStorageFactory
                    .getDataAccess(DelegationTokenDataAccess.class);
            //Create byte array for RMDelegationTokenIdentifier and renewdate
            ByteArrayOutputStream tokenOs = new ByteArrayOutputStream();
            DataOutputStream tokenOut = new DataOutputStream(tokenOs);
            byte[] identifierBytes;
            try {
                rmDTIdentifier.write(tokenOut);
                tokenOut.writeLong(renewDate);
                identifierBytes = tokenOs.toByteArray();
            } finally {
                tokenOs.close();
            }
            DA.createDelegationTokenEntry(
                    new DelegationToken(rmDTIdentifier.getSequenceNumber(), identifierBytes));

            //Persist sequence number
            SequenceNumberDataAccess SDA = (SequenceNumberDataAccess) RMStorageFactory
                    .getDataAccess(SequenceNumberDataAccess.class);
            SequenceNumber sn = new SequenceNumber(NDBRMStateStore.SEQNUMBER_ID, latestSequenceNumber);
            SDA.add(sn);
            connector.commit();
            return null;
        }
    };
    setTokenAndSequenceNumberHandler.handle();
}

From source file: org.globus.gsi.gssapi.test.GlobusGSSContextTest.java

private void runWrapTests(boolean privacy, boolean reqConf, int qop) throws Exception {

    assertTrue("client ctx not established.", clientContext.isEstablished());
    assertTrue("server ctx not established.", serverContext.isEstablished());

    int[] msgSize = { 10, 100, 1000, 10000, 16384, 100000 };

    for (int i = 0; i < msgSize.length; i++) {

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DataOutputStream dout = new DataOutputStream(out);

        while (dout.size() < msgSize[i]) {
            dout.writeLong(System.currentTimeMillis());
        }

        byte[] msg = out.toByteArray();

        MessageProp wProp = new MessageProp(qop, reqConf);

        byte[] wToken = clientContext.wrap(msg, 0, msg.length, wProp);

        assertEquals(privacy, wProp.getPrivacy());
        assertEquals(qop, wProp.getQOP());

        MessageProp uwProp = new MessageProp(reqConf);

        logger.debug("UNWRAPING HALF (" + (wToken.length / 2) + " BYTES) OF TOKEN OF LENGTH: " + wToken.length);
        byte[] uwToken1 = serverContext.unwrap(wToken, 0, wToken.length / 2, uwProp);

        byte[] uwToken2 = serverContext.unwrap(wToken, wToken.length / 2, wToken.length - (wToken.length / 2),
                uwProp);
        if (uwToken2 == null) {
            fail("unwrap of token unsuccessful; length: " + wToken.length);
        }

        assertEquals(privacy, uwProp.getPrivacy());
        assertEquals(qop, uwProp.getQOP());

        assertEquals(msg.length, ((uwToken1 != null) ? uwToken1.length : 0) + uwToken2.length);

        if (uwToken1 != null) {
            for (int j = 0; j < uwToken1.length; j++) {
                assertEquals(msg[j], uwToken1[j]);
            }
        }
        for (int j = 0; j < uwToken2.length; j++) {
            assertEquals(msg[((uwToken1 != null) ? uwToken1.length : 0) + j], uwToken2[j]);
        }

    }
}

From source file: com.splout.db.dnode.HttpFileExchanger.java

public void send(final String tablespace, final int partition, final long version, final File binaryFile,
        final String url, boolean blockUntilComplete) {
    Future<?> future = clientExecutors.submit(new Runnable() {
        @Override
        public void run() {
            DataOutputStream writer = null;
            InputStream input = null;
            try {
                HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection();
                connection.setChunkedStreamingMode(config.getInt(FetcherProperties.DOWNLOAD_BUFFER));
                connection.setDoOutput(true);
                connection.setRequestProperty("filename", binaryFile.getName());
                connection.setRequestProperty("tablespace", tablespace);
                connection.setRequestProperty("partition", partition + "");
                connection.setRequestProperty("version", version + "");

                Checksum checkSum = new CRC32();

                writer = new DataOutputStream(new GZIPOutputStream(connection.getOutputStream()));
                // 1 - write file size
                writer.writeLong(binaryFile.length());
                writer.flush();
                // 2 - write file content
                input = new FileInputStream(binaryFile);
                byte[] buffer = new byte[config.getInt(FetcherProperties.DOWNLOAD_BUFFER)];
                long wrote = 0;
                for (int length = 0; (length = input.read(buffer)) > 0;) {
                    writer.write(buffer, 0, length);
                    checkSum.update(buffer, 0, length);
                    wrote += length;
                }
                // 3 - add the CRC so that we can verify the download
                writer.writeLong(checkSum.getValue());
                writer.flush();
                log.info("Sent file " + binaryFile + " to " + url + " with #bytes: " + wrote + " and checksum: "
                        + checkSum.getValue());
            } catch (IOException e) {
                log.error(e);
            } finally {
                try {
                    if (input != null) {
                        input.close();
                    }
                    if (writer != null) {
                        writer.close();
                    }
                } catch (IOException ignore) {
                }
            }
        }
    });
    try {
        if (blockUntilComplete) {
            // poll until the upload task completes or is cancelled
            while (!future.isDone() && !future.isCancelled()) {
                Thread.sleep(1000);
            }
        }
    } catch (InterruptedException e) {
        // interrupted!
    }
}
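
The stream written above has a simple layout: an eight-byte file length (writeLong), the raw file bytes, then an eight-byte CRC32 (writeLong again), all wrapped in GZIP. A minimal sketch of the matching receiver, assuming a hypothetical requestBody stream over the still-compressed bytes (the real receiver lives elsewhere in Splout DB):

private void receive(InputStream requestBody) throws IOException {
    DataInputStream in = new DataInputStream(new GZIPInputStream(requestBody));
    long expectedLength = in.readLong(); // 1 - file size, written with writeLong
    CRC32 checkSum = new CRC32();
    byte[] buffer = new byte[8192];
    long read = 0;
    while (read < expectedLength) {
        int n = in.read(buffer, 0, (int) Math.min(buffer.length, expectedLength - read));
        if (n < 0) {
            throw new EOFException("stream ended before " + expectedLength + " bytes");
        }
        // a real receiver would also write the bytes to disk here
        checkSum.update(buffer, 0, n);
        read += n;
    }
    long expectedCrc = in.readLong(); // 3 - trailing CRC, also written with writeLong
    if (expectedCrc != checkSum.getValue()) {
        throw new IOException("CRC mismatch: download corrupted");
    }
}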

From source file: com.ebay.erl.mobius.core.collection.BigTupleList.java

/**
 * Flush {@link Tuple}s in {@link #buffer_in_memory} to
 * disk. A new local file is created by {@link #newLocalFile()},
 * and the {@link File} reference is stored in {@link #buffer_on_disk} for
 * future reference.
 */
private void flushToDisk() {
    this.flushing = true;
    File localFile;

    if (this.buffer_in_memory.size() == 0) {
        // no tuple in memory
        return;
    }
    long start = System.currentTimeMillis();
    long availableMemory = this.availableMemory();

    String message = Thread.currentThread().toString() + " BID[" + this._ID + "] "
            + "writing in-memory tuples (" + getNumberFormat().format(this.buffer_in_memory.size())
            + " entries) into disk, " + "available memory:" + availableMemory / _MB + "MB.";

    LOGGER.info(message);
    if (this.reporter != null) {
        this.reporter.setStatus(message);
        this.reporter.progress();
    }

    try {
        // check if we still have enough local space to prevent 
        // full of disk exception.
        long freeDiskSpace = this.workOutput.getFreeSpace() / _MB;
        if (freeDiskSpace < 300) {
            // less than 300MB free space left, throw
            // exceptions
            throw new IOException("Not enough space left (" + freeDiskSpace + "MB remaining) on "
                    + this.workOutput.getAbsolutePath() + ".");
        }

        localFile = this.newLocalFile();
        DataOutputStream out = new DataOutputStream(
                new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(localFile))));

        // write the tuple schema in the header
        String[] tupleSchema = this.buffer_in_memory.get(0).getSchema();
        out.writeInt(tupleSchema.length);
        if (tupleSchema.length == 0)
            throw new IllegalArgumentException("Tuple with empty schema!");
        for (String aColumn : tupleSchema) {
            out.writeUTF(aColumn);
        }

        // write number of tuple in this file
        out.writeLong(this.buffer_in_memory.size());

        if (this.comparator != null) {
            // sort the Tuple in memory first
            Collections.sort(this.buffer_in_memory, this.comparator);
        }

        // write all the tuple in memory buffer
        long counts = 0L;
        for (Tuple aTuple : this.buffer_in_memory) {
            aTuple.write(out);
            counts++;
            if (counts % 5000 == 0 && this.reporter != null) // report progress every 5000 tuples
                this.reporter.progress();
        }
        out.flush();
        out.close();

        // clear memory buffer
        this.buffer_in_memory.clear();

        long end = System.currentTimeMillis();

        LOGGER.info(Thread.currentThread().toString() + " BID[" + this._ID + "] " + "Write has completed, cost "
                + ((end - start) / 1000) + " seconds, " + "available memory:" + this.availableMemory() / _MB
                + "MB, " + "wrote to:" + localFile.getAbsolutePath() + "(size:"
                + localFile.getTotalSpace() / _MB + "MB) , " + "in memory tuples numbers:"
                + this.buffer_in_memory.size());

        this.flushing = false;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
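
Given the header format written above (an int column count, each column name as modified UTF, then the tuple count as a long), a minimal sketch of reading the header back, assuming the same GZIP-compressed layout produced by flushToDisk():

DataInputStream in = new DataInputStream(
        new GZIPInputStream(new BufferedInputStream(new FileInputStream(localFile))));
int columnCount = in.readInt(); // number of schema columns
String[] schema = new String[columnCount];
for (int i = 0; i < columnCount; i++) {
    schema[i] = in.readUTF(); // column names
}
long tupleCount = in.readLong(); // written with out.writeLong(...) above
// ...followed by tupleCount serialized Tuples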