List of usage examples for java.io DataOutputStream writeLong
public final void writeLong(long v) throws IOException
Writes a long to the underlying output stream as eight bytes, high byte first.
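Before the real-world examples, a minimal self-contained sketch of the byte layout writeLong produces (the class name is illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteLongDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        // writeLong emits exactly eight bytes, most significant byte first
        out.writeLong(0x0102030405060708L);
        out.flush();

        for (byte b : bytes.toByteArray()) {
            System.out.printf("%02x ", b); // prints: 01 02 03 04 05 06 07 08
        }
    }
}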
From source file:com.jivesoftware.os.amza.service.AmzaService.java

@Override
public void availableRowsStream(boolean system,
        ChunkWriteable writeable,
        RingMember remoteRingMember,
        TimestampedRingHost remoteTimestampedRingHost,
        long takeSessionId,
        long sharedKey,
        long heartbeatIntervalMillis) throws Exception {
    ringStoreWriter.register(remoteRingMember, remoteTimestampedRingHost.ringHost,
        remoteTimestampedRingHost.timestampId, false);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new SnappyOutputStream(out), 8192));
    takeCoordinator.availableRowsStream(system,
        ringStoreReader,
        partitionStripeProvider,
        remoteRingMember,
        takeSessionId,
        sharedKey,
        heartbeatIntervalMillis,
        (partitionName, txId) -> {
            dos.write(1);
            byte[] bytes = partitionName.toBytes();
            dos.writeInt(bytes.length);
            dos.write(bytes);
            dos.writeLong(txId);
        },
        () -> {
            if (dos.size() > 0) {
                dos.flush();
                byte[] chunk = out.toByteArray();
                writeable.write(chunk);
                /*LOG.info("Offered rows for {} length={}", remoteRingMember, chunk.length);*/
                out.reset();
            }
            return null;
        },
        () -> {
            dos.write(1);
            dos.writeInt(0);
            dos.flush();
            writeable.write(out.toByteArray());
            out.reset();
            return null;
        });
    dos.write(0);
    dos.flush();
    writeable.write(out.toByteArray());
}
From source file:org.apache.fontbox.ttf.TTFSubsetter.java
private void writeLongDateTime(DataOutputStream out, Calendar calendar) throws IOException {
    // inverse operation of TTFDataStream.readInternationalDate()
    GregorianCalendar cal = new GregorianCalendar(1904, 0, 1);
    long millisFor1904 = cal.getTimeInMillis();
    long secondsSince1904 = (calendar.getTimeInMillis() - millisFor1904) / 1000L;
    out.writeLong(secondsSince1904);
}
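A hedged round-trip sketch of the inverse conversion (readLongDateTime is illustrative; per the comment above, the real counterpart is TTFDataStream.readInternationalDate()):

private Calendar readLongDateTime(DataInputStream in) throws IOException {
    // the epoch matches the GregorianCalendar(1904, 0, 1) used above
    long secondsSince1904 = in.readLong();
    GregorianCalendar epoch1904 = new GregorianCalendar(1904, 0, 1);
    Calendar result = Calendar.getInstance();
    result.setTimeInMillis(epoch1904.getTimeInMillis() + secondsSince1904 * 1000L);
    return result;
}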
From source file:edu.umn.cs.spatialHadoop.nasa.StockQuadTree.java
/**
 * Merges multiple trees of the same spatial resolution into one tree of
 * lower temporal resolution (larger time) and the same spatial resolution.
 * @param inTrees
 * @param outTree
 * @throws IOException
 */
public static void merge(DataInputStream[] inTrees, DataOutputStream outTree) throws IOException {
    // Write the spatial resolution of the output as the same of all input trees
    int resolution = inTrees[0].readInt();
    short fillValue = inTrees[0].readShort();
    for (int iTree = 1; iTree < inTrees.length; iTree++) {
        int iResolution = inTrees[iTree].readInt();
        int iFillValue = inTrees[iTree].readShort();
        if (resolution != iResolution || fillValue != iFillValue)
            throw new RuntimeException("Tree #0 has a resolution of " + resolution
                + " not compatible with resolution " + iResolution + " of Tree #" + iTree);
    }
    outTree.writeInt(resolution);
    outTree.writeShort(fillValue);

    // Sum up the cardinality of all input trees
    int cardinality = 0;
    int cardinalities[] = new int[inTrees.length];
    for (int iTree = 0; iTree < inTrees.length; iTree++)
        cardinality += (cardinalities[iTree] = inTrees[iTree].readInt());
    outTree.writeInt(cardinality);

    // Write timestamps of all trees
    for (int iTree = 0; iTree < inTrees.length; iTree++) {
        outTree.writeLong(inTrees[iTree].readLong());
    }

    // Merge sorted values in all input trees
    byte[] buffer = new byte[1024 * 1024];
    int size = resolution * resolution;
    while (size-- > 0) {
        for (int iTree = 0; iTree < inTrees.length; iTree++) {
            int sizeToRead = ValueSize * cardinalities[iTree]; // sizeof(short) * c
            while (sizeToRead > 0) {
                int bytesRead = inTrees[iTree].read(buffer, 0, Math.min(sizeToRead, buffer.length));
                if (bytesRead < 0) // guard: read() returns -1 at end of stream
                    throw new EOFException("Unexpected end of input in tree #" + iTree);
                outTree.write(buffer, 0, bytesRead);
                sizeToRead -= bytesRead;
            }
        }
    }

    // Merge aggregate values of all nodes
    Node treeNode = new Node();
    StockQuadTree stockQuadTree = getOrCreateStockQuadTree(resolution);
    int numOfNodes = stockQuadTree.nodesID.length;
    for (int iNode = 0; iNode < numOfNodes; iNode++) {
        Node outputNode = new Node();
        for (int iTree = 0; iTree < inTrees.length; iTree++) {
            treeNode.readFields(inTrees[iTree]);
            outputNode.accumulate(treeNode);
        }
        outputNode.write(outTree);
    }
}
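To make the on-disk layout concrete, a hedged reader-side sketch (readMergedHeader is illustrative, not part of StockQuadTree): merge() writes the resolution, the fill value, the total cardinality, then one long timestamp per input tree, all recoverable with the matching DataInputStream calls:

static long[] readMergedHeader(DataInputStream in, int numTrees) throws IOException {
    int resolution = in.readInt();    // spatial resolution shared by all input trees
    short fillValue = in.readShort(); // fill value shared by all input trees
    int cardinality = in.readInt();   // sum of the input cardinalities
    long[] timestamps = new long[numTrees];
    for (int i = 0; i < numTrees; i++) {
        timestamps[i] = in.readLong(); // mirrors outTree.writeLong(...) above
    }
    return timestamps;
}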
From source file:tvbrowser.core.PluginLoader.java
/**
 * Saves the information of a plugin to disk, so it does not need to be loaded next time
 * @param pluginFile full plugin file name
 * @param proxy proxy of the plugin
 */
private void saveProxyInfo(File pluginFile, JavaPluginProxy proxy) {
    try {
        String proxyFileName = getProxyFileName(pluginFile);
        DataOutputStream out = new DataOutputStream(
            new BufferedOutputStream(new FileOutputStream(proxyFileName)));
        PluginInfo info = proxy.getInfo();
        out.writeUTF(info.getName());
        out.writeUTF(info.getAuthor());
        out.writeUTF(info.getDescription());
        String license = info.getLicense();
        if (license == null) {
            license = "";
        }
        out.writeUTF(license);
        info.getVersion().writeData(out); // write version
        out.writeUTF(proxy.getId());
        out.writeLong(pluginFile.length());
        out.writeUTF(proxy.getPluginFileName());
        out.close();
        // also store the plugin icon, if it is not yet available
        String iconFileName = getProxyIconFileName(pluginFile);
        File iconFile = new File(iconFileName);
        if (!iconFile.exists()) {
            Icon pluginIcon = proxy.getPluginIcon();
            if (pluginIcon != null && pluginIcon instanceof ImageIcon) {
                IOUtilities.writeImageIconToFile((ImageIcon) pluginIcon, "png", iconFile);
            }
        }
    } catch (Exception e) {
        // ignore: the proxy info is only a cache; the plugin is simply reloaded next time
    }
}
From source file:org.apache.jmeter.protocol.mqttws.client.MqttPublisher.java
public byte[] createPayload(String message, String useTimeStamp, String useNumSeq, String type_value,
        String format, String charset) throws IOException, NumberFormatException {
    ByteArrayOutputStream b = new ByteArrayOutputStream();
    DataOutputStream d = new DataOutputStream(b);
    // flags
    byte flags = 0x00;
    if ("TRUE".equals(useTimeStamp))
        flags |= 0x80;
    if ("TRUE".equals(useNumSeq))
        flags |= 0x40;
    if (MQTTPublisherGui.INT.equals(type_value))
        flags |= 0x20;
    if (MQTTPublisherGui.LONG.equals(type_value))
        flags |= 0x10;
    if (MQTTPublisherGui.FLOAT.equals(type_value))
        flags |= 0x08;
    if (MQTTPublisherGui.DOUBLE.equals(type_value))
        flags |= 0x04;
    if (MQTTPublisherGui.STRING.equals(type_value))
        flags |= 0x02;
    if (!"TEXT".equals(type_value)) {
        d.writeByte(flags);
    }
    // TimeStamp
    if ("TRUE".equals(useTimeStamp)) {
        Date date = new java.util.Date();
        d.writeLong(date.getTime()); // epoch millis as eight big-endian bytes
    }
    // Number Sequence
    if ("TRUE".equals(useNumSeq)) {
        d.writeInt(numSeq++);
    }
    // Value
    if (MQTTPublisherGui.INT.equals(type_value)) {
        d.writeInt(Integer.parseInt(message));
    } else if (MQTTPublisherGui.LONG.equals(type_value)) {
        d.writeLong(Long.parseLong(message));
    } else if (MQTTPublisherGui.DOUBLE.equals(type_value)) {
        d.writeDouble(Double.parseDouble(message));
    } else if (MQTTPublisherGui.FLOAT.equals(type_value)) {
        d.writeFloat(Float.parseFloat(message)); // four bytes, consistent with the FLOAT flag
    } else if (MQTTPublisherGui.STRING.equals(type_value)) {
        d.write(message.getBytes());
    } else if ("TEXT".equals(type_value)) {
        d.write(message.getBytes());
    } else if ("TEXT_POOL".equals(type_value)) {
        String random_message = createRandomMessageFromPool(message);
        d.write(random_message.getBytes());
    }
    // Format: Encoding
    if (MQTTPublisherGui.BINARY.equals(format)) {
        BinaryCodec encoder = new BinaryCodec();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.BASE64.equals(format)) {
        return Base64.encodeBase64(b.toByteArray());
    } else if (MQTTPublisherGui.BINHEX.equals(format)) {
        Hex encoder = new Hex();
        return encoder.encode(b.toByteArray());
    } else if (MQTTPublisherGui.PLAIN_TEXT.equals(format)) {
        String s = new String(b.toByteArray(), charset);
        return s.getBytes();
    } else
        return b.toByteArray();
}
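A hedged decoder sketch for the header written above (PayloadHeaderReader is illustrative, not part of the JMeter plugin; it assumes a non-TEXT payload, so the leading flags byte is present):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class PayloadHeaderReader {
    static void readHeader(byte[] payload) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
        byte flags = in.readByte();
        if ((flags & 0x80) != 0) {
            long timestamp = in.readLong(); // matches d.writeLong(date.getTime()) above
            System.out.println("timestamp = " + timestamp);
        }
        if ((flags & 0x40) != 0) {
            System.out.println("seq = " + in.readInt()); // matches d.writeInt(numSeq++)
        }
    }
}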
From source file:bobs.is.compress.sevenzip.SevenZOutputFile.java
private void writeFileATimes(final DataOutput header) throws IOException {
    int numAccessDates = 0;
    for (final SevenZArchiveEntry entry : files) {
        if (entry.getHasAccessDate()) {
            ++numAccessDates;
        }
    }
    if (numAccessDates > 0) {
        header.write(NID.kATime);
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final DataOutputStream out = new DataOutputStream(baos);
        if (numAccessDates != files.size()) {
            out.write(0);
            final BitSet aTimes = new BitSet(files.size());
            for (int i = 0; i < files.size(); i++) {
                aTimes.set(i, files.get(i).getHasAccessDate());
            }
            writeBits(out, aTimes, files.size());
        } else {
            out.write(1); // "allAreDefined" == true
        }
        out.write(0);
        for (final SevenZArchiveEntry entry : files) {
            if (entry.getHasAccessDate()) {
                out.writeLong(Long.reverseBytes(SevenZArchiveEntry.javaTimeToNtfsTime(entry.getAccessDate())));
            }
        }
        out.flush();
        final byte[] contents = baos.toByteArray();
        writeUint64(header, contents.length);
        header.write(contents);
    }
}
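A minimal self-contained sketch of the byte-order trick used here and in the two methods below: writeLong always writes big-endian, so reversing the bytes first produces the little-endian layout that the 7z format stores NTFS timestamps in (the class name is illustrative):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class LittleEndianLongDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);

        // big-endian writeLong + Long.reverseBytes == little-endian output
        out.writeLong(Long.reverseBytes(0x0102030405060708L));

        for (byte b : buf.toByteArray()) {
            System.out.printf("%02x ", b); // prints: 08 07 06 05 04 03 02 01
        }
    }
}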
From source file:bobs.is.compress.sevenzip.SevenZOutputFile.java
private void writeFileCTimes(final DataOutput header) throws IOException {
    int numCreationDates = 0;
    for (final SevenZArchiveEntry entry : files) {
        if (entry.getHasCreationDate()) {
            ++numCreationDates;
        }
    }
    if (numCreationDates > 0) {
        header.write(NID.kCTime);
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final DataOutputStream out = new DataOutputStream(baos);
        if (numCreationDates != files.size()) {
            out.write(0);
            final BitSet cTimes = new BitSet(files.size());
            for (int i = 0; i < files.size(); i++) {
                cTimes.set(i, files.get(i).getHasCreationDate());
            }
            writeBits(out, cTimes, files.size());
        } else {
            out.write(1); // "allAreDefined" == true
        }
        out.write(0);
        for (final SevenZArchiveEntry entry : files) {
            if (entry.getHasCreationDate()) {
                out.writeLong(Long.reverseBytes(SevenZArchiveEntry.javaTimeToNtfsTime(entry.getCreationDate())));
            }
        }
        out.flush();
        final byte[] contents = baos.toByteArray();
        writeUint64(header, contents.length);
        header.write(contents);
    }
}
From source file:bobs.is.compress.sevenzip.SevenZOutputFile.java
private void writeFileMTimes(final DataOutput header) throws IOException {
    int numLastModifiedDates = 0;
    for (final SevenZArchiveEntry entry : files) {
        if (entry.getHasLastModifiedDate()) {
            ++numLastModifiedDates;
        }
    }
    if (numLastModifiedDates > 0) {
        header.write(NID.kMTime);
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final DataOutputStream out = new DataOutputStream(baos);
        if (numLastModifiedDates != files.size()) {
            out.write(0);
            final BitSet mTimes = new BitSet(files.size());
            for (int i = 0; i < files.size(); i++) {
                mTimes.set(i, files.get(i).getHasLastModifiedDate());
            }
            writeBits(out, mTimes, files.size());
        } else {
            out.write(1); // "allAreDefined" == true
        }
        out.write(0);
        for (final SevenZArchiveEntry entry : files) {
            if (entry.getHasLastModifiedDate()) {
                out.writeLong(Long.reverseBytes(SevenZArchiveEntry.javaTimeToNtfsTime(entry.getLastModifiedDate())));
            }
        }
        out.flush();
        final byte[] contents = baos.toByteArray();
        writeUint64(header, contents.length);
        header.write(contents);
    }
}
From source file:org.apache.hadoop.hdfs.server.datanode.IABlockSender.java
/**
 * sendBlock() is used to read (and encode) block and its metadata and stream the data to
 * either a client or to another datanode
 *
 * @param out stream to which the block is written to
 * @param baseStream optional. if non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel, long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including crc.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, DataTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;
    if (throttler == null)
        LOG.info("throttler is null");
    else
        LOG.info("throttler bandwidth: " + throttler.getBandwidth());
    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;
    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { // socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket = 1;
        int pktSize = PacketHeader.PKT_HEADER_LEN;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {
            //FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();
            // blockInPosition also indicates sendChunks() uses transferTo.
            //blockInPosition = fileChannel.position();
            //streamForSendChunks = baseStream;
            // assure a minimum buffer size.
            //maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO)
            //    + bytesPerChecksum - 1) / bytesPerChecksum;
            // allocate smaller buffer while using transferTo().
            //pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += ((bytesPerChecksum + checksumSize) * maxChunksPerPacket);
        }

        // queue for passing data from encode to output
        BlockingQueue<ByteBuffer> q = new ArrayBlockingQueue<ByteBuffer>(64);
        // Encode thread
        IAREncoder encoder = new IAREncoder(q);
        new Thread(encoder).start();

        //LOG.info("before allocate buf, we have pktSize " + pktSize + " maxchunksperpacket "
        //    + maxChunksPerPacket + " byteperchecksum " + bytesPerChecksum + " checksum size "
        //    + checksumSize);
        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        // output, send chunks
        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks, q);
            //LOG.info("Send chunks with len:" + len + " and seq:" + seqno);
            //LOG.info("sendChunks offset:" + offset + " endOffset:" + endOffset);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            // send an empty packet to mark the end of the block
            sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks, q);
            out.flush();
            LOG.info("Send last Chunk");
        } catch (IOException e) { // socket error
            LOG.info("IOException in sendChunks");
            throw ioeToSocketException(e);
        }
        sentEntireByteRange = true;
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }
    blockReadFully = initialOffset == 0 && offset >= replicaVisibleLength;
    return totalRead;
}
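A hedged sketch of the peer side of the small header above (readChunkOffset is illustrative; the real HDFS block reader does considerably more): when chunkOffsetOK is set, the checksum header is followed by a single 8-byte offset:

static long readChunkOffset(DataInputStream in) throws IOException {
    // ... the checksum header written by checksum.writeHeader(out) is consumed first ...
    return in.readLong(); // the offset written by out.writeLong(offset)
}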
From source file:org.apache.hadoop.hdfs.server.datanode.PMBlockSender.java
/**
 * sendBlock() is used to read (and encode) block and its metadata and stream the data to
 * either a client or to another datanode
 *
 * @param out stream to which the block is written to
 * @param baseStream optional. if non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel, long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including crc.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, DataTransferThrottler throttler)
        throws IOException {
    if (out == null) {
        throw new IOException("out stream is null");
    }
    this.throttler = throttler;
    if (throttler == null)
        LOG.info("throttler is null");
    else
        LOG.info("throttler bandwidth: " + throttler.getBandwidth());
    long initialOffset = offset;
    long totalRead = 0;
    OutputStream streamForSendChunks = out;
    final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
    try {
        try {
            checksum.writeHeader(out);
            if (chunkOffsetOK) {
                out.writeLong(offset);
            }
            out.flush();
        } catch (IOException e) { // socket error
            throw ioeToSocketException(e);
        }

        int maxChunksPerPacket = 1;
        int pktSize = PacketHeader.PKT_HEADER_LEN;

        if (transferToAllowed && !verifyChecksum && baseStream instanceof SocketOutputStream
                && blockIn instanceof FileInputStream) {
            //FileChannel fileChannel = ((FileInputStream) blockIn).getChannel();
            // blockInPosition also indicates sendChunks() uses transferTo.
            //blockInPosition = fileChannel.position();
            //streamForSendChunks = baseStream;
            // assure a minimum buffer size.
            //maxChunksPerPacket = (Math.max(BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO)
            //    + bytesPerChecksum - 1) / bytesPerChecksum;
            // allocate smaller buffer while using transferTo().
            //pktSize += checksumSize * maxChunksPerPacket;
        } else {
            maxChunksPerPacket = Math.max(1, (BUFFER_SIZE + bytesPerChecksum - 1) / bytesPerChecksum);
            pktSize += ((bytesPerChecksum + checksumSize) * maxChunksPerPacket);
        }

        // queue for passing data from encode to output
        BlockingQueue<ByteBuffer> q = new ArrayBlockingQueue<ByteBuffer>(64);
        // Encode thread
        PMREncoder encoder = new PMREncoder(q);
        new Thread(encoder).start();

        //LOG.info("before allocate buf, we have pktSize " + pktSize + " maxchunksperpacket "
        //    + maxChunksPerPacket + " byteperchecksum " + bytesPerChecksum + " checksum size "
        //    + checksumSize);
        ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);

        // output, send chunks
        while (endOffset > offset) {
            long len = sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks, q);
            //LOG.info("Send chunks with len:" + len + " and seq:" + seqno);
            //LOG.info("sendChunks offset:" + offset + " endOffset:" + endOffset);
            offset += len;
            totalRead += len + ((len + bytesPerChecksum - 1) / bytesPerChecksum * checksumSize);
            seqno++;
        }
        try {
            // send an empty packet to mark the end of the block
            sendChunks(pktBuf, maxChunksPerPacket, streamForSendChunks, q);
            out.flush();
            LOG.info("Send last Chunk");
        } catch (IOException e) { // socket error
            LOG.info("IOException in sendChunks");
            throw ioeToSocketException(e);
        }
        sentEntireByteRange = true;
    } finally {
        if (clientTraceFmt != null) {
            final long endTime = System.nanoTime();
            ClientTraceLog.info(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime));
        }
        close();
    }
    blockReadFully = initialOffset == 0 && offset >= replicaVisibleLength;
    return totalRead;
}