List of usage examples for java.io DataOutput write
void write(byte b[]) throws IOException;
Parameters: b - the data to write.
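DataOutput.write(byte[]) writes every byte of the array b to the stream, in order. Before the real-world usages below, here is a minimal standalone example using DataOutputStream, the most common DataOutput implementation (file name "data.bin" is illustrative):

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class WriteByteArrayExample {
    public static void main(String[] args) throws IOException {
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream("data.bin"))) {
            // write(byte[]) emits all bytes of the array, in order
            out.write(payload);
        }
    }
}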
From source file: edu.umn.cs.spatialHadoop.nasa.HDFRasterLayer.java

@Override
public void write(DataOutput out) throws IOException {
    super.write(out);
    out.writeLong(timestamp);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    GZIPOutputStream gzos = new GZIPOutputStream(baos);
    ByteBuffer bbuffer = ByteBuffer.allocate(getHeight() * 2 * 8 + 8);
    bbuffer.putInt(getWidth());
    bbuffer.putInt(getHeight());
    gzos.write(bbuffer.array(), 0, bbuffer.position());
    for (int x = 0; x < getWidth(); x++) {
        bbuffer.clear();
        for (int y = 0; y < getHeight(); y++) {
            bbuffer.putLong(sum[x][y]);
            bbuffer.putLong(count[x][y]);
        }
        gzos.write(bbuffer.array(), 0, bbuffer.position());
    }
    gzos.close();
    byte[] serializedData = baos.toByteArray();
    out.writeInt(serializedData.length);
    out.write(serializedData);
}
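The read side is not shown on this page. As a hedged sketch inferred purely from the write path above (not code from HDFRasterLayer), a reader that has already consumed the super fields and the timestamp could restore the grid like this; the method name readGzippedGrid is illustrative. Needs java.io.* and java.util.zip.GZIPInputStream. ByteBuffer and DataInputStream are both big-endian, so the layouts match:

// Hypothetical counterpart to write() above: read the int length,
// un-gzip the payload, then restore width/height and the sum/count grid.
void readGzippedGrid(DataInput in) throws IOException {
    int len = in.readInt();
    byte[] compressed = new byte[len];
    in.readFully(compressed);
    DataInputStream gzin = new DataInputStream(
        new GZIPInputStream(new ByteArrayInputStream(compressed)));
    int width = gzin.readInt();
    int height = gzin.readInt();
    long[][] sum = new long[width][height];
    long[][] count = new long[width][height];
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            sum[x][y] = gzin.readLong();
            count[x][y] = gzin.readLong();
        }
    }
    gzin.close();
}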
From source file: com.ebay.erl.mobius.core.model.Tuple.java

/**
 * Serialize this tuple to the output <code>out</code>.
 * <p>
 * When serializing, the values are stored in the order
 * of the schema names' ordering. See {@link #setSchema(String[])}
 * for more explanation.
 */
@Override
public void write(DataOutput out) throws IOException {
    // write the number of columns in this tuple
    out.writeInt(this.values.size());
    if (this.values.size() != this.namesToIdxMapping.size()) {
        StringBuffer sb = new StringBuffer();
        for (Object v : values)
            sb.append(v.toString()).append(",");
        throw new IllegalArgumentException(this.getClass().getCanonicalName()
            + ", the length of values and schema is not the same, "
            + "very likely the schema of this tuple has not been set yet, please set it using Tuple#setSchema(String[])."
            + " Values:[" + sb.toString() + "] schema:" + this.namesToIdxMapping.keySet());
    }
    WriteImpl writeImpl = new WriteImpl(out);
    for (String aColumnName : getSorted(this.namesToIdxMapping.keySet())) {
        Object value = this.values.get(this.namesToIdxMapping.get(aColumnName));
        byte type = getType(value);
        out.write(type);
        writeImpl.setValue(value);
        writeImpl.handle(type);
    }
}
From source file: org.apache.accumulo.core.client.BatchWriterConfig.java

@Override
public void write(DataOutput out) throws IOException {
    // write this out in a human-readable way
    ArrayList<String> fields = new ArrayList<String>();
    if (maxMemory != null)
        addField(fields, "maxMemory", maxMemory);
    if (maxLatency != null)
        addField(fields, "maxLatency", maxLatency);
    if (maxWriteThreads != null)
        addField(fields, "maxWriteThreads", maxWriteThreads);
    if (timeout != null)
        addField(fields, "timeout", timeout);
    if (durability != Durability.DEFAULT)
        addField(fields, "durability", durability);
    String output = StringUtils.join(",", fields);
    byte[] bytes = output.getBytes(UTF_8);
    byte[] len = String.format("%6s#", Integer.toString(bytes.length, 36)).getBytes(UTF_8);
    if (len.length != 7)
        throw new IllegalStateException("encoded length does not match expected value");
    out.write(len);
    out.write(bytes);
}
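The 7-byte header above is the payload length in base 36, left-padded with spaces to six characters and terminated with '#'. A hedged sketch of how a reader could parse that header back (illustrative only, not the Accumulo readFields implementation):

// Parses the 7-byte length header written above: six base-36 characters,
// space-padded on the left, followed by '#'.
static int readLengthHeader(DataInput in) throws IOException {
    byte[] len = new byte[7];
    in.readFully(len);
    String header = new String(len, StandardCharsets.UTF_8);
    if (header.charAt(6) != '#')
        throw new IOException("bad length header: " + header);
    // trim() drops the left padding; radix 36 matches Integer.toString(n, 36)
    return Integer.parseInt(header.substring(0, 6).trim(), 36);
}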
From source file: org.apache.carbondata.core.metadata.blocklet.BlockletInfo.java

/**
 * Serialize datachunks as well for older versions like V1 and V2
 */
private void writeChunkInfoForOlderVersions(DataOutput output) throws IOException {
    int dimChunksSize = dimensionColumnChunk != null ? dimensionColumnChunk.size() : 0;
    output.writeShort(dimChunksSize);
    for (int i = 0; i < dimChunksSize; i++) {
        byte[] bytes = serializeDataChunk(dimensionColumnChunk.get(i));
        output.writeInt(bytes.length);
        output.write(bytes);
    }
    int msrChunksSize = measureColumnChunk != null ? measureColumnChunk.size() : 0;
    output.writeShort(msrChunksSize);
    for (int i = 0; i < msrChunksSize; i++) {
        byte[] bytes = serializeDataChunk(measureColumnChunk.get(i));
        output.writeInt(bytes.length);
        output.write(bytes);
    }
}
From source file: org.apache.drill.exec.cache.CachedVectorContainer.java

@Override
public void write(DataOutput output) throws IOException {
    output.writeInt(data.length);
    output.write(data);
}
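This is the minimal length-prefixed form of DataOutput.write(byte[]) that several examples on this page share: write the length with writeInt, then the raw bytes. The symmetric read side looks like this (a sketch of the pattern, not Drill's actual code):

// Reads back a length-prefixed byte array written as
// writeInt(data.length) followed by write(data).
static byte[] readLengthPrefixed(DataInput in) throws IOException {
    int len = in.readInt();
    byte[] data = new byte[len];
    in.readFully(data); // readFully blocks until all len bytes have arrived
    return data;
}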
From source file: org.apache.eagle.alert.engine.serialization.impl.JavaObjectSerializer.java

@Override
public void serialize(Object value, DataOutput dataOutput) throws IOException {
    byte[] bytes = SerializationUtils.serialize((Serializable) value);
    dataOutput.writeInt(bytes.length);
    dataOutput.write(bytes);
}
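The matching deserialize side would read the length prefix and the payload, then hand the bytes back to the same Commons Lang SerializationUtils used above. A hedged sketch, not necessarily Eagle's JavaObjectSerializer.deserialize:

// Inverse of serialize() above: read the int length, the payload,
// and let Commons Lang rebuild the object graph.
public Object deserialize(DataInput dataInput) throws IOException {
    int len = dataInput.readInt();
    byte[] bytes = new byte[len];
    dataInput.readFully(bytes);
    return SerializationUtils.deserialize(bytes);
}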
From source file: org.apache.geode.internal.cache.DiskInitFile.java

static void writeDiskRegionID(DataOutput dos, long drId) throws IOException {
    // If the drId is <= 255 (max unsigned byte) then
    // encode it as a single byte.
    // Otherwise write a byte whose value is the number of bytes
    // it will be encoded by and then follow it with that many bytes.
    // Note that drIds are not allowed to have a value in the range 1..8 inclusive.
    if (drId >= 0 && drId <= 255) {
        dos.write((byte) drId);
    } else {
        byte bytesNeeded = (byte) Oplog.bytesNeeded(drId);
        dos.write(bytesNeeded);
        byte[] bytes = new byte[bytesNeeded];
        for (int i = bytesNeeded - 1; i >= 0; i--) {
            bytes[i] = (byte) (drId & 0xFF);
            drId >>= 8;
        }
        dos.write(bytes);
    }
}
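Because drIds in 1..8 are forbidden, a reader can tell the two encodings apart from the first byte alone: a value in 1..8 must be a byte count, anything else is the drId itself. A hedged decode sketch inferred from the write path (illustrative, not Geode's DiskInitFile code):

// Inverse of writeDiskRegionID: a first byte in 1..8 is a byte count
// for a big-endian value; any other byte is the drId itself.
static long readDiskRegionID(DataInput dis) throws IOException {
    int first = dis.readUnsignedByte();
    if (first < 1 || first > 8) {
        return first; // drId fit in a single unsigned byte
    }
    long drId = 0;
    for (int i = 0; i < first; i++) {
        drId = (drId << 8) | dis.readUnsignedByte();
    }
    return drId;
}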
From source file: org.apache.geode.internal.InternalDataSerializer.java

/**
 * write an object in java Serializable form with a SERIALIZABLE DSCODE so that it can be
 * deserialized with DataSerializer.readObject()
 *
 * @param o the object to serialize
 * @param out the data output to serialize to
 */
public static void writeSerializableObject(Object o, DataOutput out) throws IOException {
    out.writeByte(SERIALIZABLE);
    if (out instanceof ObjectOutputStream) {
        ((ObjectOutputStream) out).writeObject(o);
    } else {
        OutputStream stream;
        if (out instanceof OutputStream) {
            stream = (OutputStream) out;
        } else {
            final DataOutput out2 = out;
            stream = new OutputStream() {
                @Override
                public void write(int b) throws IOException {
                    out2.write(b);
                }
            };
        }
        boolean wasDoNotCopy = false;
        if (out instanceof HeapDataOutputStream) {
            // To fix bug 52197 disable doNotCopy mode
            // while serializing with an ObjectOutputStream.
            // The problem is that ObjectOutputStream keeps
            // an internal byte array that it reuses while serializing.
            wasDoNotCopy = ((HeapDataOutputStream) out).setDoNotCopy(false);
        }
        try {
            ObjectOutput oos = new ObjectOutputStream(stream);
            if (stream instanceof VersionedDataStream) {
                Version v = ((VersionedDataStream) stream).getVersion();
                if (v != null && v != Version.CURRENT) {
                    oos = new VersionedObjectOutput(oos, v);
                }
            }
            oos.writeObject(o);
            // To fix bug 35568 just call flush. We can't call close because
            // it calls close on the wrapped OutputStream.
            oos.flush();
        } finally {
            if (wasDoNotCopy) {
                ((HeapDataOutputStream) out).setDoNotCopy(true);
            }
        }
    }
}
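The javadoc says the result can be deserialized with DataSerializer.readObject(). As a rough, hedged sketch of what that implies on the wire (ignoring the versioned-stream wrapping, and assuming SERIALIZABLE is the same DSCODE constant used above; this is not Geode's reader):

// Hypothetical read-side sketch: check the DSCODE header byte, then the
// payload is ordinary java.io serialization.
static Object readSerializableObject(DataInputStream in) throws IOException, ClassNotFoundException {
    byte header = in.readByte();
    if (header != SERIALIZABLE) {
        throw new IOException("unexpected header byte: " + header);
    }
    ObjectInputStream ois = new ObjectInputStream(in);
    return ois.readObject();
}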
From source file: org.apache.geode.internal.InternalDataSerializer.java

/**
 * Write a variable length long the old way (pre 7.0). Use this only in contexts where you might
 * need to communicate with pre 7.0 members or files.
 */
public static void writeVLOld(long data, DataOutput out) throws IOException {
    if (data < 0) {
        Assert.fail("Data expected to be >=0 is " + data);
    }
    if (data <= MAX_BYTE_VL) {
        out.writeByte((byte) data);
    } else if (data <= 0x7FFF) {
        // set the sign bit to indicate a short
        out.write(((int) data >>> 8 | 0x80) & 0xFF);
        out.write((int) data >>> 0 & 0xFF);
    } else if (data <= Integer.MAX_VALUE) {
        out.writeByte(INT_VL);
        out.writeInt((int) data);
    } else {
        out.writeByte(LONG_VL);
        out.writeLong(data);
    }
}
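A decoder branches on the first byte: the sign bit set means a two-byte value, the INT_VL/LONG_VL codes mean a full int or long follows, and anything else is the value itself. A hedged sketch, assuming MAX_BYTE_VL, INT_VL and LONG_VL are the same constants used above with MAX_BYTE_VL < INT_VL < LONG_VL < 0x80 (their concrete values are not shown on this page):

// Hedged inverse of writeVLOld; relies on the constants from the class above.
public static long readVLOld(DataInput in) throws IOException {
    int first = in.readUnsignedByte();
    if ((first & 0x80) != 0) {
        // sign bit set: two-byte value, high 7 bits in the first byte
        return ((first & 0x7F) << 8) | in.readUnsignedByte();
    }
    if (first == INT_VL) {
        return in.readInt();
    }
    if (first == LONG_VL) {
        return in.readLong();
    }
    return first; // small value stored directly in one byte
}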
From source file: org.apache.geode.pdx.internal.PdxInstanceImpl.java

public void sendTo(DataOutput out) throws IOException {
    PdxReaderImpl ur = getUnmodifiableReader();
    if (ur.getPdxType().getHasDeletedField()) {
        PdxWriterImpl writer = convertToTypeWithNoDeletedFields(ur);
        writer.sendTo(out);
    } else {
        out.write(DSCODE.PDX);
        out.writeInt(ur.basicSize());
        out.writeInt(ur.getPdxType().getTypeId());
        ur.basicSendTo(out);
    }
}