List of usage examples for java.io.DataOutput.writeLong
void writeLong(long v) throws IOException;

Writes a long value, which is composed of eight bytes, to the output stream (high byte first).
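Before the project examples below, here is a minimal, self-contained round trip (the class name WriteLongDemo is illustrative): DataOutputStream implements DataOutput, so writeLong emits exactly eight bytes that the matching DataInput method, readLong, reads back.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteLongDemo {
  public static void main(String[] args) throws IOException {
    // Write one long into an in-memory buffer.
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(buffer)) {
      out.writeLong(1234567890123L);
    }
    byte[] bytes = buffer.toByteArray(); // always 8 bytes, high byte first

    // Read it back with the matching DataInput method.
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
      System.out.println(in.readLong()); // prints 1234567890123
    }
  }
}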
From source file:org.kiji.schema.mapreduce.KijiPut.java
/** {@inheritDoc} */
@Override
public void write(DataOutput out) throws IOException {
  // EntityId.
  final byte[] bytes = mEntityId.getHBaseRowKey();
  out.writeInt(bytes.length);
  out.write(bytes);

  // Family/Qualifier/Timestamp.
  out.writeUTF(mFamily);
  out.writeUTF(mQualifier);
  out.writeLong(mTimestamp);

  // Avro.
  final KijiCellEncoder encoder = new KijiCellEncoder(null);
  final byte[] cellData = encoder.encode(mCell, KijiCellFormat.NONE);
  out.writeUTF(mCell.getWriterSchema().toString());
  out.writeInt(cellData.length);
  out.write(cellData);
}
From source file:edu.umn.cs.spatialHadoop.nasa.HDFRasterLayer.java
@Override
public void write(DataOutput out) throws IOException {
  super.write(out);
  out.writeLong(timestamp);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  GZIPOutputStream gzos = new GZIPOutputStream(baos);
  // Room for one column of (sum, count) longs, plus 8 bytes for the width/height ints.
  ByteBuffer bbuffer = ByteBuffer.allocate(getHeight() * 2 * 8 + 8);
  bbuffer.putInt(getWidth());
  bbuffer.putInt(getHeight());
  gzos.write(bbuffer.array(), 0, bbuffer.position());
  for (int x = 0; x < getWidth(); x++) {
    bbuffer.clear();
    for (int y = 0; y < getHeight(); y++) {
      bbuffer.putLong(sum[x][y]);
      bbuffer.putLong(count[x][y]);
    }
    gzos.write(bbuffer.array(), 0, bbuffer.position());
  }
  gzos.close();
  byte[] serializedData = baos.toByteArray();
  out.writeInt(serializedData.length);
  out.write(serializedData);
}
From source file:org.apache.hadoop.hbase.ccindex.TimeRangeFilter.java
public void write(final DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.columnFamily);
  Bytes.writeByteArray(out, this.columnQualifier);
  out.writeLong(this.startTs);
  out.writeLong(this.endTs);
  out.writeBoolean(foundColumn);
  out.writeBoolean(filterIfMissing);
  out.writeBoolean(latestVersionOnly);
}
From source file:parquet.hadoop.ParquetInputSplit.java
private void writeBlock(DataOutput out, BlockMetaData block) throws IOException {
  out.writeInt(block.getColumns().size());
  for (ColumnChunkMetaData column : block.getColumns()) {
    writeColumn(out, column);
  }
  out.writeLong(block.getRowCount());
  out.writeLong(block.getTotalByteSize());
  // Null flag first, then the path only when it is present.
  out.writeBoolean(block.getPath() == null);
  if (block.getPath() != null) {
    out.writeUTF(block.getPath());
  }
}
From source file:com.cloudera.sqoop.lib.LobRef.java
@Override
public void write(DataOutput out) throws IOException {
  out.writeBoolean(isExternal());
  if (isExternal()) {
    Text.writeString(out, "lf"); // storage type "lf" for LobFile.
    Text.writeString(out, fileName);
    out.writeLong(offset);
    out.writeLong(length);
  } else {
    writeInternal(out);
  }
}
From source file:org.apache.hama.bsp.TaskStatus.java
@Override
public void write(DataOutput out) throws IOException {
  jobId.write(out);
  taskId.write(out);
  out.writeFloat(progress);
  WritableUtils.writeEnum(out, runState);
  Text.writeString(out, stateString);
  WritableUtils.writeEnum(out, phase);
  out.writeLong(startTime);
  out.writeLong(finishTime);
  counters.write(out);
}
From source file:org.apache.hadoop.hbase.index.IndexSpecification.java
/**
 * @param out the data output stream
 * @throws IOException
 */
public void write(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.name);
  out.writeInt(this.indexColumns.size());
  for (ColumnQualifier cq : this.indexColumns) {
    cq.write(out);
  }
  out.writeInt(maxVersions);
  out.writeLong(ttl);
}
From source file:org.cloudata.core.tabletserver.CommitLog.java
public void write(DataOutput out) throws IOException {
  writeBytes = 0;

  out.writeInt(operation);
  writeBytes += CWritableUtils.getIntByteSize();

  rowKey.write(out);
  writeBytes += rowKey.getByteSize();

  writeBytes += CWritableUtils.writeString(out, columnName);

  columnKey.write(out);
  writeBytes += columnKey.getByteSize();

  out.writeLong(timestamp);
  writeBytes += CWritableUtils.getLongByteSize();

  out.writeInt(value == null ? -1 : value.length);
  writeBytes += CWritableUtils.getIntByteSize();

  if (value != null) {
    out.write(value);
    writeBytes += value.length;
  }
}
From source file:org.apache.tez.mapreduce.hadoop.MRTaskStatus.java
@Override
public void write(DataOutput out) throws IOException {
  taskAttemptId.write(out);
  WritableUtils.writeEnum(out, state);
  out.writeFloat(progress);
  WritableUtils.writeString(out, diagnostics);
  WritableUtils.writeString(out, userStatusInfo);
  WritableUtils.writeEnum(out, phase);
  counters.write(out);

  out.writeLong(localOutputSize);
  out.writeLong(startTime);
  out.writeLong(finishTime);
  out.writeLong(sortFinishTime);
  out.writeLong(mapFinishTime);
  out.writeLong(shuffleFinishTime);

  out.writeInt(failedTaskDependencies.size());
  for (TezTaskAttemptID taskAttemptId : failedTaskDependencies) {
    taskAttemptId.write(out);
  }
}
From source file:org.apache.hadoop.hbase.io.hfile.FixedFileTrailer.java
/**
 * Write the trailer to a data stream. We support writing version 1 for
 * testing and for determining version 1 trailer size. It is also easy to see
 * what fields changed in version 2.
 *
 * @param outputStream
 * @throws IOException
 */
void serialize(DataOutputStream outputStream) throws IOException {
  HFile.checkFormatVersion(majorVersion);

  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutput baosDos = new DataOutputStream(baos);

  BlockType.TRAILER.write(baosDos);
  baosDos.writeLong(fileInfoOffset);
  baosDos.writeLong(loadOnOpenDataOffset);
  baosDos.writeInt(dataIndexCount);

  if (majorVersion == 1) {
    // This used to be metaIndexOffset, but it was not used in version 1.
    baosDos.writeLong(0);
  } else {
    baosDos.writeLong(uncompressedDataIndexSize);
  }
  baosDos.writeInt(metaIndexCount);
  baosDos.writeLong(totalUncompressedBytes);
  if (majorVersion == 1) {
    baosDos.writeInt((int) Math.min(Integer.MAX_VALUE, entryCount));
  } else {
    // This field is long from version 2 onwards.
    baosDos.writeLong(entryCount);
  }
  baosDos.writeInt(compressionCodec.ordinal());
  if (majorVersion > 1) {
    baosDos.writeInt(numDataIndexLevels);
    baosDos.writeLong(firstDataBlockOffset);
    baosDos.writeLong(lastDataBlockOffset);
    Bytes.writeStringFixedSize(baosDos, comparatorClassName, MAX_COMPARATOR_NAME_LENGTH);
  }

  // Serialize the major and minor versions.
  baosDos.writeInt(materializeVersion(majorVersion, minorVersion));

  outputStream.write(baos.toByteArray());
}
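Most of the examples above are the write half of a Hadoop Writable: deserialization must call readLong (inside readFields) in exactly the order the fields were written. A minimal sketch of that pattern, with a hypothetical class and field names borrowed from the TimeRangeFilter example (not code from any of the projects above):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

// Hypothetical Writable; the class and field names are illustrative only.
public class TimeRange implements Writable {
  private long startTs;
  private long endTs;

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeLong(startTs); // eight bytes each, high byte first
    out.writeLong(endTs);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    // Fields must be read back in exactly the order they were written.
    startTs = in.readLong();
    endTs = in.readLong();
  }
}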