Example usage for java.io DataOutput writeLong

Introduction

This page collects example usages of java.io DataOutput writeLong from open-source projects.

Prototype

void writeLong(long v) throws IOException;

Document

Writes a long value, which is comprised of eight bytes, to the output stream.
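
DataOutput defines big-endian byte order, so the high byte is written first. A minimal, self-contained sketch (class name is illustrative) that makes the byte layout visible:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteLongDemo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeLong(0x0102030405060708L); // high byte written first
        out.close();

        // prints: 01 02 03 04 05 06 07 08
        for (byte b : bytes.toByteArray()) {
            System.out.printf("%02x ", b);
        }
    }
}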

Usage

From source file: org.apache.hadoop.hbase.HRegionInfo.java

/**
 * @deprecated Use protobuf serialization instead.  See {@link #toByteArray()} and
 * {@link #toDelimitedByteArray()}
 */
@Deprecated
public void write(DataOutput out) throws IOException {
    out.writeByte(getVersion());
    Bytes.writeByteArray(out, endKey);
    out.writeBoolean(offLine);
    out.writeLong(regionId);
    Bytes.writeByteArray(out, regionName);
    out.writeBoolean(split);
    Bytes.writeByteArray(out, startKey);
    Bytes.writeByteArray(out, tableName.getName());
    out.writeInt(hashCode);
}

From source file: org.apache.isis.objectstore.nosql.db.file.server.FileServer.java

private void startSyncing() {
    final String syncHost = config.getString("fileserver.sync-host", DEFAULT_HOST);
    final int syncPort = config.getInt("fileserver.sync-port", DEFAULT_SYNC_PORT);
    final int connectionTimeout = config.getInt("fileserver.connection.timeout", 5000);

    LOG.info("preparing to sync to secondary server on " + syncHost + " port " + syncPort);

    final InetAddress address;
    try {
        address = InetAddress.getByName(syncHost);
    } catch (final UnknownHostException e) {
        LOG.error("Unknown host " + syncHost, e);
        System.exit(0);
        return;
    }

    while (awaitConnections) {
        Socket socket = null;
        try {
            socket = new Socket(address, syncPort);
            LOG.info("sync connected to " + socket.getInetAddress().getHostAddress() + " port "
                    + socket.getLocalPort());

            final CRC32 crc32 = new CRC32();
            final DataOutput output = new DataOutputStream(
                    new CheckedOutputStream(socket.getOutputStream(), crc32));
            final DataInput input = new DataInputStream(socket.getInputStream());
            output.writeByte(INIT);
            long logId = input.readLong();
            do {
                final long nextLogId = logId + 1;
                final File file = Util.logFile(nextLogId);
                if (file.exists() && server.getLogger().isWritten(nextLogId)) {
                    logId++;

                    output.writeByte(RECOVERY_LOG);
                    crc32.reset();
                    output.writeLong(logId);

                    LOG.info("sending recovery file: " + file.getName());
                    final BufferedInputStream fileInput = new BufferedInputStream(new FileInputStream(file));

                    final byte[] buffer = new byte[8092];
                    int read;
                    while ((read = fileInput.read(buffer)) > 0) {
                        output.writeInt(read);
                        output.write(buffer, 0, read);
                    }
                    fileInput.close(); // close the recovery log once fully streamed
                    output.writeInt(0);

                    output.writeLong(crc32.getValue());
                }
                try {
                    Thread.sleep(300);
                } catch (final InterruptedException ignore) {
                }

                while (isQuiescent) {
                    try {
                        Thread.sleep(300);
                    } catch (final InterruptedException ignore) {
                    }
                }
            } while (awaitConnections);

        } catch (final ConnectException e) {
            LOG.warn("not yet connected to secondary server at " + syncHost + " port " + syncPort);
            try {
                Thread.sleep(connectionTimeout);
            } catch (final InterruptedException ignore) {
            }
        } catch (final IOException e) {
            LOG.error("start failure - networking not set up for " + syncHost, e);
            try {
                Thread.sleep(300);
            } catch (final InterruptedException ignore) {
            }
        } catch (final RuntimeException e) {
            LOG.error("start failure", e);
            try {
                Thread.sleep(300);
            } catch (final InterruptedException ignore) {
            }
        }
    }

}
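
The secondary server's side of this exchange is not shown here. A hedged sketch of what a matching receive loop could look like, inferred entirely from the writes above (the INIT and RECOVERY_LOG constants and the log-id handshake are taken from this writer; this is not the actual Apache Isis code):

// Hypothetical receiver counterpart, mirroring the writer above.
private void receiveSync(Socket socket, long lastRecoveredLogId) throws IOException {
    final CRC32 crc32 = new CRC32();
    final DataInput input = new DataInputStream(
            new CheckedInputStream(socket.getInputStream(), crc32));
    final DataOutput output = new DataOutputStream(socket.getOutputStream());

    input.readByte();                     // INIT marker from the primary
    output.writeLong(lastRecoveredLogId); // tell the primary where to resume

    while (true) {
        input.readByte();                 // RECOVERY_LOG marker
        crc32.reset();                    // checksum covers log id, lengths, and data
        final long logId = input.readLong();
        try (OutputStream logFile = new FileOutputStream(Util.logFile(logId))) {
            int length;
            while ((length = input.readInt()) > 0) {
                final byte[] buffer = new byte[length];
                input.readFully(buffer);
                logFile.write(buffer);
            }
        }
        final long computedCrc = crc32.getValue(); // capture before reading the sent CRC
        if (input.readLong() != computedCrc) {
            throw new IOException("checksum mismatch for recovery log " + logId);
        }
    }
}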

From source file: org.apache.hadoop.hive.ql.io.orc.OrcSplit.java

@Override
public void write(DataOutput out) throws IOException {
    //serialize path, offset, length using FileSplit
    super.write(out);

    int flags = (hasBase ? BASE_FLAG : 0) | (isOriginal ? ORIGINAL_FLAG : 0) | (hasFooter ? FOOTER_FLAG : 0)
            | (fileId != null ? HAS_FILEID_FLAG : 0);
    out.writeByte(flags);
    out.writeInt(deltas.size());
    for (AcidInputFormat.DeltaMetaData delta : deltas) {
        delta.write(out);
    }
    if (hasFooter) {
        // serialize FileMetaInfo fields
        Text.writeString(out, fileMetaInfo.compressionType);
        WritableUtils.writeVInt(out, fileMetaInfo.bufferSize);
        WritableUtils.writeVInt(out, fileMetaInfo.metadataSize);

        // serialize FileMetaInfo field footer
        ByteBuffer footerBuff = fileMetaInfo.footerBuffer;
        footerBuff.reset();

        // write length of buffer
        WritableUtils.writeVInt(out, footerBuff.limit() - footerBuff.position());
        out.write(footerBuff.array(), footerBuff.position(), footerBuff.limit() - footerBuff.position());
        WritableUtils.writeVInt(out, fileMetaInfo.writerVersion.getId());
    }
    if (fileId != null) {
        out.writeLong(fileId.longValue());
    }
}
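
The flags byte above packs four booleans into a single byte. A hedged sketch of how the read side could unpack it with the same flag constants (illustrative, not the actual Hive readFields code):

// Hypothetical unpacking of the flags byte in readFields(DataInput in).
byte flags = in.readByte();
hasBase = (flags & BASE_FLAG) != 0;
isOriginal = (flags & ORIGINAL_FLAG) != 0;
hasFooter = (flags & FOOTER_FLAG) != 0;
boolean hasFileId = (flags & HAS_FILEID_FLAG) != 0; // fileId follows only if set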

From source file: org.apache.hadoop.mapred.ClusterStatus.java

public void write(DataOutput out) throws IOException {
    if (activeTrackers.size() == 0) {
        out.writeInt(numActiveTrackers);
        out.writeInt(0);
    } else {
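        // the size is written twice: once as the tracker count, once as the list length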
        out.writeInt(activeTrackers.size());
        out.writeInt(activeTrackers.size());
        for (String tracker : activeTrackers) {
            Text.writeString(out, tracker);
        }
    }
    if (blacklistedTrackers.size() == 0) {
        out.writeInt(numBlacklistedTrackers);
        out.writeInt(0);
    } else {
        out.writeInt(blacklistedTrackers.size());
        out.writeInt(blacklistedTrackers.size());
        for (String tracker : blacklistedTrackers) {
            Text.writeString(out, tracker);
        }
    }
    if (graylistedTrackers.size() == 0) {
        out.writeInt(numGraylistedTrackers);
        out.writeInt(0);
    } else {
        out.writeInt(graylistedTrackers.size());
        out.writeInt(graylistedTrackers.size());
        for (String tracker : graylistedTrackers) {
            Text.writeString(out, tracker);
        }
    }
    out.writeInt(numExcludedNodes);
    out.writeLong(ttExpiryInterval);
    out.writeInt(map_tasks);
    out.writeInt(reduce_tasks);
    out.writeInt(max_map_tasks);
    out.writeInt(max_reduce_tasks);
    out.writeLong(used_memory);
    out.writeLong(max_memory);
    WritableUtils.writeEnum(out, state);
}

From source file: org.apache.giraph.graph.BspServiceWorker.java

@Override
public void storeCheckpoint() throws IOException {
    getContext()
            .setStatus("storeCheckpoint: Starting checkpoint " + getGraphMapper().getMapFunctions().toString()
                    + " - Attempt=" + getApplicationAttempt() + ", Superstep=" + getSuperstep());

    // Algorithm:
    // For each partition, dump vertices and messages
    Path metadataFilePath = new Path(getCheckpointBasePath(getSuperstep()) + "." + getHostnamePartitionId()
            + CHECKPOINT_METADATA_POSTFIX);
    Path verticesFilePath = new Path(getCheckpointBasePath(getSuperstep()) + "." + getHostnamePartitionId()
            + CHECKPOINT_VERTICES_POSTFIX);
    Path validFilePath = new Path(
            getCheckpointBasePath(getSuperstep()) + "." + getHostnamePartitionId() + CHECKPOINT_VALID_POSTFIX);

    // Remove these files if they already exist (shouldn't though, unless
    // of previous failure of this worker)
    if (getFs().delete(validFilePath, false)) {
        LOG.warn("storeCheckpoint: Removed valid file " + validFilePath);
    }
    if (getFs().delete(metadataFilePath, false)) {
        LOG.warn("storeCheckpoint: Removed metadata file " + metadataFilePath);
    }
    if (getFs().delete(verticesFilePath, false)) {
        LOG.warn("storeCheckpoint: Removed file " + verticesFilePath);
    }

    FSDataOutputStream verticesOutputStream = getFs().create(verticesFilePath);
    ByteArrayOutputStream metadataByteStream = new ByteArrayOutputStream();
    DataOutput metadataOutput = new DataOutputStream(metadataByteStream);
    for (Partition<I, V, E, M> partition : workerPartitionMap.values()) {
        long startPos = verticesOutputStream.getPos();
        partition.write(verticesOutputStream);
        // Write the metadata for this partition
        // Format:
        // <index count>
        //   <index 0 start pos><partition id>
        //   <index 1 start pos><partition id>
        metadataOutput.writeLong(startPos);
        metadataOutput.writeInt(partition.getPartitionId());
        if (LOG.isDebugEnabled()) {
            LOG.debug("storeCheckpoint: Vertex file starting " + "offset = " + startPos + ", length = "
                    + (verticesOutputStream.getPos() - startPos) + ", partition = " + partition.toString());
        }
    }
    // Metadata is buffered and written at the end since it's small and
    // needs to know how many partitions this worker owns
    FSDataOutputStream metadataOutputStream = getFs().create(metadataFilePath);
    metadataOutputStream.writeInt(workerPartitionMap.size());
    metadataOutputStream.write(metadataByteStream.toByteArray());
    metadataOutputStream.close();
    verticesOutputStream.close();
    if (LOG.isInfoEnabled()) {
        LOG.info("storeCheckpoint: Finished metadata (" + metadataFilePath + ") and vertices ("
                + verticesFilePath + ").");
    }

    getFs().createNewFile(validFilePath);
}
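
The metadata layout described in the comment above (an index count followed by one start-position/partition-id pair per partition) reads back symmetrically. A hedged sketch of a matching reader, assuming the same file layout:

// Hypothetical reader for the metadata file written by storeCheckpoint().
FSDataInputStream metadataInput = getFs().open(metadataFilePath);
int partitionCount = metadataInput.readInt();   // <index count>
for (int i = 0; i < partitionCount; i++) {
    long startPos = metadataInput.readLong();   // start offset in the vertices file
    int partitionId = metadataInput.readInt();  // partition the offset belongs to
    // seek the vertices stream to startPos and load that partition here
}
metadataInput.close();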

From source file: org.cloudata.core.common.io.CObjectWritable.java

/** Write a {@link CWritable}, {@link String}, primitive type, or an array of
 * the preceding. */
public static void writeObject(DataOutput out, Object instance, Class declaredClass, CloudataConf conf,
        boolean arrayComponent) throws IOException {

    if (instance == null) { // null
        instance = new NullInstance(declaredClass, conf);
        declaredClass = CWritable.class;
        arrayComponent = false;
    }

    if (!arrayComponent) {
        CUTF8.writeString(out, declaredClass.getName()); // always write declared
        //System.out.println("Write:declaredClass.getName():" + declaredClass.getName());
    }

    if (declaredClass.isArray()) { // array
        int length = Array.getLength(instance);
        out.writeInt(length);
        //System.out.println("Write:length:" + length);

        if (declaredClass.getComponentType() == Byte.TYPE) {
            out.write((byte[]) instance);
        } else if (declaredClass.getComponentType() == ColumnValue.class) {
            writeColumnValue(out, instance, declaredClass, conf, length);
        } else {
            for (int i = 0; i < length; i++) {
                writeObject(out, Array.get(instance, i), declaredClass.getComponentType(), conf,
                        !declaredClass.getComponentType().isArray());
            }
        }
    } else if (declaredClass == String.class) { // String
        CUTF8.writeString(out, (String) instance);

    } else if (declaredClass.isPrimitive()) { // primitive type

        if (declaredClass == Boolean.TYPE) { // boolean
            out.writeBoolean(((Boolean) instance).booleanValue());
        } else if (declaredClass == Character.TYPE) { // char
            out.writeChar(((Character) instance).charValue());
        } else if (declaredClass == Byte.TYPE) { // byte
            out.writeByte(((Byte) instance).byteValue());
        } else if (declaredClass == Short.TYPE) { // short
            out.writeShort(((Short) instance).shortValue());
        } else if (declaredClass == Integer.TYPE) { // int
            out.writeInt(((Integer) instance).intValue());
        } else if (declaredClass == Long.TYPE) { // long
            out.writeLong(((Long) instance).longValue());
        } else if (declaredClass == Float.TYPE) { // float
            out.writeFloat(((Float) instance).floatValue());
        } else if (declaredClass == Double.TYPE) { // double
            out.writeDouble(((Double) instance).doubleValue());
        } else if (declaredClass == Void.TYPE) { // void
        } else {
            throw new IllegalArgumentException("Not a primitive: " + declaredClass);
        }
    } else if (declaredClass.isEnum()) { // enum
        CUTF8.writeString(out, ((Enum) instance).name());
    } else if (CWritable.class.isAssignableFrom(declaredClass)) { // Writable
        if (instance.getClass() == declaredClass) {
            out.writeShort(TYPE_SAME); // runtime class matches the declared class
            //System.out.println("Write:TYPE_SAME:" + TYPE_SAME);

        } else {
            out.writeShort(TYPE_DIFF);
            //System.out.println("Write:TYPE_DIFF:" + TYPE_DIFF);
            CUTF8.writeString(out, instance.getClass().getName());
            //System.out.println("Write:instance.getClass().getName():" + instance.getClass().getName());
        }
        ((CWritable) instance).write(out);
        //System.out.println("Write:instance value");

    } else {
        throw new IOException("Can't write: " + instance + " as " + declaredClass);
    }
}

From source file: org.commoncrawl.service.pagerank.slave.PageRankUtils.java

public static final void writeURLFPToStream(DataOutput stream, URLFPV2 key) throws IOException {
    stream.writeLong(key.getDomainHash());
    stream.writeLong(key.getRootDomainHash());
    stream.writeLong(key.getUrlHash());
}

From source file: com.linkedin.cubert.io.rubix.RubixFile.java

private static void extract(List<RubixFile<Tuple, Object>> rfiles, long blockId, int numBlocks, String output)
        throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException {
    Configuration conf = new JobConf();
    File outFile = new File(output);
    if (outFile.exists()) {
        outFile.delete();
    }
    outFile.createNewFile();
    BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(outFile));
    ByteArrayOutputStream keySectionStream = new ByteArrayOutputStream();
    DataOutput keySectionOut = new DataOutputStream(keySectionStream);
    SerializationFactory serializationFactory = new SerializationFactory(conf);
    RubixFile<Tuple, Object> lastrFile = null;
    JsonNode json;
    long totalLength = 0;

    final int BUF_SIZE = 32 * 1024;
    long[] blockIds = new long[numBlocks];
    int foundBlocks = 0;

    for (int i = 0; i < numBlocks; i++)
        blockIds[i] = blockId + i;

    for (int i = 0; i < numBlocks; i++) {
        boolean found = false;
        for (RubixFile<Tuple, Object> rfile : rfiles) {
            print.f("Checking %s", rfile.path.toString());
            List<KeyData<Tuple>> keyDataList = rfile.getKeyData();
            for (KeyData<Tuple> keyData : keyDataList) {
                if (keyData.getBlockId() == blockIds[i]) {
                    long offset = keyData.getOffset();
                    long length = keyData.getLength();
                    Tuple key = keyData.getKey();
                    print.f("Extracting block %d (off=%d len=%d) from %s", keyData.getBlockId(), offset, length,
                            rfile.path.toString());

                    // copy the data
                    if (length > 0) {
                        FileSystem fs = FileSystem.get(conf);
                        FSDataInputStream in = fs.open(rfile.path);
                        in.seek(offset);

                        byte[] data = new byte[BUF_SIZE];
                        long toRead = length;
                        while (toRead > 0) {
                            int thisRead = toRead > BUF_SIZE ? BUF_SIZE : (int) toRead;
                            in.readFully(data, 0, thisRead);
                            bos.write(data, 0, thisRead);
                            toRead -= thisRead;
                            System.out.print(".");
                        }
                        System.out.println();
                    }
                    // copy the key section
                    Serializer<Tuple> keySerializer = serializationFactory.getSerializer(rfile.getKeyClass());
                    keySerializer.open(keySectionStream);

                    keySerializer.serialize(key);
                    keySectionOut.writeLong(totalLength); // position
                    keySectionOut.writeLong(keyData.getBlockId());
                    keySectionOut.writeLong(keyData.getNumRecords());
                    foundBlocks++;
                    totalLength += length;
                    lastrFile = rfile;

                    found = true;
                    break;

                }
            }
            if (found) {
                break;
            }
        }
        if (!found)
            System.err.println("Cannot locate block with id " + blockIds[i]);
    }
    byte[] trailerBytes = keySectionStream.toByteArray();

    json = JsonUtils.cloneNode(lastrFile.metadataJson);
    ((ObjectNode) json).put("numberOfBlocks", foundBlocks);

    DataOutput out = new DataOutputStream(bos);
    out.writeUTF(json.toString());
    out.writeInt(trailerBytes.length);
    out.write(trailerBytes);
    out.writeLong(totalLength); // trailer start offset
    bos.close();
}

From source file: org.apache.sysml.runtime.matrix.data.FrameBlock.java

@Override
public void write(DataOutput out) throws IOException {
    boolean isDefaultMeta = isColNamesDefault() && isColumnMetadataDefault();
    //write header (rows, cols, default)
    out.writeInt(getNumRows());
    out.writeInt(getNumColumns());
    out.writeBoolean(isDefaultMeta);
    //write columns (value type, data)
    for (int j = 0; j < getNumColumns(); j++) {
        out.writeByte(_schema[j].ordinal());
        if (!isDefaultMeta) {
            out.writeUTF(getColumnName(j));
            out.writeLong(_colmeta[j].getNumDistinct());
            out.writeUTF((_colmeta[j].getMvValue() != null) ? _colmeta[j].getMvValue() : "");
        }
        _coldata[j].write(out);
    }
}

From source file: org.commoncrawl.service.pagerank.slave.PageRankUtils.java

public static final void writeURLFPAndCountToStream(DataOutput stream, URLFPV2 key, int urlCount)
        throws IOException {
    stream.writeLong(key.getDomainHash());
    stream.writeLong(key.getRootDomainHash());
    stream.writeLong(key.getUrlHash());
    WritableUtils.writeVInt(stream, urlCount);
}
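
Both helpers write fixed-width fields in a defined order, so a reader simply mirrors the writes. A minimal sketch for the second helper, assuming stream is a DataInput positioned at the start of a record:

// Hypothetical reader mirroring writeURLFPAndCountToStream: three
// eight-byte longs followed by a WritableUtils vint.
long domainHash = stream.readLong();
long rootDomainHash = stream.readLong();
long urlHash = stream.readLong();
int urlCount = WritableUtils.readVInt(stream);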