Example usage for java.io DataInput readFully

Introduction

On this page you can find example usages of java.io.DataInput.readFully, collected from open-source projects.

Prototype

void readFully(byte b[]) throws IOException;

Document

Reads bytes from an input stream and stores them into the buffer array b. The number of bytes read equals the length of b; the call blocks until all bytes have been read and throws EOFException if the stream ends first.
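
Before the project examples, here is a minimal sketch of the typical pattern: read a length prefix, allocate a buffer, and let readFully fill it completely. The file name payload.bin and the class name ReadFullyExample are illustrative only.

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class ReadFullyExample {
    public static void main(String[] args) throws IOException {
        // DataInputStream implements DataInput, so readFully is available here.
        try (DataInputStream in = new DataInputStream(new FileInputStream("payload.bin"))) {
            int len = in.readInt(); // length prefix written by the producer
            byte[] buf = new byte[len];
            // Blocks until buf is completely filled; throws EOFException
            // if the stream ends before all len bytes arrive.
            in.readFully(buf);
        }
    }
}

Unlike InputStream.read(byte[]), which may return after filling only part of the array, readFully either fills the whole array or throws an exception. That guarantee is why it appears wherever a length-prefixed payload must be read exactly, as in all the examples below.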

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.FSImageSerialization.java
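
A compact form of the core pattern: a short length prefix sizes the buffer, and readFully fills it completely.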

public static byte[] readLocalName(DataInput in) throws IOException {
    byte[] createdNodeName = new byte[in.readShort()];
    in.readFully(createdNodeName);
    return createdNodeName;
}

From source file:org.apache.hadoop.hive.serde2.io.BigDecimalWritable.java
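
Here the target array is reused across calls and only reallocated when the incoming length differs, which avoids garbage on repeated deserialization.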

@Override
public void readFields(DataInput in) throws IOException {
    scale = WritableUtils.readVInt(in);
    int byteArrayLen = WritableUtils.readVInt(in);
    if (internalStorage.length != byteArrayLen) {
        internalStorage = new byte[byteArrayLen];
    }
    in.readFully(internalStorage);
}

From source file:org.apache.hadoop.mapred.HappyJobClient.java
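
readFully first loads a fixed-size magic header, which is validated before any split records are parsed.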

/**
 * Read a splits file into a list of raw splits
 * @param in the stream to read from
 * @return the complete list of splits
 * @throws IOException
 */
static RawSplit[] readSplitFile(DataInput in) throws IOException {
    byte[] header = new byte[SPLIT_FILE_HEADER.length];
    in.readFully(header);
    if (!Arrays.equals(SPLIT_FILE_HEADER, header)) {
        throw new IOException("Invalid header on split file");
    }
    int vers = WritableUtils.readVInt(in);
    if (vers != CURRENT_SPLIT_FILE_VERSION) {
        throw new IOException("Unsupported split version " + vers);
    }
    int len = WritableUtils.readVInt(in);
    RawSplit[] result = new RawSplit[len];
    for (int i = 0; i < len; ++i) {
        result[i] = new RawSplit();
        result[i].readFields(in);
    }
    return result;
}

From source file:org.apache.hadoop.security.Credentials.java
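
Each secret key's value is read as a vint length followed by readFully into a freshly allocated buffer; tokens are deserialized via their own readFields.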

/**
 * Loads all the keys
 * @param in
 * @throws IOException
 */
@Override
public void readFields(DataInput in) throws IOException {
    secretKeysMap.clear();
    tokenMap.clear();

    int size = WritableUtils.readVInt(in);
    for (int i = 0; i < size; i++) {
        Text alias = new Text();
        alias.readFields(in);
        Token<? extends TokenIdentifier> t = new Token<TokenIdentifier>();
        t.readFields(in);
        tokenMap.put(alias, t);
    }

    size = WritableUtils.readVInt(in);
    for (int i = 0; i < size; i++) {
        Text alias = new Text();
        alias.readFields(in);
        int len = WritableUtils.readVInt(in);
        byte[] value = new byte[len];
        in.readFully(value);
        secretKeysMap.put(alias, value);
    }
}

From source file:org.apache.hadoop.security.token.Token.java
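
As in the BigDecimalWritable example, the identifier and password buffers are reused whenever the serialized lengths match.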

/** {@inheritDoc} */
public void readFields(DataInput in) throws IOException {
    int len = WritableUtils.readVInt(in);
    if (identifier == null || identifier.length != len) {
        identifier = new byte[len];
    }
    in.readFully(identifier);
    len = WritableUtils.readVInt(in);
    if (password == null || password.length != len) {
        password = new byte[len];
    }
    in.readFully(password);
    kind.readFields(in);
    service.readFields(in);
}

From source file:org.apache.hama.bsp.BSPJobClient.java
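
A variant of the split-file reader above: splits that carry a valid partition ID are stored at that index rather than at the loop position.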

/**
 * Read a splits file into a list of raw splits
 *
 * @param in the stream to read from
 * @return the complete list of splits
 * @throws IOException
 */
static RawSplit[] readSplitFile(DataInput in) throws IOException {
    byte[] header = new byte[SPLIT_FILE_HEADER.length];
    in.readFully(header);
    if (!Arrays.equals(SPLIT_FILE_HEADER, header)) {
        throw new IOException("Invalid header on split file");
    }
    int vers = WritableUtils.readVInt(in);
    if (vers != CURRENT_SPLIT_FILE_VERSION) {
        throw new IOException("Unsupported split version " + vers);
    }
    int len = WritableUtils.readVInt(in);
    RawSplit[] result = new RawSplit[len];
    for (int i = 0; i < len; ++i) {
        RawSplit split = new RawSplit();
        split.readFields(in);
        if (split.getPartitionID() != Integer.MIN_VALUE)
            result[split.getPartitionID()] = split;
        else
            result[i] = split;
    }
    return result;
}

From source file:org.apache.hawq.pxf.service.io.GPDBWritable.java
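
A longer example: readFully fills the null-bit array and BYTEA payloads, while the (byte[], int, int) overload reads TEXT payloads whose length comes from a 4-byte header.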

@Override
public void readFields(DataInput in) throws IOException {
    /*
     * Extract pkt len.
     *
     * GPSQL-1107:
     * The DataInput might already be empty (EOF), but we can't check it beforehand.
     * If that's the case, pktlen is updated to -1 to mark that the object is still
     * empty (this can be checked with isEmpty()).
     */
    pktlen = readPktLen(in);
    if (isEmpty()) {
        return;
    }

    /* extract the version and col cnt */
    int version = in.readShort();
    int curOffset = 4 + 2;
    int colCnt;

    /* !!! Check VERSION !!! */
    if (version != GPDBWritable.VERSION && version != GPDBWritable.PREV_VERSION) {
        throw new IOException("Current GPDBWritable version(" + GPDBWritable.VERSION
                + ") does not match input version(" + version + ")");
    }

    if (version == GPDBWritable.VERSION) {
        errorFlag = in.readByte();
        curOffset += 1;
    }

    colCnt = in.readShort();
    curOffset += 2;

    /* Extract Column Type */
    colType = new int[colCnt];
    DBType[] coldbtype = new DBType[colCnt];
    for (int i = 0; i < colCnt; i++) {
        int enumType = (in.readByte());
        curOffset += 1;
        if (enumType == DBType.BIGINT.ordinal()) {
            colType[i] = BIGINT.getOID();
            coldbtype[i] = DBType.BIGINT;
        } else if (enumType == DBType.BOOLEAN.ordinal()) {
            colType[i] = BOOLEAN.getOID();
            coldbtype[i] = DBType.BOOLEAN;
        } else if (enumType == DBType.FLOAT8.ordinal()) {
            colType[i] = FLOAT8.getOID();
            coldbtype[i] = DBType.FLOAT8;
        } else if (enumType == DBType.INTEGER.ordinal()) {
            colType[i] = INTEGER.getOID();
            coldbtype[i] = DBType.INTEGER;
        } else if (enumType == DBType.REAL.ordinal()) {
            colType[i] = REAL.getOID();
            coldbtype[i] = DBType.REAL;
        } else if (enumType == DBType.SMALLINT.ordinal()) {
            colType[i] = SMALLINT.getOID();
            coldbtype[i] = DBType.SMALLINT;
        } else if (enumType == DBType.BYTEA.ordinal()) {
            colType[i] = BYTEA.getOID();
            coldbtype[i] = DBType.BYTEA;
        } else if (enumType == DBType.TEXT.ordinal()) {
            colType[i] = TEXT.getOID();
            coldbtype[i] = DBType.TEXT;
        } else {
            throw new IOException("Unknown GPDBWritable.DBType ordinal value");
        }
    }

    /* Extract null bit array */
    byte[] nullbytes = new byte[getNullByteArraySize(colCnt)];
    in.readFully(nullbytes);
    curOffset += nullbytes.length;
    boolean[] colIsNull = byteArrayToBooleanArray(nullbytes, colCnt);

    /* extract column value */
    colValue = new Object[colCnt];
    for (int i = 0; i < colCnt; i++) {
        if (!colIsNull[i]) {
            /* Skip the alignment padding */
            int skipbytes = roundUpAlignment(curOffset, coldbtype[i].getAlignment()) - curOffset;
            for (int j = 0; j < skipbytes; j++) {
                in.readByte();
            }
            curOffset += skipbytes;

            /* For fixed-length types, increment the offset by the type length here.
             * For variable-length types (BYTEA, TEXT), we'll read a 4-byte length
             * header and the actual payload.
             */
            int varcollen = -1;
            if (coldbtype[i].isVarLength()) {
                varcollen = in.readInt();
                curOffset += 4 + varcollen;
            } else {
                curOffset += coldbtype[i].getTypeLength();
            }

            switch (DataType.get(colType[i])) {
            case BIGINT: {
                colValue[i] = in.readLong();
                break;
            }
            case BOOLEAN: {
                colValue[i] = in.readBoolean();
                break;
            }
            case FLOAT8: {
                colValue[i] = in.readDouble();
                break;
            }
            case INTEGER: {
                colValue[i] = in.readInt();
                break;
            }
            case REAL: {
                colValue[i] = in.readFloat();
                break;
            }
            case SMALLINT: {
                colValue[i] = in.readShort();
                break;
            }

            /* For BYTEA column, it has a 4 byte var length header. */
            case BYTEA: {
                colValue[i] = new byte[varcollen];
                in.readFully((byte[]) colValue[i]);
                break;
            }
            /* A text-formatted column has a 4-byte var-length header and is always
             * a null-terminated string, so we can drop the trailing "\0" when
             * constructing the String.
             */
            case TEXT: {
                byte[] data = new byte[varcollen];
                in.readFully(data, 0, varcollen);
                colValue[i] = new String(data, 0, varcollen - 1, CHARSET);
                break;
            }

            default:
                throw new IOException("Unknown GPDBWritable ColType");
            }
        }
    }

    /* Skip the ending alignment padding */
    int skipbytes = roundUpAlignment(curOffset, 8) - curOffset;
    for (int j = 0; j < skipbytes; j++) {
        in.readByte();
    }
    curOffset += skipbytes;

    if (errorFlag != 0) {
        throw new IOException("Received error value " + errorFlag + " from format");
    }
}

From source file:org.apache.marmotta.kiwi.io.KiWiIO.java
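
readFully loads the whole compressed buffer in one call before handing it to an Inflater for decompression.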

/**
 * Read a potentially compressed string from the data input.
 *
 * @param in the data input to read from
 * @return the string content, decompressed if necessary
 * @throws IOException
 */
private static String readContent(DataInput in) throws IOException {
    int mode = in.readByte();

    if (mode == MODE_COMPRESSED) {
        try {
            int strlen = in.readInt();
            int buflen = in.readInt();

            byte[] buffer = new byte[buflen];
            in.readFully(buffer);

            Inflater decompressor = new Inflater(true);
            decompressor.setInput(buffer);

            byte[] data = new byte[strlen];
            decompressor.inflate(data);
            decompressor.end();

            return new String(data, "UTF-8");
        } catch (DataFormatException ex) {
            throw new IllegalStateException("input data is not valid", ex);
        }
    } else {
        return DataIO.readString(in);
    }
}

From source file:org.apache.nutch.crawl.CrawlDatum.java
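
The page signature is read with readFully only when a preceding count byte indicates it is present.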

public void readFields(DataInput in) throws IOException {
    byte version = in.readByte(); // read version
    if (version > CUR_VERSION) // check version
        throw new VersionMismatchException(CUR_VERSION, version);

    status = in.readByte();
    fetchTime = in.readLong();
    retries = in.readByte();
    if (version > 5) {
        fetchInterval = in.readInt();
    } else
        fetchInterval = Math.round(in.readFloat());
    score = in.readFloat();
    if (version > 2) {
        modifiedTime = in.readLong();
        int cnt = in.readByte();
        if (cnt > 0) {
            signature = new byte[cnt];
            in.readFully(signature);
        } else
            signature = null;
    }

    if (version > 3) {
        boolean hasMetadata = false;
        if (version < 7) {
            org.apache.hadoop.io.MapWritable oldMetaData = new org.apache.hadoop.io.MapWritable();
            if (in.readBoolean()) {
                hasMetadata = true;
                metaData = new org.apache.hadoop.io.MapWritable();
                oldMetaData.readFields(in);
            }
            for (Writable key : oldMetaData.keySet()) {
                metaData.put(key, oldMetaData.get(key));
            }
        } else {
            if (in.readBoolean()) {
                hasMetadata = true;
                metaData = new org.apache.hadoop.io.MapWritable();
                metaData.readFields(in);
            }
        }
        if (hasMetadata == false)
            metaData = null;
    }
    // translate status codes
    if (version < 5) {
        if (oldToNew.containsKey(status))
            status = oldToNew.get(status);
        else
            status = STATUS_DB_UNFETCHED;

    }
}

From source file:org.apache.nutch.protocol.Content.java
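
In both format versions, the raw content is read as an int length prefix followed by readFully.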

private final void readFieldsCompressed(DataInput in) throws IOException {
    byte oldVersion = in.readByte();
    switch (oldVersion) {
    case 0:
    case 1:
        url = Text.readString(in); // read url
        base = Text.readString(in); // read base

        content = new byte[in.readInt()]; // read content
        in.readFully(content);

        contentType = Text.readString(in); // read contentType
        // reconstruct metadata
        int keySize = in.readInt();
        String key;
        for (int i = 0; i < keySize; i++) {
            key = Text.readString(in);
            int valueSize = in.readInt();
            for (int j = 0; j < valueSize; j++) {
                metadata.add(key, Text.readString(in));
            }
        }
        break;
    case 2:
        url = Text.readString(in); // read url
        base = Text.readString(in); // read base

        content = new byte[in.readInt()]; // read content
        in.readFully(content);

        contentType = Text.readString(in); // read contentType
        metadata.readFields(in); // read meta data
        break;
    default:
        throw new VersionMismatchException((byte) 2, oldVersion);
    }
}