Example usage for java.nio ByteBuffer getShort

List of usage examples for java.nio ByteBuffer getShort

Introduction

On this page you can find example usages of java.nio.ByteBuffer.getShort().

Prototype

public abstract short getShort();

Document

Reads the next two bytes at this buffer's current position, composing them into a short value according to the current byte order, and then increments the position by two.
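
For illustration, here is a minimal self-contained sketch of that behavior (the class name and buffer contents are invented for the demo):

import java.nio.ByteBuffer;

public class GetShortDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(new byte[] { 0x12, 0x34, (byte) 0xFF, (byte) 0xFE });
        short a = buf.getShort();           // 0x1234 = 4660 (the default byte order is big-endian)
        short b = buf.getShort();           // 0xFFFE = -2 (short is signed)
        System.out.println(a + ", " + b + ", position=" + buf.position()); // 4660, -2, position=4
        // A further getShort() would throw BufferUnderflowException,
        // since fewer than 2 bytes remain.
    }
}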

Usage

From source file:org.opendaylight.controller.protocol_plugin.openflow.vendorextension.v6extension.V6Match.java

private void readFromInternal(ByteBuffer data) {
    this.match_len = 0;
    while (data.remaining() > 0) {
        if (data.remaining() < 4) {
            /*
             * at least 4 bytes for each match header
             */
            logger.error("Invalid Vendor Extension Header. Size {}", data.remaining());
            return;
        }
        /*
         * read the 4 byte match header
         */
        int nxmVendor = data.getShort();
        int b = data.get();
        int nxmField = b >> 1;
        boolean hasMask = (b & 0x01) == 1;
        int nxmLen = data.get();
        if (nxmVendor == Extension_Types.OF_10.getValue()) {
            if (nxmField == OF_Match_Types.MATCH_OF_IN_PORT.getValue()) {
                readInPort(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_ETH_DST.getValue()) {
                readDataLinkDestination(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_ETH_SRC.getValue()) {
                readDataLinkSource(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_ETH_TYPE.getValue()) {
                readEtherType(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_VLAN_TCI.getValue()) {
                readVlanTci(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_IP_TOS.getValue()) {
                readIpTos(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_IP_PROTO.getValue()) {
                readIpProto(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_IP_SRC.getValue()) {
                readIpv4Src(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_IP_DST.getValue()) {
                readIpv4Dst(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_TCP_SRC.getValue()) {
                readTcpSrc(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_TCP_DST.getValue()) {
                readTcpDst(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_UDP_SRC.getValue()) {
                readUdpSrc(data, nxmLen, hasMask);
            } else if (nxmField == OF_Match_Types.MATCH_OF_UDP_DST.getValue()) {
                readUdpDst(data, nxmLen, hasMask);
            } else {
                // unexpected nxmField
                return;
            }
        } else if (nxmVendor == Extension_Types.IPV6EXT.getValue()) {
            if (nxmField == IPv6Extension_Match_Types.MATCH_IPV6EXT_IPV6_SRC.getValue()) {
                readIpv6Src(data, nxmLen, hasMask);
            } else if (nxmField == IPv6Extension_Match_Types.MATCH_IPV6EXT_IPV6_DST.getValue()) {
                readIpv6Dst(data, nxmLen, hasMask);
            } else {
                // unexpected nxmField
                return;
            }
        } else {
            // invalid nxmVendor
            return;
        }
    }
}

From source file:edu.umass.cs.gigapaxos.paxospackets.RequestPacket.java

public RequestPacket(ByteBuffer bbuf) throws UnsupportedEncodingException, UnknownHostException {
    super(bbuf);
    int exactLength = bbuf.position();

    this.requestID = bbuf.getLong();
    this.stop = bbuf.get() == (byte) 1;
    exactLength += (8 + 1);

    // addresses
    byte[] ca = new byte[4];
    bbuf.get(ca);
    int cport = (int) bbuf.getShort();
    cport = cport >= 0 ? cport : cport + 2 * (Short.MAX_VALUE + 1);
    this.clientAddress = cport != 0 ? new InetSocketAddress(InetAddress.getByAddress(ca), cport) : null;
    byte[] la = new byte[4];
    bbuf.get(la);
    int lport = (int) bbuf.getShort();
    lport = lport >= 0 ? lport : lport + 2 * (Short.MAX_VALUE + 1);
    this.listenAddress = lport != 0 ? new InetSocketAddress(InetAddress.getByAddress(la), lport) : null;
    exactLength += (4 + 2 + 4 + 2);

    // other non-final fields
    this.entryReplica = bbuf.getInt();
    this.entryTime = bbuf.getLong();
    this.shouldReturnRequestValue = bbuf.get() == (byte) 1;
    this.forwardCount = bbuf.getInt();
    exactLength += (4 + 8 + 1 + 4);

    // digest related fields
    this.broadcasted = bbuf.get() == (byte) 1;
    int digestLength = bbuf.getInt();
    if (digestLength > 0)
        bbuf.get(this.digest = new byte[digestLength]);

    // highly variable length fields

    // requestValue
    int reqValLen = bbuf.getInt();
    byte[] reqValBytes = new byte[reqValLen];
    bbuf.get(reqValBytes);
    this.requestValue = reqValBytes.length > 0 ? new String(reqValBytes, CHARSET) : null;
    exactLength += (4 + reqValBytes.length);

    // responseValue
    int respValLen = bbuf.getInt();
    byte[] respValBytes = new byte[respValLen];
    bbuf.get(respValBytes);
    this.responseValue = respValBytes.length > 0 ? new String(respValBytes, CHARSET) : null;
    exactLength += (4 + respValBytes.length);

    int numBatched = bbuf.getInt();
    if (numBatched == 0)
        return;
    // else
    // batched requests
    this.batched = new RequestPacket[numBatched];
    for (int i = 0; i < numBatched; i++) {
        int len = bbuf.getInt();
        byte[] element = new byte[len];
        bbuf.get(element);
        this.batched[i] = new RequestPacket(element);
    }
    assert (exactLength > 0);
}
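
The two port reads above widen the signed short returned by getShort() and add 2 * (Short.MAX_VALUE + 1), i.e. 65536, to negative values to recover the unsigned 16-bit port. A small sketch of how that wrap-around matches the usual masking idiom (values are illustrative):

short raw = (short) 0xC350;                // port 50000 stored as an unsigned 16-bit value
int wrapped = raw;                         // sign-extends to -15536
if (wrapped < 0) {
    wrapped += 2 * (Short.MAX_VALUE + 1);  // back to 50000, as in the constructor above
}
int masked = raw & 0xFFFF;                 // 50000: the common one-step idiom
assert wrapped == masked;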

From source file:com.healthmarketscience.jackcess.impl.ColumnImpl.java

/**
 * Deserialize a raw byte value for this column into an Object
 * @param data The raw byte value
 * @param order Byte order in which the raw value is stored
 * @return The deserialized Object
 * @usage _advanced_method_
 */
public Object read(byte[] data, ByteOrder order) throws IOException {
    ByteBuffer buffer = ByteBuffer.wrap(data).order(order);

    switch (getType()) {
    case BOOLEAN:
        throw new IOException("Tried to read a boolean from data instead of null mask.");
    case BYTE:
        return Byte.valueOf(buffer.get());
    case INT:
        return Short.valueOf(buffer.getShort());
    case LONG:
        return Integer.valueOf(buffer.getInt());
    case DOUBLE:
        return Double.valueOf(buffer.getDouble());
    case FLOAT:
        return Float.valueOf(buffer.getFloat());
    case SHORT_DATE_TIME:
        return readDateValue(buffer);
    case BINARY:
        return data;
    case TEXT:
        return decodeTextValue(data);
    case MONEY:
        return readCurrencyValue(buffer);
    case NUMERIC:
        return readNumericValue(buffer);
    case GUID:
        return readGUIDValue(buffer, order);
    case UNKNOWN_0D:
    case UNKNOWN_11:
        // treat like "binary" data
        return data;
    case COMPLEX_TYPE:
        return new ComplexValueForeignKeyImpl(this, buffer.getInt());
    default:
        throw new IOException("Unrecognized data type: " + _type);
    }
}
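
Because the method wraps the raw bytes with the caller-supplied ByteOrder before any getShort()/getInt() call, the same bytes decode differently depending on the order argument. A standalone sketch of that wrap-then-read pattern (the bytes are illustrative, and the usual java.nio imports are assumed):

byte[] raw = { 0x39, 0x05 };               // the 16-bit value 0x0539 stored little-endian
ByteBuffer buf = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN);
short value = buf.getShort();              // 0x0539 = 1337
short asBigEndian = ByteBuffer.wrap(raw).getShort(); // 0x3905 = 14597 with the default big-endian order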

From source file:edu.harvard.iq.dvn.ingest.statdataio.impl.plugins.dta.DTAFileReader.java

private void decodeHeader(BufferedInputStream stream) throws IOException {
    dbgLog.fine("***** decodeHeader(): start *****");

    if (stream == null) {
        throw new IllegalArgumentException("stream == null!");
    }

    dbgLog.fine("reading the header segument 1: 4 byte\n");
    byte[] magic_number = new byte[DTA_MAGIC_NUMBER_LENGTH];

    int nbytes = stream.read(magic_number, 0, DTA_MAGIC_NUMBER_LENGTH);

    if (nbytes <= 0) {
        throw new IOException("failed to read the 4-byte magic number");
    }

    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("hex dump: 1st 4bytes =>" + new String(Hex.encodeHex(magic_number)) + "<-");

    if (magic_number[2] != 1) {
        dbgLog.fine("3rd byte is not 1: given file is not stata-dta type");
        throw new IllegalArgumentException("given file is not stata-dta type");
    } else if ((magic_number[1] != 1) && (magic_number[1] != 2)) {
        dbgLog.fine("2nd byte is neither 0 nor 1: this file is not stata-dta type");
        throw new IllegalArgumentException("given file is not stata-dta type");
    } else if (!STATA_RELEASE_NUMBER.containsKey((int) magic_number[0])) {
        dbgLog.fine("1st byte (" + magic_number[0] + ") is not within the ingestable range [rel. 3-10]:"
                + "we cannot ingest this Stata file.");
        throw new IllegalArgumentException("given file is not stata-dta type");
    } else {
        releaseNumber = (int) magic_number[0];
        smd.getFileInformation().put("releaseNumber", releaseNumber);
        smd.getFileInformation().put("byteOrder", (int) magic_number[1]);
        smd.getFileInformation().put("OSByteOrder", ByteOrder.nativeOrder().toString());

        smd.getFileInformation().put("mimeType", MIME_TYPE[0]);
        smd.getFileInformation().put("fileFormat", MIME_TYPE[0]);
        init();

        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("this file is stata-dta type: " + STATA_RELEASE_NUMBER.get((int) magic_number[0])
                    + "(Number=" + magic_number[0] + ")");
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("Endian(file)(Big: 1; Little:2)=" + magic_number[1]);

        if ((int) magic_number[1] == 2) {
            isLittleEndian = true;
            dbgLog.fine("Reveral of the bytes is necessary to decode " + "multi-byte fields");
        }
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("Endian of this platform:" + ByteOrder.nativeOrder().toString());
    }

    dbgLog.fine("reading the remaining header segument 2: 60 or 109-byte");

    byte[] header = new byte[headerLength];
    nbytes = stream.read(header, 0, headerLength);
    //printHexDump(header, "header:\n");

    // 1. number of variables: short (2 bytes)
    ByteBuffer bbnvar = ByteBuffer.wrap(header, 0, NVAR_FIELD_LENGTH);
    ByteBuffer dupnvar = bbnvar.duplicate();
    short short_nvar = dupnvar.getShort();

    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("get original short view(nvar)=" + short_nvar);
    if (isLittleEndian) {
        bbnvar.order(ByteOrder.LITTLE_ENDIAN);

    }

    short shrt_nvar = bbnvar.getShort();
    smd.getFileInformation().put("varQnty", new Integer(shrt_nvar));

    // setup variableTypeList
    int nvar = shrt_nvar;
    variableTypelList = new String[nvar];

    // 2. number of observations: int (4 bytes)
    ByteBuffer nobs = ByteBuffer.wrap(header, NVAR_FIELD_LENGTH, NOBS_FIELD_LENGTH);
    ByteBuffer dupnobs = nobs.duplicate();
    int int_dupnobs = dupnobs.getInt();
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("raw nobs=" + int_dupnobs);
    if (isLittleEndian) {
        nobs.order(ByteOrder.LITTLE_ENDIAN);
    }
    int int_nobs = nobs.getInt();
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("reversed nobs=" + int_nobs);

    smd.getFileInformation().put("caseQnty", new Integer(int_nobs));

    // 3. data_label: 32 or 81 bytes
    int dl_offset = NVAR_FIELD_LENGTH + NOBS_FIELD_LENGTH;
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("dl_offset=" + dl_offset);
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("data_label_length=" + dataLabelLength);

    String data_label = new String(Arrays.copyOfRange(header, dl_offset, (dl_offset + dataLabelLength)),
            "ISO-8859-1");

    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("data_label_length=" + data_label.length());
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("loation of the null character=" + data_label.indexOf(0));

    String dataLabel = getNullStrippedString(data_label);
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("data_label_length=" + dataLabel.length());
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("data_label=[" + dataLabel + "]");

    smd.getFileInformation().put("dataLabel", dataLabel);

    // 4. time_stamp: ASCII String (18 bytes)
    // added after release 4
    if (releaseNumber > 104) {
        int ts_offset = dl_offset + dataLabelLength;
        String time_stamp = new String(Arrays.copyOfRange(header, ts_offset, ts_offset + TIME_STAMP_LENGTH),
                "ISO-8859-1");
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("time_stamp_length=" + time_stamp.length());
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("loation of the null character=" + time_stamp.indexOf(0));

        String timeStamp = getNullStrippedString(time_stamp);
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("timeStamp_length=" + timeStamp.length());
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("timeStamp=[" + timeStamp + "]");

        smd.getFileInformation().put("timeStamp", timeStamp);
        smd.getFileInformation().put("fileDate", timeStamp);
        smd.getFileInformation().put("fileTime", timeStamp);
        smd.getFileInformation().put("varFormat_schema", "STATA");

    }

    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("smd dump:" + smd.toString());
        dbgLog.fine("***** decodeHeader(): end *****");
    }
}
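
decodeHeader() peeks at multi-byte fields through duplicate() before switching the byte order: the duplicate shares content but keeps an independent position (and, in current JDKs, starts with the default big-endian order), so the original buffer can be reordered afterwards without disturbing the peek. A condensed sketch of that pattern, with illustrative bytes:

ByteBuffer bb = ByteBuffer.wrap(new byte[] { 0x02, 0x00 });
short bigEndianView = bb.duplicate().getShort(); // 0x0200 = 512; bb's position is unchanged
bb.order(ByteOrder.LITTLE_ENDIAN);
short littleEndianView = bb.getShort();          // 0x0002 = 2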

From source file:edu.harvard.iq.dvn.ingest.statdataio.impl.plugins.dta.DTAFileReader.java

private void decodeDescriptorVarSortList(BufferedInputStream stream, int nvar) throws IOException {
    int length_var_sort_list = VAR_SORT_FIELD_LENGTH * (nvar + 1);
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("length_var_sort_list=" + length_var_sort_list);

    byte[] varSortList = new byte[length_var_sort_list];
    short[] variableSortList = new short[nvar + 1];

    int nbytes = stream.read(varSortList, 0, length_var_sort_list);

    if (nbytes <= 0) {
        throw new IOException("reading error: the varSortList");
    }

    int offset_start = 0;
    for (int i = 0; i <= nvar; i++) {

        ByteBuffer bb_varSortList = ByteBuffer.wrap(varSortList, offset_start, VAR_SORT_FIELD_LENGTH);
        if (isLittleEndian) {
            bb_varSortList.order(ByteOrder.LITTLE_ENDIAN);
        }
        variableSortList[i] = bb_varSortList.getShort();

        offset_start += VAR_SORT_FIELD_LENGTH;
    }
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("variableSortList=" + Arrays.toString(variableSortList));

}
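
Since each entry is read with getShort(), VAR_SORT_FIELD_LENGTH is evidently 2, so the per-entry wrapping could be replaced by a single bulk read through a ShortBuffer view. A hypothetical equivalent, assuming varSortList holds exactly (nvar + 1) two-byte entries:

short[] sortList = new short[nvar + 1];
ByteBuffer wrapped = ByteBuffer.wrap(varSortList);
if (isLittleEndian) {
    wrapped.order(ByteOrder.LITTLE_ENDIAN); // the ShortBuffer view inherits this order
}
wrapped.asShortBuffer().get(sortList);      // reads nvar + 1 shorts in file order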

From source file:edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.dta.DTAFileReader.java

private void decodeHeader(BufferedInputStream stream) throws IOException {
    dbgLog.fine("***** decodeHeader(): start *****");

    if (stream == null) {
        throw new IllegalArgumentException("stream == null!");
    }

    dbgLog.fine("reading the header segument 1: 4 byte\n");
    byte[] magic_number = new byte[DTA_MAGIC_NUMBER_LENGTH];

    int nbytes = stream.read(magic_number, 0, DTA_MAGIC_NUMBER_LENGTH);

    if (nbytes <= 0) {
        throw new IOException("failed to read the 4-byte magic number");
    }

    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("hex dump: 1st 4bytes =>" + new String(Hex.encodeHex(magic_number)) + "<-");
    }

    if (magic_number[2] != 1) {
        dbgLog.fine("3rd byte is not 1: given file is not stata-dta type");
        throw new IllegalArgumentException("The file is not in a STATA format that we can read or support.");
    } else if ((magic_number[1] != 1) && (magic_number[1] != 2)) {
        dbgLog.fine("2nd byte is neither 0 nor 1: this file is not stata-dta type");
        throw new IllegalArgumentException("given file is not stata-dta type");
    } else if (!STATA_RELEASE_NUMBER.containsKey((int) magic_number[0])) {
        dbgLog.fine("1st byte (" + magic_number[0] + ") is not within the ingestable range [rel. 3-10]:"
                + "we cannot ingest this Stata file.");
        throw new IllegalArgumentException("given file is not stata-dta type");
    } else {
        releaseNumber = magic_number[0];
        init();

        dataTable.setOriginalFileFormat(MIME_TYPE[0]);
        /* 
         * releaseNumber: 
         * for storing in the datatable, we are converting the numeric Stata
         * release number into a more user friendly "version number"; 
         * e.g., "release number 115" = "Stata v. 12"
         * -- L.A. 4.0 
         */
        dataTable.setOriginalFormatVersion(STATA_RELEASE_NUMBER.get(releaseNumber));
        dataTable.setUnf("UNF:6:FILEFILEFILEFILE");

        if (dbgLog.isLoggable(Level.FINE)) {
            dbgLog.fine("this file is stata-dta type: " + STATA_RELEASE_NUMBER.get(releaseNumber)
                    + " (that means Stata version " + releaseNumber + ")");
        }
        if (dbgLog.isLoggable(Level.FINE)) {
            dbgLog.fine("Endian(file)(Big: 1; Little:2)=" + magic_number[1]);
        }

        /* 
         * byte order: defined in the second byte of the "magic number": 
         */
        if (magic_number[1] == 2) {
            isLittleEndian = true;
            dbgLog.fine("Reversal of the bytes is necessary to decode " + "multi-byte fields");
        }
        if (dbgLog.isLoggable(Level.FINE)) {
            dbgLog.fine("Endian of this platform:" + ByteOrder.nativeOrder().toString());
        }
    }

    dbgLog.fine("reading the remaining header segument 2: 60 or 109-byte");

    byte[] header = new byte[headerLength];
    nbytes = stream.read(header, 0, headerLength);

    // 1. number of variables: short (2 bytes)
    ByteBuffer bbnvar = ByteBuffer.wrap(header, 0, NVAR_FIELD_LENGTH);
    ByteBuffer dupnvar = bbnvar.duplicate();
    short short_nvar = dupnvar.getShort();

    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("get original short view(nvar)=" + short_nvar);
    }
    if (isLittleEndian) {
        bbnvar.order(ByteOrder.LITTLE_ENDIAN);

    }

    short shrt_nvar = bbnvar.getShort();
    dataTable.setVarQuantity(Long.valueOf(shrt_nvar));
    int nvar = shrt_nvar;

    if (dbgLog.isLoggable(Level.INFO)) {
        dbgLog.info("number of variables(nvar)=" + nvar);
    }

    // 4.0 Initialize dataverse variable objects: 
    List<DataVariable> variableList = new ArrayList<>();

    for (int i = 0; i < nvar; i++) {
        DataVariable dv = new DataVariable();
        dv.setInvalidRanges(new ArrayList<>());
        dv.setSummaryStatistics(new ArrayList<>());
        dv.setUnf("UNF:6:XXX");
        dv.setCategories(new ArrayList<>());
        variableList.add(dv);

        dv.setFileOrder(i);

        dv.setDataTable(dataTable);
    }

    dataTable.setDataVariables(variableList);

    // setup variableTypeList
    variableTypes = new String[nvar];
    // and the date/time format list:
    dateVariableFormats = new String[nvar];

    // 2. number of observations: int (4 bytes)
    ByteBuffer nobs = ByteBuffer.wrap(header, NVAR_FIELD_LENGTH, NOBS_FIELD_LENGTH);
    ByteBuffer dupnobs = nobs.duplicate();
    int int_dupnobs = dupnobs.getInt();
    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("raw nobs=" + int_dupnobs);
    }
    if (isLittleEndian) {
        nobs.order(ByteOrder.LITTLE_ENDIAN);
    }
    int int_nobs = nobs.getInt();
    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("reversed nobs=" + int_nobs);
    }

    // smd.getFileInformation().put("caseQnty", new Integer(int_nobs));
    dataTable.setCaseQuantity(Long.valueOf(int_nobs));

    /* 
     the "data label" - 
     note that we are not using this label for anything 
     (wonder what it is though? can we use it somewhere?)
     but we still need to extract it from the byte stream, 
     since the offsets of the objects stored further up
     are calculated relative to it. -- L.A., 4.0
     */
    // 3. data_label: 32 or 81 bytes
    int dl_offset = NVAR_FIELD_LENGTH + NOBS_FIELD_LENGTH;
    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("dl_offset=" + dl_offset);
    }
    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("data_label_length=" + dataLabelLength);
    }

    String data_label = new String(Arrays.copyOfRange(header, dl_offset, (dl_offset + dataLabelLength)),
            "ISO-8859-1");

    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("data_label_length=" + data_label.length());
    }
    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("loation of the null character=" + data_label.indexOf(0));
    }

    String dataLabel = getNullStrippedString(data_label);
    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("data_label_length=" + dataLabel.length());
    }
    if (dbgLog.isLoggable(Level.FINE)) {
        dbgLog.fine("data_label=[" + dataLabel + "]");
    }

    // smd.getFileInformation().put("dataLabel", dataLabel);

    /* end of "data label" */
    // 4. time_stamp: ASCII String (18 bytes)
    // added after release 4
    if (releaseNumber > 104) {
        int ts_offset = dl_offset + dataLabelLength;
        String time_stamp = new String(Arrays.copyOfRange(header, ts_offset, ts_offset + TIME_STAMP_LENGTH),
                "ISO-8859-1");
        if (dbgLog.isLoggable(Level.FINE)) {
            dbgLog.fine("time_stamp_length=" + time_stamp.length());
        }
        if (dbgLog.isLoggable(Level.FINE)) {
            dbgLog.fine("loation of the null character=" + time_stamp.indexOf(0));
        }

        String timeStamp = getNullStrippedString(time_stamp);
        if (dbgLog.isLoggable(Level.FINE)) {
            dbgLog.fine("timeStamp_length=" + timeStamp.length());
        }
        if (dbgLog.isLoggable(Level.FINE)) {
            dbgLog.fine("timeStamp=[" + timeStamp + "]");
        }

    }
}

From source file:edu.jhu.cvrg.services.nodeDataService.DataStaging.java

/** Reads the WFDB file from the brokerURL and stores it as the RdtData of a VisualizationData.
 * It assumes that the file is in RDT format, with 3 leads.
 *
 * @param tempFile - name of a local RDT file containing ECG data. 
 * @param fileSize - used to size the file reading buffer.
 * @param offsetMilliSeconds - number of milliseconds from the beginning of the ECG at which to start the graph.
 * @param durationMilliSeconds - The requested length of the returned data subset, in milliseconds.
 * @param graphWidthPixels - Width of the zoomed graph in pixels(zoom factor*unzoomed width), hence the maximum points needed in the returned VisualizationData.
 * @param callback - call back handler class.
 *
 * @see org.cvrgrid.widgets.node.client.BrokerService#fetchSubjectVisualization(java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, long, int, int)
 */
private VisualizationData fetchWFDBdataSegment(String tempFile, long fileSize, int offsetMilliSeconds,
        int durationMilliSeconds, int graphWidthPixels) {
    BufferedInputStream rdtBis = null;
    VisualizationData visualizationData = new VisualizationData();
    try {
        //******************************************
        try {
            FileInputStream isFile = new FileInputStream(tempFile);
            //*******************************************
            // skippedSamples: number of samples to skip after each one returned, to adjust for graph resolution.
            int samplesPerPixel, skippedSamples, durationInSamples;
            rdtBis = new BufferedInputStream(isFile);

            //Read the 4 header bytes
            byte[] header = new byte[HEADERBYTES];
            int result = rdtBis.read(header, 0, HEADERBYTES);

            if (result == HEADERBYTES) {
                ByteBuffer bbHead = ByteBuffer.wrap(header);
                bbHead.order(BYTEORDER);

                short channels = bbHead.getShort();
                short samplingRate = bbHead.getShort(); // replaced with subjectData.setSamplingRate() 
                float fRateMsec = (float) (samplingRate / 1000.0);
                if (offsetMilliSeconds < 0)
                    offsetMilliSeconds = 0; // cannot read before the beginning of the file.
                int vizOffset = (int) (offsetMilliSeconds * fRateMsec);

                //-------------------------------------------------
                // Calculate and Set Visualization parameters
                final int REALBUFFERSIZE = (int) fileSize - HEADERBYTES;
                if (REALBUFFERSIZE % (channels * SHORTBYTES) != 0) {
                    System.err.println("rdt file is not aligned.");
                }
                int counts = REALBUFFERSIZE / (channels * SHORTBYTES);
                byte[][] body = new byte[counts][(channels * SHORTBYTES)];
                byte[] sample = new byte[(channels * SHORTBYTES)]; /** A single reading from all leads. **/
                try {

                    int requestedMaxPoints;
                    durationInSamples = (int) (fRateMsec * durationMilliSeconds);
                    if (durationInSamples > graphWidthPixels) {
                        samplesPerPixel = durationInSamples / graphWidthPixels;
                        requestedMaxPoints = graphWidthPixels;
                    } else {
                        samplesPerPixel = 1;
                        requestedMaxPoints = durationInSamples;
                    }
                    skippedSamples = samplesPerPixel - 1;

                    int availableSamples = counts - vizOffset; // total number of remaining samples from this offset.
                    int availablePoints = availableSamples / samplesPerPixel; // total number of graphable points from this offset.
                    int maxPoints = 0; // maximum data points that can be returned.
                    // ensure that the copying loop doesn't try to go past the end of the data file.
                    if (availablePoints > requestedMaxPoints) {
                        maxPoints = requestedMaxPoints;
                    } else { // Requested duration is longer than the remainder after the offset.
                        if (durationInSamples < counts) { // Requested duration is less than the file contains.
                            // move the offset back so the requested amount of samples can be returned.
                            vizOffset = counts - durationInSamples;
                            maxPoints = requestedMaxPoints;
                        } else { // Requested duration is longer than the file contains.
                            maxPoints = availablePoints;
                        }
                    }
                    visualizationData.setRdtDataLength(maxPoints);
                    visualizationData.setRdtDataLeads(channels);
                    visualizationData.setOffset(vizOffset);
                    visualizationData.setSkippedSamples(skippedSamples);
                    int msDuration = (counts * 1000) / samplingRate;
                    visualizationData.setMsDuration(msDuration);

                    //------------------------------------------------
                    // Read the rest of the file to get the data.
                    ByteBuffer bbSample;
                    double[][] tempData = new double[maxPoints][channels];
                    int fileOffset = vizOffset * channels * SHORTBYTES; //offset in bytes from the beginning of the file.

                    int index1, index2, s, outSample = 0;
                    index2 = vizOffset; // index of the first sample to return data for, index is in samples not bytes.
                    int length, bisOffset, bisLen = sample.length;
                    // read the file one sample frame at a time into the byte array "sample"
                    for (index1 = 0; index1 < counts; index1++) {
                        bisOffset = HEADERBYTES + (index1 * bisLen);
                        s = 0;
                        for (int c = 0; c < (bisLen * 4); c++) { // make up to 4 attempts to read 
                            length = rdtBis.read(sample, s, 1);// read one byte into the byte array "sample", explicitly specifying which byte to read.
                            if (length == 1)
                                s++; // successfully read the byte, go to the next one.
                            if (s == bisLen)
                                break; // last byte has been read.
                        }

                        if (index1 == index2) { // add this sample to the output data
                            bbSample = ByteBuffer.wrap(sample);
                            bbSample.order(BYTEORDER);

                            for (int ch = 0; ch < channels; ch++) {
                                short value = bbSample.getShort(); // reads a Short, increments position() by 2 bytes.
                                tempData[outSample][ch] = (double) value;
                            }

                            bbSample.clear();
                            index2 = index2 + 1 + skippedSamples;
                            outSample++;
                            if (outSample == maxPoints)
                                break;
                        }
                    }

                    visualizationData.setRdtData(tempData);

                    //*******************************************
                    isFile.close();
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            } else {
                System.err.println(
                        "fetchSubjectVisualization failed, error occured while reading header of the RDT file:"
                                + tempFile);
            }
            //*******************************************
        } catch (IOException e1) {
            e1.printStackTrace();
        } finally {
            try {
                rdtBis.close();
            } catch (IOException e2) {
                e2.printStackTrace();
            }
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    return visualizationData;
}
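
Each two-byte lead reading is decoded with getShort() after wrapping the per-frame byte array. A condensed sketch of decoding one interleaved frame (channel count and bytes are illustrative; production code would also set the file's byte order, as the method above does with BYTEORDER):

final int channels = 3;
byte[] frame = { 0x00, 0x0A, 0x00, 0x14, (byte) 0xFF, (byte) 0xEC }; // 10, 20, -20 big-endian
ByteBuffer bbSample = ByteBuffer.wrap(frame);
double[] point = new double[channels];
for (int ch = 0; ch < channels; ch++) {
    point[ch] = bbSample.getShort();       // advances the position 2 bytes per channel
}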

From source file:edu.jhu.cvrg.services.nodeDataService.DataStaging.java

/** Reads the file from the brokerURL and stores it as the RdtData of a VisualizationData.
 * It assumes that the file is in RDT format, with 3 leads.
 *
 * @param tempFile - name of a local RDT file containing ECG data. 
 * @param fileSize - used to size the file reading buffer.
 * @param offsetMilliSeconds - number of milliseconds from the beginning of the ECG at which to start the graph.
 * @param durationMilliSeconds - The requested length of the returned data subset, in milliseconds.
 * @param graphWidthPixels - Width of the zoomed graph in pixels(zoom factor*unzoomed width), hence the maximum points needed in the returned VisualizationData.
 * @param callback - call back handler class.
 *
 * @see org.cvrgrid.widgets.node.client.BrokerService#fetchSubjectVisualization(java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, long, int, int)
 */
private VisualizationData fetchSubjectVisualization(String tempFile, long fileSize, int offsetMilliSeconds,
        int durationMilliSeconds, int graphWidthPixels) {
    BufferedInputStream rdtBis = null;
    VisualizationData visualizationData = new VisualizationData();
    try {
        //******************************************
        try {
            FileInputStream isFile = new FileInputStream(tempFile);
            //*******************************************

            int samplesPerPixel, skippedSamples, durationInSamples;
            rdtBis = new BufferedInputStream(isFile);

            //Read the 4 header bytes
            byte[] header = new byte[HEADERBYTES];
            int result = rdtBis.read(header, 0, HEADERBYTES);

            if (result == HEADERBYTES) {
                ByteBuffer bbHead = ByteBuffer.wrap(header);
                bbHead.order(BYTEORDER);

                short channels = bbHead.getShort();
                short samplingRate = bbHead.getShort(); // replaced with subjectData.setSamplingRate() 
                float fRateMsec = (float) (samplingRate / 1000.0);
                if (offsetMilliSeconds < 0)
                    offsetMilliSeconds = 0; // cannot read before the beginning of the file.
                int vizOffset = (int) (offsetMilliSeconds * fRateMsec);

                //-------------------------------------------------
                // Calculate and Set Visualization parameters
                final int REALBUFFERSIZE = (int) fileSize - HEADERBYTES;
                if (REALBUFFERSIZE % (channels * SHORTBYTES) != 0) {
                    System.err.println("rdt file is not aligned.");
                }
                int counts = REALBUFFERSIZE / (channels * SHORTBYTES);
                byte[][] body = new byte[counts][(channels * SHORTBYTES)];
                byte[] sample = new byte[(channels * SHORTBYTES)]; /** A single reading from all leads. **/
                try {
                    int requestedMaxPoints;
                    durationInSamples = (int) (fRateMsec * durationMilliSeconds);
                    if (durationInSamples > graphWidthPixels) {
                        samplesPerPixel = durationInSamples / graphWidthPixels;
                        requestedMaxPoints = graphWidthPixels;
                    } else {
                        samplesPerPixel = 1;
                        requestedMaxPoints = durationInSamples;
                    }
                    skippedSamples = samplesPerPixel - 1;

                    int availableSamples = counts - vizOffset; // total number of remaining samples from this offset.
                    int availablePoints = availableSamples / samplesPerPixel; // total number of graphable points from this offset.
                    int maxPoints = 0; // maximum data points that can be returned.
                    // ensure that the copying loop doesn't try to go past the end of the data file.
                    if (availablePoints > requestedMaxPoints) {
                        maxPoints = requestedMaxPoints;
                    } else { // Requested duration is longer than the remainder after the offset.
                        if (durationInSamples < counts) { // Requested duration is less than the file contains.
                            // move the offset back so the requested amount of samples can be returned.
                            vizOffset = counts - durationInSamples;
                            maxPoints = requestedMaxPoints;
                        } else { // Requested duration is longer than the file contains.
                            maxPoints = availablePoints;
                        }
                    }
                    visualizationData.setRdtDataLength(maxPoints);
                    visualizationData.setRdtDataLeads(channels);
                    visualizationData.setOffset(vizOffset);
                    visualizationData.setSkippedSamples(skippedSamples);
                    int msDuration = (counts * 1000) / samplingRate;
                    visualizationData.setMsDuration(msDuration);

                    //------------------------------------------------
                    // Read the rest of the file to get the data.
                    ByteBuffer bbSample;
                    double[][] tempData = new double[maxPoints][channels];
                    int fileOffset = vizOffset * channels * SHORTBYTES; //offset in bytes from the beginning of the file.

                    int index1, index2, s, outSample = 0;
                    index2 = vizOffset; // index of the first sample to return data for, index is in samples not bytes.
                    int length, bisOffset, bisLen = sample.length;
                    // read the file one sample frame at a time into the byte array "sample"
                    for (index1 = 0; index1 < counts; index1++) {
                        bisOffset = HEADERBYTES + (index1 * bisLen);
                        s = 0;
                        for (int c = 0; c < (bisLen * 4); c++) { // make up to 4 attempts to read 
                            length = rdtBis.read(sample, s, 1);// read one byte into the byte array "sample", explicitly specifying which byte to read.
                            if (length == 1)
                                s++; // successfully read the byte, go to the next one.
                            if (s == bisLen)
                                break; // last byte has been read.
                        }

                        if (index1 == index2) { // add this sample to the output data
                            bbSample = ByteBuffer.wrap(sample);
                            bbSample.order(BYTEORDER);

                            for (int ch = 0; ch < channels; ch++) {
                                short value = bbSample.getShort(); // reads a Short, increments position() by 2 bytes.
                                tempData[outSample][ch] = (double) value;
                            }

                            bbSample.clear();
                            index2 = index2 + 1 + skippedSamples;
                            outSample++;
                            if (outSample == maxPoints)
                                break;
                        }
                    }

                    visualizationData.setRdtData(tempData);

                    //*******************************************
                    isFile.close();
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            } else {
                System.err.println(
                        "fetchSubjectVisualization failed, error occured while reading header of the RDT file:"
                                + tempFile);
            }
            //*******************************************
        } catch (IOException e1) {
            e1.printStackTrace();
        } finally {
            try {
                rdtBis.close();
            } catch (IOException e2) {
                e2.printStackTrace();
            }
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    return visualizationData;
}

From source file:edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.dta.DTAFileReader.java

private void decodeDescriptorVarSortList(BufferedInputStream stream, int nvar) throws IOException {
    /*
     * Whatever this "var sort list" is, we don't seem to be using this 
     * information for any purposes in particular. However, we need to read
     * the bytes, to skip to the next section in the stream, if nothing else. 
     * -- L.A. 4.0
     */
    int length_var_sort_list = VAR_SORT_FIELD_LENGTH * (nvar + 1);
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("length_var_sort_list=" + length_var_sort_list);

    byte[] varSortList = new byte[length_var_sort_list];
    short[] variableSortList = new short[nvar + 1];

    int nbytes = stream.read(varSortList, 0, length_var_sort_list);

    if (nbytes <= 0) {
        throw new IOException("reading error: the varSortList");
    }

    int offset_start = 0;
    for (int i = 0; i <= nvar; i++) {

        ByteBuffer bb_varSortList = ByteBuffer.wrap(varSortList, offset_start, VAR_SORT_FIELD_LENGTH);
        if (isLittleEndian) {
            bb_varSortList.order(ByteOrder.LITTLE_ENDIAN);
        }
        variableSortList[i] = bb_varSortList.getShort();

        offset_start += VAR_SORT_FIELD_LENGTH;
    }
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("variableSortList=" + Arrays.toString(variableSortList));

}

From source file:com.healthmarketscience.jackcess.IndexData.java

/**
 * Read the rest of the index info from a tableBuffer
 * @param tableBuffer table definition buffer to read from initial info
 * @param availableColumns Columns that this index may use
 */
public void read(ByteBuffer tableBuffer, List<Column> availableColumns) throws IOException {
    ByteUtil.forward(tableBuffer, getFormat().SKIP_BEFORE_INDEX); //Forward past Unknown

    for (int i = 0; i < MAX_COLUMNS; i++) {
        short columnNumber = tableBuffer.getShort();
        byte colFlags = tableBuffer.get();
        if (columnNumber != COLUMN_UNUSED) {
            // find the desired column by column number (which is not necessarily
            // the same as the column index)
            Column idxCol = null;
            for (Column col : availableColumns) {
                if (col.getColumnNumber() == columnNumber) {
                    idxCol = col;
                    break;
                }
            }
            if (idxCol == null) {
                throw new IOException("Could not find column with number " + columnNumber + " for index");
            }
            _columns.add(newColumnDescriptor(idxCol, colFlags));
        }
    }

    int umapRowNum = tableBuffer.get();
    int umapPageNum = ByteUtil.get3ByteInt(tableBuffer);
    _ownedPages = UsageMap.read(getTable().getDatabase(), umapPageNum, umapRowNum, false);

    _rootPageNumber = tableBuffer.getInt();

    ByteUtil.forward(tableBuffer, getFormat().SKIP_BEFORE_INDEX_FLAGS); //Forward past Unknown
    _indexFlags = tableBuffer.get();
    ByteUtil.forward(tableBuffer, getFormat().SKIP_AFTER_INDEX_FLAGS); //Forward past other stuff
}