List of usage examples for java.nio ByteOrder LITTLE_ENDIAN
ByteOrder.LITTLE_ENDIAN
The snippets below are collected from open-source projects. Each one shows the named source file configuring a java.nio.ByteBuffer (or assembling bytes by hand) in little-endian order before reading or writing multi-byte values.
From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java
@Override
public byte[] encodeSetCannedMessages(CannedMessagesSpec cannedMessagesSpec) {
    if (cannedMessagesSpec.cannedMessages == null || cannedMessagesSpec.cannedMessages.length == 0) {
        return null;
    }

    String blobDBKey;
    switch (cannedMessagesSpec.type) {
        case CannedMessagesSpec.TYPE_MISSEDCALLS:
            blobDBKey = "com.pebble.android.phone";
            break;
        case CannedMessagesSpec.TYPE_NEWSMS:
            blobDBKey = "com.pebble.sendText";
            break;
        default:
            return null;
    }

    int replies_length = -1;
    for (String reply : cannedMessagesSpec.cannedMessages) {
        replies_length += reply.getBytes().length + 1;
    }

    ByteBuffer buf = ByteBuffer.allocate(12 + replies_length);
    buf.order(ByteOrder.LITTLE_ENDIAN);
    buf.putInt(0x00000000); // unknown
    buf.put((byte) 0x00); // attributes count?
    buf.put((byte) 0x01); // actions count?
    // action
    buf.put((byte) 0x00); // action id
    buf.put((byte) 0x03); // action type = reply
    buf.put((byte) 0x01); // attributes count
    buf.put((byte) 0x08); // canned messages
    buf.putShort((short) replies_length);
    for (int i = 0; i < cannedMessagesSpec.cannedMessages.length - 1; i++) {
        buf.put(cannedMessagesSpec.cannedMessages[i].getBytes());
        buf.put((byte) 0x00);
    }
    // last one must not be zero terminated, else we get an additional empty reply
    buf.put(cannedMessagesSpec.cannedMessages[cannedMessagesSpec.cannedMessages.length - 1].getBytes());

    return encodeBlobdb(blobDBKey, BLOBDB_INSERT, BLOBDB_CANNED_MESSAGES, buf.array());
}
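The essential ByteOrder.LITTLE_ENDIAN pattern in the snippet above (allocate a buffer, set the byte order once, then write mixed-width fields) can be reduced to the minimal, self-contained sketch below. The field names and sizes are invented for illustration and are not part of the Pebble protocol:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class LittleEndianPacketSketch {
    public static void main(String[] args) {
        byte[] payload = "hello".getBytes();
        // header (4 + 1 + 2 bytes) + payload; the layout is illustrative only
        ByteBuffer buf = ByteBuffer.allocate(7 + payload.length);
        buf.order(ByteOrder.LITTLE_ENDIAN);   // all subsequent puts are little-endian
        buf.putInt(0x00000000);               // hypothetical 4-byte field
        buf.put((byte) 0x01);                 // hypothetical 1-byte flag
        buf.putShort((short) payload.length); // 2-byte length, least significant byte first
        buf.put(payload);
        byte[] packet = buf.array();          // backing array, ready to send
        System.out.println(packet.length + " bytes, length LSB=" + packet[5]);
    }
}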
From source file:au.org.ala.layers.intersect.Grid.java
/** * @param points input array for longitude and latitude * double[number_of_points][2] and sorted latitude then longitude * @return array of .gri file values corresponding to the * points provided//from w ww .ja v a 2 s . co m */ public float[] getValues3(double[][] points, int bufferSize) { //confirm inputs since they come from somewhere else if (points == null || points.length == 0) { return null; } if (subgrids != null) { return getValuesSubgrids(points, bufferSize); } //use preloaded grid data if available Grid g = Grid.getLoadedGrid(filename); if (g != null && g.grid_data != null) { return g.getValues2(points); } int length = points.length; int size, i; byte[] b; RandomAccessFile afile = null; File f2 = new File(filename + ".GRI"); try { //read of random access file can throw an exception if (!f2.exists()) { afile = new RandomAccessFile(filename + ".gri", "r"); } else { afile = new RandomAccessFile(filename + ".GRI", "r"); } //do not cache subgrids (using getValues2) if (!subgrid && afile.length() < 80 * 1024 * 1024) { try { afile.close(); afile = null; } catch (Exception e) { } return getValues2(points); } byte[] buffer = new byte[bufferSize]; //must be multiple of 64 Long bufferOffset = afile.length(); float[] ret = new float[points.length]; //get cell numbers long[][] cells = new long[points.length][2]; for (int j = 0; j < points.length; j++) { if (Double.isNaN(points[j][0]) || Double.isNaN(points[j][1])) { cells[j][0] = -1; cells[j][1] = j; } else { cells[j][0] = getcellnumber(points[j][0], points[j][1]); cells[j][1] = j; } } java.util.Arrays.sort(cells, new Comparator<long[]>() { @Override public int compare(long[] o1, long[] o2) { if (o1[0] == o2[0]) { return o1[1] > o2[1] ? 1 : -1; } else { return o1[0] > o2[0] ? 1 : -1; } } }); if (datatype.equalsIgnoreCase("BYTE")) { size = 1; for (i = 0; i < length; i++) { if (i > 0 && cells[i - 1][0] == cells[i][0]) { ret[(int) cells[i][1]] = ret[(int) cells[i - 1][1]]; continue; } if (cells[i][0] >= 0) { ret[(int) cells[i][1]] = getByte(afile, buffer, bufferOffset, cells[i][0] * size); } else { ret[(int) cells[i][1]] = Float.NaN; } } } else if (datatype.equalsIgnoreCase("UBYTE")) { size = 1; for (i = 0; i < length; i++) { if (i > 0 && cells[i - 1][0] == cells[i][0]) { ret[(int) cells[i][1]] = ret[(int) cells[i - 1][1]]; continue; } if (cells[i][0] >= 0) { ret[(int) cells[i][1]] = getByte(afile, buffer, bufferOffset, cells[i][0] * size); if (ret[(int) cells[i][1]] < 0) { ret[(int) cells[i][1]] += 256; } } else { ret[(int) cells[i][1]] = Float.NaN; } } } else if (datatype.equalsIgnoreCase("SHORT")) { size = 2; b = new byte[size]; for (i = 0; i < length; i++) { if (i > 0 && cells[i - 1][0] == cells[i][0]) { ret[(int) cells[i][1]] = ret[(int) cells[i - 1][1]]; continue; } if (cells[i][0] >= 0) { bufferOffset = getBytes(afile, buffer, bufferOffset, cells[i][0] * (long) size, b); if (byteorderLSB) { ret[(int) cells[i][1]] = (short) (((0xFF & b[1]) << 8) | (b[0] & 0xFF)); } else { ret[(int) cells[i][1]] = (short) (((0xFF & b[0]) << 8) | (b[1] & 0xFF)); } } else { ret[(int) cells[i][1]] = Float.NaN; } } } else if (datatype.equalsIgnoreCase("INT")) { size = 4; b = new byte[size]; for (i = 0; i < length; i++) { if (i > 0 && cells[i - 1][0] == cells[i][0]) { ret[(int) cells[i][1]] = ret[(int) cells[i - 1][1]]; continue; } if (cells[i][0] >= 0) { bufferOffset = getBytes(afile, buffer, bufferOffset, cells[i][0] * (long) size, b); if (byteorderLSB) { ret[(int) cells[i][1]] = ((0xFF & b[3]) << 24) | ((0xFF & b[2]) << 16) + ((0xFF & b[1]) << 8) + (b[0] 
& 0xFF); } else { ret[(int) cells[i][1]] = ((0xFF & b[0]) << 24) | ((0xFF & b[1]) << 16) + ((0xFF & b[2]) << 8) + ((0xFF & b[3]) & 0xFF); } } else { ret[(int) cells[i][1]] = Float.NaN; } } } else if (datatype.equalsIgnoreCase("LONG")) { size = 8; b = new byte[size]; for (i = 0; i < length; i++) { if (i > 0 && cells[i - 1][0] == cells[i][0]) { ret[(int) cells[i][1]] = ret[(int) cells[i - 1][1]]; continue; } if (cells[i][0] >= 0) { bufferOffset = getBytes(afile, buffer, bufferOffset, cells[i][0] * (long) size, b); if (byteorderLSB) { ret[(int) cells[i][1]] = ((long) (0xFF & b[7]) << 56) + ((long) (0xFF & b[6]) << 48) + ((long) (0xFF & b[5]) << 40) + ((long) (0xFF & b[4]) << 32) + ((long) (0xFF & b[3]) << 24) + ((long) (0xFF & b[2]) << 16) + ((long) (0xFF & b[1]) << 8) + (0xFF & b[0]); } else { ret[(int) cells[i][1]] = ((long) (0xFF & b[0]) << 56) + ((long) (0xFF & b[1]) << 48) + ((long) (0xFF & b[2]) << 40) + ((long) (0xFF & b[3]) << 32) + ((long) (0xFF & b[4]) << 24) + ((long) (0xFF & b[5]) << 16) + ((long) (0xFF & b[6]) << 8) + (0xFF & b[7]); } } else { ret[(int) cells[i][1]] = Float.NaN; } } } else if (datatype.equalsIgnoreCase("FLOAT")) { size = 4; b = new byte[size]; for (i = 0; i < length; i++) { if (i > 0 && cells[i - 1][0] == cells[i][0]) { ret[(int) cells[i][1]] = ret[(int) cells[i - 1][1]]; continue; } if (cells[i][0] >= 0) { bufferOffset = getBytes(afile, buffer, bufferOffset, cells[i][0] * (long) size, b); ByteBuffer bb = ByteBuffer.wrap(b); if (byteorderLSB) { bb.order(ByteOrder.LITTLE_ENDIAN); } ret[(int) cells[i][1]] = bb.getFloat(); } else { ret[(int) cells[i][1]] = Float.NaN; } } } else if (datatype.equalsIgnoreCase("DOUBLE")) { size = 8; b = new byte[8]; for (i = 0; i < length; i++) { if (i > 0 && cells[i - 1][0] == cells[i][0]) { ret[(int) cells[i][1]] = ret[(int) cells[i - 1][1]]; continue; } if (cells[i][0] >= 0) { getBytes(afile, buffer, bufferOffset, cells[i][0] * (long) size, b); ByteBuffer bb = ByteBuffer.wrap(b); if (byteorderLSB) { bb.order(ByteOrder.LITTLE_ENDIAN); } ret[(int) cells[i][1]] = (float) bb.getDouble(); } else { ret[(int) cells[i][1]] = Float.NaN; } } } else { logger.error("datatype not supported in Grid.getValues: " + datatype); // / should not happen; catch anyway... for (i = 0; i < length; i++) { ret[i] = Float.NaN; } } //replace not a number for (i = 0; i < length; i++) { if ((float) ret[i] == (float) nodatavalue) { ret[i] = Float.NaN; } else { ret[i] *= rescale; } } return ret; } catch (Exception e) { logger.error("error getting grid file values", e); } finally { if (afile != null) { try { afile.close(); } catch (Exception e) { logger.error(e.getMessage(), e); } } } return null; }
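In the SHORT/INT/LONG branches above, Grid assembles values by shifting raw bytes manually when byteorderLSB is set, while the FLOAT/DOUBLE branches delegate to ByteBuffer.order(ByteOrder.LITTLE_ENDIAN). The two approaches are equivalent; a minimal sketch with hypothetical byte values showing that the manual little-endian short assembly matches the ByteBuffer result:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class ManualVsByteBuffer {
    public static void main(String[] args) {
        byte[] b = { (byte) 0x34, (byte) 0x12 }; // hypothetical 2-byte little-endian value
        // manual assembly, as in the SHORT branch when byteorderLSB is true
        short manual = (short) (((0xFF & b[1]) << 8) | (b[0] & 0xFF));
        // same bytes interpreted through a little-endian ByteBuffer
        short viaBuffer = ByteBuffer.wrap(b).order(ByteOrder.LITTLE_ENDIAN).getShort();
        System.out.println(manual == viaBuffer);          // true
        System.out.println(Integer.toHexString(manual));  // 1234
    }
}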
From source file:edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.sav.SAVFileReader.java
void decodeRecordType3and4(BufferedInputStream stream) throws IOException { dbgLog.fine("decodeRecordType3and4(): start"); Map<String, Map<String, String>> valueLabelTable = new LinkedHashMap<String, Map<String, String>>(); int safteyCounter = 0; while (true) { try {/*from w w w. ja va 2 s.c o m*/ if (stream == null) { throw new IllegalArgumentException("stream == null!"); } // this secton may not exit so first check the 4-byte header value //if (stream.markSupported()){ stream.mark(1000); //} // 3.0 check the first 4 bytes byte[] headerCode = new byte[LENGTH_RECORD_TYPE3_CODE]; int nbytes_rt3 = stream.read(headerCode, 0, LENGTH_RECORD_TYPE3_CODE); // to-do check against nbytes //printHexDump(headerCode, "RT3 header test"); ByteBuffer bb_header_code = ByteBuffer.wrap(headerCode, 0, LENGTH_RECORD_TYPE3_CODE); if (isLittleEndian) { bb_header_code.order(ByteOrder.LITTLE_ENDIAN); } int intRT3test = bb_header_code.getInt(); dbgLog.fine("header test value: RT3=" + intRT3test); if (intRT3test != 3) { //if (stream.markSupported()){ dbgLog.fine("iteration=" + safteyCounter); // We have encountered a record that's not type 3. This means we've // processed all the type 3/4 record pairs. So we want to rewind // the stream and return -- so that the appropriate record type // reader can be called on it. // But before we return, we need to save all the value labels // we have found: //smd.setValueLabelTable(valueLabelTable); assignValueLabels(valueLabelTable); stream.reset(); return; //} } // 3.1 how many value-label pairs follow byte[] number_of_labels = new byte[LENGTH_RT3_HOW_MANY_LABELS]; int nbytes_3_1 = stream.read(number_of_labels); if (nbytes_3_1 == 0) { throw new IOException("RT 3: reading recordType3.1: no byte was read"); } ByteBuffer bb_number_of_labels = ByteBuffer.wrap(number_of_labels, 0, LENGTH_RT3_HOW_MANY_LABELS); if (isLittleEndian) { bb_number_of_labels.order(ByteOrder.LITTLE_ENDIAN); } int numberOfValueLabels = bb_number_of_labels.getInt(); dbgLog.fine("number of value-label pairs=" + numberOfValueLabels); ByteBuffer[] tempBB = new ByteBuffer[numberOfValueLabels]; String valueLabel[] = new String[numberOfValueLabels]; for (int i = 0; i < numberOfValueLabels; i++) { // read 8-byte as value byte[] value = new byte[LENGTH_RT3_VALUE]; int nbytes_3_value = stream.read(value); if (nbytes_3_value == 0) { throw new IOException("RT 3: reading recordType3 value: no byte was read"); } // note these 8 bytes are interpreted later // currently no information about which variable's (=> type unknown) ByteBuffer bb_value = ByteBuffer.wrap(value, 0, LENGTH_RT3_VALUE); if (isLittleEndian) { bb_value.order(ByteOrder.LITTLE_ENDIAN); } tempBB[i] = bb_value; dbgLog.fine("bb_value=" + Hex.encodeHex(bb_value.array())); /* double valueD = bb_value.getDouble(); dbgLog.fine("value="+valueD); */ // read 1st byte as unsigned integer = label_length // read label_length byte as label byte[] labelLengthByte = new byte[LENGTH_RT3_LABEL_LENGTH]; int nbytes_3_label_length = stream.read(labelLengthByte); // add check-routine here dbgLog.fine("labelLengthByte" + Hex.encodeHex(labelLengthByte)); dbgLog.fine("label length = " + labelLengthByte[0]); // the net-length of a value label is saved as // unsigned byte; however, the length is less than 127 // byte should be ok int rawLabelLength = labelLengthByte[0] & 0xFF; dbgLog.fine("rawLabelLength=" + rawLabelLength); // -1 =>1-byte already read int labelLength = getSAVobsAdjustedBlockLength(rawLabelLength + 1) - 1; byte[] valueLabelBytes = new byte[labelLength]; int 
nbytes_3_value_label = stream.read(valueLabelBytes); // ByteBuffer bb_label = ByteBuffer.wrap(valueLabel,0,labelLength); valueLabel[i] = StringUtils.stripEnd( new String(Arrays.copyOfRange(valueLabelBytes, 0, rawLabelLength), defaultCharSet), " "); dbgLog.fine(i + "-th valueLabel=" + valueLabel[i] + "<-"); } // iter rt3 dbgLog.fine("end of RT3 block"); dbgLog.fine("start of RT4 block"); // 4.0 check the first 4 bytes byte[] headerCode4 = new byte[LENGTH_RECORD_TYPE4_CODE]; int nbytes_rt4 = stream.read(headerCode4, 0, LENGTH_RECORD_TYPE4_CODE); if (nbytes_rt4 == 0) { throw new IOException("RT4: reading recordType4 value: no byte was read"); } //printHexDump(headerCode4, "RT4 header test"); ByteBuffer bb_header_code_4 = ByteBuffer.wrap(headerCode4, 0, LENGTH_RECORD_TYPE4_CODE); if (isLittleEndian) { bb_header_code_4.order(ByteOrder.LITTLE_ENDIAN); } int intRT4test = bb_header_code_4.getInt(); dbgLog.fine("header test value: RT4=" + intRT4test); if (intRT4test != 4) { throw new IOException("RT 4: reading recordType4 header: no byte was read"); } // 4.1 read the how-many-variables bytes byte[] howManyVariablesfollow = new byte[LENGTH_RT4_HOW_MANY_VARIABLES]; int nbytes_rt4_1 = stream.read(howManyVariablesfollow, 0, LENGTH_RT4_HOW_MANY_VARIABLES); ByteBuffer bb_howManyVariablesfollow = ByteBuffer.wrap(howManyVariablesfollow, 0, LENGTH_RT4_HOW_MANY_VARIABLES); if (isLittleEndian) { bb_howManyVariablesfollow.order(ByteOrder.LITTLE_ENDIAN); } int howManyVariablesRT4 = bb_howManyVariablesfollow.getInt(); dbgLog.fine("how many variables follow: RT4=" + howManyVariablesRT4); int length_indicies = LENGTH_RT4_VARIABLE_INDEX * howManyVariablesRT4; byte[] variableIdicesBytes = new byte[length_indicies]; int nbytes_rt4_2 = stream.read(variableIdicesBytes, 0, length_indicies); // !!!!! Caution: variableIndex in RT4 starts from 1 NOT ** 0 ** int[] variableIndex = new int[howManyVariablesRT4]; int offset = 0; for (int i = 0; i < howManyVariablesRT4; i++) { ByteBuffer bb_variable_index = ByteBuffer.wrap(variableIdicesBytes, offset, LENGTH_RT4_VARIABLE_INDEX); offset += LENGTH_RT4_VARIABLE_INDEX; if (isLittleEndian) { bb_variable_index.order(ByteOrder.LITTLE_ENDIAN); } variableIndex[i] = bb_variable_index.getInt(); dbgLog.fine(i + "-th variable index number=" + variableIndex[i]); } dbgLog.fine("variable index set=" + ArrayUtils.toString(variableIndex)); dbgLog.fine("subtract 1 from variableIndex for getting a variable info"); boolean isNumeric = OBSwiseTypelList.get(variableIndex[0] - 1) == 0 ? 
true : false; Map<String, String> valueLabelPair = new LinkedHashMap<String, String>(); if (isNumeric) { // numeric variable dbgLog.fine("processing of a numeric value-label table"); for (int j = 0; j < numberOfValueLabels; j++) { valueLabelPair.put(doubleNumberFormatter.format(tempBB[j].getDouble()), valueLabel[j]); } } else { // String variable dbgLog.fine("processing of a string value-label table"); for (int j = 0; j < numberOfValueLabels; j++) { valueLabelPair.put( StringUtils.stripEnd(new String((tempBB[j].array()), defaultCharSet), " "), valueLabel[j]); } } dbgLog.fine("valueLabePair=" + valueLabelPair); dbgLog.fine("key variable's (raw) index =" + variableIndex[0]); valueLabelTable.put(OBSIndexToVariableName.get(variableIndex[0] - 1), valueLabelPair); dbgLog.fine("valueLabelTable=" + valueLabelTable); // create a mapping table that finds the key variable for this mapping table String keyVariableName = OBSIndexToVariableName.get(variableIndex[0] - 1); for (int vn : variableIndex) { valueVariableMappingTable.put(OBSIndexToVariableName.get(vn - 1), keyVariableName); } dbgLog.fine("valueVariableMappingTable:\n" + valueVariableMappingTable); } catch (IOException ex) { //ex.printStackTrace(); throw ex; } safteyCounter++; if (safteyCounter >= 1000000) { break; } } //while ///smd.setValueLabelTable(valueLabelTable); assignValueLabels(valueLabelTable); dbgLog.fine("***** decodeRecordType3and4(): end *****"); }
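The record-type probe used above — mark the stream, read a 4-byte code, interpret it in the file's byte order, and reset if it is not the expected type — is a reusable pattern. A minimal sketch under the assumption of a 4-byte type code; the type values and payload here are illustrative, not the SPSS layout:

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class RecordTypeProbe {
    // returns true and leaves the stream positioned after the code if it matches,
    // otherwise rewinds the stream so another reader can consume the record
    static boolean probe(BufferedInputStream in, int expectedType, boolean littleEndian) throws IOException {
        in.mark(8);
        byte[] code = new byte[4];
        if (in.read(code) != 4) {
            in.reset();
            return false;
        }
        ByteBuffer bb = ByteBuffer.wrap(code);
        if (littleEndian) {
            bb.order(ByteOrder.LITTLE_ENDIAN);
        }
        if (bb.getInt() == expectedType) {
            return true;
        }
        in.reset();
        return false;
    }

    public static void main(String[] args) throws IOException {
        byte[] data = { 3, 0, 0, 0, 42 }; // little-endian int 3 followed by one payload byte
        BufferedInputStream in = new BufferedInputStream(new ByteArrayInputStream(data));
        System.out.println(probe(in, 3, true)); // true; stream now points at the payload
    }
}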
From source file:edu.harvard.iq.dvn.ingest.statdataio.impl.plugins.dta.DTAFileReader.java
private void parseValueLabelsReleasel108(BufferedInputStream stream) throws IOException {
    dbgLog.fine("***** parseValueLabelsRelease108(): start *****");
    if (stream == null) {
        throw new IllegalArgumentException("stream == null!");
    }

    int nvar = (Integer) smd.getFileInformation().get("varQnty");
    int length_label_name = constantTable.get("NAME");
    int length_value_label_header = value_label_table_length + length_label_name
            + VALUE_LABEL_HEADER_PADDING_LENGTH;

    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("value_label_table_length=" + value_label_table_length);
    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("length_value_label_header=" + length_value_label_header);
    /*
       Seg  field               byte    type
       1-1. len_vlt(Seg.2)       4      int
       1-2. vlt_name            9/33    char+(\0) == name used in Sec2.part 5
       1-3. padding              3      byte
       -----------------------------------
                               16/40
       2-1. n(# of vls)          4      int
       2-2. m(len_labels)        4      int
       2-3. label_offsets       4*n     int[]
       2-4. values              4*n     int[]
       2-5. labels               m      char
     */
    for (int i = 0; i < nvar; i++) {
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("\n\n" + i + "th value-label table header");

        byte[] valueLabelHeader = new byte[length_value_label_header];

        // Part 1: reading the header of a value-label table if exists
        int nbytes = stream.read(valueLabelHeader, 0, length_value_label_header);
        if (nbytes == 0) {
            throw new IOException("reading value label header: no datum");
        }

        // 1.1 length_value_label_table
        ByteBuffer bb_value_label_header = ByteBuffer.wrap(valueLabelHeader, 0, value_label_table_length);
        if (isLittleEndian) {
            bb_value_label_header.order(ByteOrder.LITTLE_ENDIAN);
        }
        int length_value_label_table = bb_value_label_header.getInt();
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("length of this value-label table=" + length_value_label_table);

        // 1.2 labelName
        String rawLabelName = new String(Arrays.copyOfRange(valueLabelHeader, value_label_table_length,
                (value_label_table_length + length_label_name)), "ISO-8859-1");
        String labelName = getNullStrippedString(rawLabelName);

        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("label name = " + labelName + "\n");
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine(i + "-th value-label table");

        // Part 2: reading the value-label table
        byte[] valueLabelTable_i = new byte[length_value_label_table];
        int noBytes = stream.read(valueLabelTable_i, 0, length_value_label_table);
        if (noBytes == 0) {
            throw new IOException("reading value label table: no datum");
        }

        // 2-1. 4-byte-integer: number of units in this table (n)
        int valueLabelTable_offset = 0;
        ByteBuffer bb_value_label_pairs = ByteBuffer.wrap(valueLabelTable_i, valueLabelTable_offset,
                value_label_table_length);
        if (isLittleEndian) {
            bb_value_label_pairs.order(ByteOrder.LITTLE_ENDIAN);
        }
        int no_value_label_pairs = bb_value_label_pairs.getInt();
        valueLabelTable_offset += value_label_table_length;
        if (dbgLog.isLoggable(Level.FINE))
            dbgLog.fine("no_value_label_pairs=" + no_value_label_pairs);

        // 2-2. 4-byte-integer: length of the label section (m bytes)
        ByteBuffer bb_length_label_segment = ByteBuffer.wrap(valueLabelTable_i, valueLabelTable_offset,
                value_label_table_length);
        if (isLittleEndian) {
            bb_length_label_segment.order(ByteOrder.LITTLE_ENDIAN);
        }
        int length_label_segment = bb_length_label_segment.getInt();
        valueLabelTable_offset += value_label_table_length;

        // 2-3. 4-byte-integer array (4xm): offset values for the label sec.
        // these "label offsets" actually appear to represent the byte
        // offsets of the label strings, as stored in the next section.
        // as of now, these are not used for anything, and the code
        // below assumes that the labels are already in the same
        // order as the numeric values! -- L.A.
        int[] label_offsets = new int[no_value_label_pairs];
        int byte_offset = valueLabelTable_offset;
        for (int j = 0; j < no_value_label_pairs; j++) {
            // note: 4-byte signed, not java's long
            ByteBuffer bb_label_offset = ByteBuffer.wrap(valueLabelTable_i, byte_offset,
                    value_label_table_length);
            if (isLittleEndian) {
                bb_label_offset.order(ByteOrder.LITTLE_ENDIAN);
                dbgLog.fine("label offset: byte reversed");
            }
            label_offsets[j] = bb_label_offset.getInt();
            dbgLog.fine("label offset [" + j + "]: " + label_offsets[j]);
            byte_offset += value_label_table_length;
        }

        // 2-4. 4-byte-integer array (4xm): value array (sorted)
        dbgLog.fine("value array");
        int[] valueList = new int[no_value_label_pairs];
        int offset_value = byte_offset;
        for (int k = 0; k < no_value_label_pairs; k++) {
            ByteBuffer bb_value_list = ByteBuffer.wrap(valueLabelTable_i, offset_value,
                    value_label_table_length);
            if (isLittleEndian) {
                bb_value_list.order(ByteOrder.LITTLE_ENDIAN);
            }
            valueList[k] = bb_value_list.getInt();
            offset_value += value_label_table_length;
        }

        // 2-5. m-byte chars that store label data (m units of labels)
        String label_segment = new String(
                Arrays.copyOfRange(valueLabelTable_i, offset_value, (length_label_segment + offset_value)),
                "ISO-8859-1");

        // L.A. -- 2011.2.25:
        // This assumes that the labels are already stored in the right
        // order: (see my comment for the section 2.3 above)
        //String[] labelList = label_segment.split("\0");
        // Instead, we should be using the offset values obtained in
        // the section 2.3 above, and select the corresponding
        // substrings:
        String[] labelList = new String[no_value_label_pairs];
        for (int l = 0; l < no_value_label_pairs; l++) {
            String lblString = null;
            int lblOffset = label_offsets[l];
            lblString = label_segment.substring(lblOffset);
            int nullIndx = lblString.indexOf('\000');
            if (nullIndx > -1) {
                lblString = lblString.substring(0, nullIndx);
            }
            labelList[l] = lblString;
        }
        // this should work! -- L.A.
        // (TODO: check the v105 value label parsing method, to see if
        // something similar applies there)

        Map<String, String> tmpValueLabelTable = new LinkedHashMap<String, String>();
        for (int l = 0; l < no_value_label_pairs; l++) {
            if (dbgLog.isLoggable(Level.FINE))
                dbgLog.fine(l + "-th pair:" + valueList[l] + "[" + labelList[l] + "]");
            tmpValueLabelTable.put(Integer.toString(valueList[l]), labelList[l]);
        }
        valueLabelTable.put(labelName, tmpValueLabelTable);

        if (stream.available() == 0) {
            // reached the end of this file
            // do exit-processing
            dbgLog.fine("***** reached the end of the file at " + i + "th value-label Table *****");
            break;
        }
    } // for loop

    if (dbgLog.isLoggable(Level.FINE))
        dbgLog.fine("valueLabelTable:\n" + valueLabelTable);
    smd.setValueLabelTable(valueLabelTable);

    dbgLog.fine("***** parseValueLabelsRelease108(): end *****");
}
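The loops above wrap a fresh 4-byte ByteBuffer for every offset and every value. When a block of consecutive little-endian ints has to be read from a byte[], wrapping once and letting the buffer advance (or viewing it through asIntBuffer()) is an equivalent, slightly tidier alternative. A sketch under the assumption that the ints are contiguous; the data is invented:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.IntBuffer;

public class BulkLittleEndianInts {
    public static void main(String[] args) {
        // hypothetical table fragment: three contiguous little-endian ints 1, 2, 3
        byte[] table = { 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 };

        // option 1: one ByteBuffer, repeated relative getInt() calls
        ByteBuffer bb = ByteBuffer.wrap(table).order(ByteOrder.LITTLE_ENDIAN);
        int[] values = new int[3];
        for (int i = 0; i < values.length; i++) {
            values[i] = bb.getInt();
        }

        // option 2: view the same bytes as an IntBuffer
        IntBuffer ints = ByteBuffer.wrap(table).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer();
        System.out.println(values[2] == ints.get(2)); // true
    }
}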
From source file:nodomain.freeyourgadget.gadgetbridge.service.devices.pebble.PebbleProtocol.java
private ArrayList<Pair<Integer, Object>> decodeDict(ByteBuffer buf) {
    ArrayList<Pair<Integer, Object>> dict = new ArrayList<>();
    buf.order(ByteOrder.LITTLE_ENDIAN);
    byte dictSize = buf.get();
    while (dictSize-- > 0) {
        Integer key = buf.getInt();
        byte type = buf.get();
        short length = buf.getShort();
        switch (type) {
            case TYPE_INT:
            case TYPE_UINT:
                if (length == 1) {
                    dict.add(new Pair<Integer, Object>(key, buf.get()));
                } else if (length == 2) {
                    dict.add(new Pair<Integer, Object>(key, buf.getShort()));
                } else {
                    dict.add(new Pair<Integer, Object>(key, buf.getInt()));
                }
                break;
            case TYPE_CSTRING:
            case TYPE_BYTEARRAY:
                byte[] bytes = new byte[length];
                buf.get(bytes);
                if (type == TYPE_BYTEARRAY) {
                    dict.add(new Pair<Integer, Object>(key, bytes));
                } else {
                    dict.add(new Pair<Integer, Object>(key, new String(bytes)));
                }
                break;
            default:
        }
    }
    return dict;
}
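decodeDict relies on buf.order(ByteOrder.LITTLE_ENDIAN) being applied before any getInt()/getShort() call; reading the same bytes with the default big-endian order silently yields different numbers rather than an error. A minimal sketch of that pitfall (the bytes are arbitrary test data, not a real Pebble dictionary):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class WrongOrderPitfall {
    public static void main(String[] args) {
        byte[] raw = { 0x10, 0x00 }; // intended as little-endian short 16

        short asLittleEndian = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN).getShort();
        short asBigEndian = ByteBuffer.wrap(raw).getShort(); // default order is BIG_ENDIAN

        System.out.println(asLittleEndian); // 16
        System.out.println(asBigEndian);    // 4096
    }
}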
From source file:edu.harvard.iq.dvn.ingest.statdataio.impl.plugins.sav.SAVFileReader.java
void decodeRecordType6(BufferedInputStream stream) throws IOException {
    dbgLog.fine("***** decodeRecordType6(): start *****");
    try {
        if (stream == null) {
            throw new IllegalArgumentException("stream == null!");
        }
        // this section may not exist, so first check the 4-byte header value
        //if (stream.markSupported()){
        stream.mark(1000);
        //}

        // 6.0 check the first 4 bytes
        byte[] headerCodeRt6 = new byte[LENGTH_RECORD_TYPE6_CODE];
        int nbytes_rt6 = stream.read(headerCodeRt6, 0, LENGTH_RECORD_TYPE6_CODE);
        // to-do check against nbytes
        //printHexDump(headerCodeRt6, "RT6 header test");
        ByteBuffer bb_header_code_rt6 = ByteBuffer.wrap(headerCodeRt6, 0, LENGTH_RECORD_TYPE6_CODE);
        if (isLittleEndian) {
            bb_header_code_rt6.order(ByteOrder.LITTLE_ENDIAN);
        }

        int intRT6test = bb_header_code_rt6.getInt();
        dbgLog.fine("RT6: header test value=" + intRT6test);
        if (intRT6test != 6) {
            //if (stream.markSupported()){
            //out.print("iteration="+safteyCounter);
            //dbgLog.fine("iteration="+safteyCounter);
            dbgLog.fine("intRT6test failed=" + intRT6test);
            stream.reset();
            return;
            //}
        }

        // 6.1 check 4-byte integer that tells how many lines follow
        byte[] length_how_many_line_bytes = new byte[LENGTH_RT6_HOW_MANY_LINES];
        int nbytes_rt6_1 = stream.read(length_how_many_line_bytes, 0, LENGTH_RT6_HOW_MANY_LINES);
        // to-do check against nbytes
        //printHexDump(length_how_many_line_bytes, "RT6 how_many_line_bytes");
        ByteBuffer bb_how_many_lines = ByteBuffer.wrap(length_how_many_line_bytes, 0, LENGTH_RT6_HOW_MANY_LINES);
        if (isLittleEndian) {
            bb_how_many_lines.order(ByteOrder.LITTLE_ENDIAN);
        }
        int howManyLinesRt6 = bb_how_many_lines.getInt();
        dbgLog.fine("how Many lines follow=" + howManyLinesRt6);

        // 6.2 read 80-char-long lines
        String[] documentRecord = new String[howManyLinesRt6];
        for (int i = 0; i < howManyLinesRt6; i++) {
            byte[] line = new byte[80];
            int nbytes_rt6_line = stream.read(line);
            documentRecord[i] = StringUtils.stripEnd(
                    new String(Arrays.copyOfRange(line, 0, LENGTH_RT6_DOCUMENT_LINE), defaultCharSet), " ");
            dbgLog.fine(i + "-th line =" + documentRecord[i] + "<-");
        }
        dbgLog.fine("documentRecord:\n" + StringUtils.join(documentRecord, "\n"));
    } catch (IOException ex) {
        //ex.printStackTrace();
        throw ex;
    }
    dbgLog.fine("***** decodeRecordType6(): end *****");
}
From source file:edu.hawaii.soest.kilonalu.dvp2.DavisWxParser.java
/**
 * get the value from the timeOfSunrise field
 *
 * @return timeOfSunrise - the timeOfSunrise value as a String
 */
public String getTimeOfSunrise() {
    this.timeOfSunrise.flip();
    short timeOfSunrise = this.timeOfSunrise.order(ByteOrder.LITTLE_ENDIAN).getShort();
    double timeOfSunriseAsDouble = (double) timeOfSunrise;
    timeOfSunriseAsDouble = timeOfSunriseAsDouble / 100;
    String hour = "";
    String minute = "";
    String timeOfSunriseString = "";

    // use the modulo to get the fraction and integer of the time
    double fraction = timeOfSunriseAsDouble % 1;
    double integral = timeOfSunriseAsDouble - fraction;
    fraction = fraction * 100;
    int integralInt = (new Double(integral)).intValue();

    // convert the integral part to an hour string
    hour = String.format("%02d", (Object) integralInt);

    // convert the fractional part to a minute string
    minute = String.format("%02d", (Object) Math.round(fraction));

    timeOfSunriseString = hour + ":" + minute;
    return timeOfSunriseString;
}
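The Davis station packs a time of day into a little-endian short as hour*100 + minute (e.g. 645 means 06:45), which the method above unpacks via floating-point modulo arithmetic. The same decoding can be done with integer division; a minimal sketch, assuming that hour*100 + minute encoding:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class SunriseTimeSketch {
    public static void main(String[] args) {
        byte[] field = { (byte) 0x85, 0x02 }; // little-endian short 645, i.e. 06:45
        short packed = ByteBuffer.wrap(field).order(ByteOrder.LITTLE_ENDIAN).getShort();
        int hour = packed / 100;   // 6
        int minute = packed % 100; // 45
        System.out.println(String.format("%02d:%02d", hour, minute)); // 06:45
    }
}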
From source file:au.org.ala.layers.intersect.Grid.java
public void mergeMissingValues(Grid sourceOfMissingValues, boolean hideMissing) {
    float[] cells = sourceOfMissingValues.getGrid();
    float[] actual = getGrid();
    int length = actual.length;
    int i;
    RandomAccessFile afile = null;
    File f2 = new File(filename + ".GRI");
    try {
        //read of random access file can throw an exception
        if (!f2.exists()) {
            afile = new RandomAccessFile(filename + ".gri", "rw");
        } else {
            afile = new RandomAccessFile(filename + ".GRI", "rw");
        }

        byte[] b = new byte[(int) afile.length()];
        ByteBuffer bb = ByteBuffer.wrap(b);

        if (byteorderLSB) {
            bb.order(ByteOrder.LITTLE_ENDIAN);
        }

        afile.seek(0);

        if (datatype.equalsIgnoreCase("UBYTE")) {
            for (i = 0; i < length; i++) {
                if (hideMissing == Float.isNaN(cells[i])) {
                    if (nodatavalue >= 128) {
                        bb.put((byte) (nodatavalue - 256));
                    } else {
                        bb.put((byte) nodatavalue);
                    }
                } else {
                    if (actual[i] >= 128) {
                        bb.put((byte) (actual[i] - 256));
                    } else {
                        bb.put((byte) actual[i]);
                    }
                }
            }
        } else if (datatype.equalsIgnoreCase("BYTE")) {
            for (i = 0; i < length; i++) {
                bb.put((byte) actual[i]);
            }
        } else if (datatype.equalsIgnoreCase("SHORT")) {
            for (i = 0; i < length; i++) {
                if (hideMissing == Float.isNaN(cells[i])) {
                    bb.putShort((short) nodatavalue);
                } else {
                    bb.putShort((short) actual[i]);
                }
            }
        } else if (datatype.equalsIgnoreCase("INT")) {
            for (i = 0; i < length; i++) {
                if (hideMissing == Float.isNaN(cells[i])) {
                    bb.putInt((int) nodatavalue);
                } else {
                    bb.putInt((int) actual[i]);
                }
            }
        } else if (datatype.equalsIgnoreCase("LONG")) {
            for (i = 0; i < length; i++) {
                if (hideMissing == Float.isNaN(cells[i])) {
                    bb.putLong((long) nodatavalue);
                } else {
                    bb.putLong((long) actual[i]);
                }
            }
        } else if (datatype.equalsIgnoreCase("FLOAT")) {
            for (i = 0; i < length; i++) {
                if (hideMissing == Float.isNaN(cells[i])) {
                    bb.putFloat((float) nodatavalue);
                } else {
                    bb.putFloat(actual[i]);
                }
            }
        } else if (datatype.equalsIgnoreCase("DOUBLE")) {
            for (i = 0; i < length; i++) {
                if (hideMissing == Float.isNaN(cells[i])) {
                    bb.putDouble((double) nodatavalue);
                } else {
                    bb.putDouble((double) actual[i]);
                }
            }
        } else {
            // should not happen
            logger.error("unsupported grid data type: " + datatype);
        }

        afile.write(bb.array());
    } catch (Exception e) {
        logger.error("error getting grid file values", e);
    } finally {
        if (afile != null) {
            try {
                afile.close();
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
    }
}
From source file:edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.dta.DTAFileReader.java
private void decodeData(BufferedInputStream stream) throws IOException { dbgLog.fine("\n***** decodeData(): start *****"); if (stream == null) { throw new IllegalArgumentException("stream == null!"); }// w w w . j a va 2s .co m //int nvar = (Integer)smd.getFileInformation().get("varQnty"); int nvar = dataTable.getVarQuantity().intValue(); //int nobs = (Integer)smd.getFileInformation().get("caseQnty"); int nobs = dataTable.getCaseQuantity().intValue(); if (dbgLog.isLoggable(Level.FINE)) { dbgLog.fine("data dimensions[observations x variables] = (" + nobs + "x" + nvar + ")"); } if (dbgLog.isLoggable(Level.FINE)) { dbgLog.fine("bytes per row=" + bytes_per_row + " bytes"); } if (dbgLog.isLoggable(Level.FINE)) { dbgLog.fine("variableTypes=" + Arrays.deepToString(variableTypes)); } if (dbgLog.isLoggable(Level.FINE)) { dbgLog.fine("StringLengthTable=" + StringLengthTable); } // create a File object to save the tab-delimited data file FileOutputStream fileOutTab = null; PrintWriter pwout = null; File tabDelimitedDataFile = File.createTempFile("tempTabfile.", ".tab"); // save the temp tab-delimited file in the return ingest object: ingesteddata.setTabDelimitedFile(tabDelimitedDataFile); fileOutTab = new FileOutputStream(tabDelimitedDataFile); pwout = new PrintWriter(new OutputStreamWriter(fileOutTab, "utf8"), true); /* Should we lose this dateFormat thing in 4.0? * the UNF should be calculatable on the app side solely from the data * stored in the tab file and the type information stored the dataVariable * object. * furthermore, the very idea of storing a format entry not just for * every variable, but for every value/observation is a bit strange. * TODO: review and confirm that, in the 3.* implementation, every * entry in dateFormat[nvar][*] is indeed the same - except for the * missing value entries. -- L.A. 4.0 (OK, I got rid of the dateFormat; instead I kinda sorta assume that the format is the same for every value in a column, save for the missing values... like this: dataTable.getDataVariables().get(columnCounter).setFormatSchemaName(ddt.format); BUT, this needs to be reviewed/confirmed etc! */ //String[][] dateFormat = new String[nvar][nobs]; for (int i = 0; i < nobs; i++) { byte[] dataRowBytes = new byte[bytes_per_row]; Object[] dataRow = new Object[nvar]; int nbytes = stream.read(dataRowBytes, 0, bytes_per_row); if (nbytes == 0) { String errorMessage = "reading data: no data were read at(" + i + "th row)"; throw new IOException(errorMessage); } // decoding each row int byte_offset = 0; for (int columnCounter = 0; columnCounter < variableTypes.length; columnCounter++) { Integer varType = variableTypeMap.get(variableTypes[columnCounter]); // 4.0 Check if this is a time/date variable: boolean isDateTimeDatum = false; String formatCategory = dataTable.getDataVariables().get(columnCounter).getFormatCategory(); if (formatCategory != null && (formatCategory.equals("time") || formatCategory.equals("date"))) { isDateTimeDatum = true; } String variableFormat = dateVariableFormats[columnCounter]; switch (varType != null ? 
varType : 256) { case -5: // Byte case // note: 1 byte signed byte byte_datum = dataRowBytes[byte_offset]; if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column byte =" + byte_datum); } if (byte_datum >= BYTE_MISSING_VALUE) { if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column byte MV=" + byte_datum); } dataRow[columnCounter] = MissingValueForTabDelimitedFile; } else { dataRow[columnCounter] = byte_datum; } byte_offset++; break; case -4: // Stata-int (=java's short: 2byte) case // note: 2-byte signed int, not java's int ByteBuffer int_buffer = ByteBuffer.wrap(dataRowBytes, byte_offset, 2); if (isLittleEndian) { int_buffer.order(ByteOrder.LITTLE_ENDIAN); } short short_datum = int_buffer.getShort(); if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column stata int =" + short_datum); } if (short_datum >= INT_MISSIG_VALUE) { if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column stata long missing value=" + short_datum); } dataRow[columnCounter] = MissingValueForTabDelimitedFile; } else { if (isDateTimeDatum) { DecodedDateTime ddt = decodeDateTimeData("short", variableFormat, Short.toString(short_datum)); if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row , decodedDateTime " + ddt.decodedDateTime + ", format=" + ddt.format); } dataRow[columnCounter] = ddt.decodedDateTime; //dateFormat[columnCounter][i] = ddt.format; dataTable.getDataVariables().get(columnCounter).setFormat(ddt.format); } else { dataRow[columnCounter] = short_datum; } } byte_offset += 2; break; case -3: // stata-Long (= java's int: 4 byte) case // note: 4-byte singed, not java's long //dbgLog.fine("DATreader: stata long"); ByteBuffer long_buffer = ByteBuffer.wrap(dataRowBytes, byte_offset, 4); if (isLittleEndian) { long_buffer.order(ByteOrder.LITTLE_ENDIAN); } int int_datum = long_buffer.getInt(); if (dbgLog.isLoggable(Level.FINE)) { //dbgLog.fine(i + "-th row " + columnCounter // + "=th column stata long =" + int_datum); } if (int_datum >= LONG_MISSING_VALUE) { if (dbgLog.isLoggable(Level.FINE)) { //dbgLog.fine(i + "-th row " + columnCounter // + "=th column stata long missing value=" + int_datum); } dataRow[columnCounter] = MissingValueForTabDelimitedFile; } else { if (isDateTimeDatum) { DecodedDateTime ddt = decodeDateTimeData("int", variableFormat, Integer.toString(int_datum)); if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row , decodedDateTime " + ddt.decodedDateTime + ", format=" + ddt.format); } dataRow[columnCounter] = ddt.decodedDateTime; dataTable.getDataVariables().get(columnCounter).setFormat(ddt.format); } else { dataRow[columnCounter] = int_datum; } } byte_offset += 4; break; case -2: // float case // note: 4-byte ByteBuffer float_buffer = ByteBuffer.wrap(dataRowBytes, byte_offset, 4); if (isLittleEndian) { float_buffer.order(ByteOrder.LITTLE_ENDIAN); } float float_datum = float_buffer.getFloat(); if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column float =" + float_datum); } if (FLOAT_MISSING_VALUE_SET.contains(float_datum)) { if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column float missing value=" + float_datum); } dataRow[columnCounter] = MissingValueForTabDelimitedFile; } else { if (isDateTimeDatum) { DecodedDateTime ddt = decodeDateTimeData("float", variableFormat, doubleNumberFormatter.format(float_datum)); if 
(dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row , decodedDateTime " + ddt.decodedDateTime + ", format=" + ddt.format); } dataRow[columnCounter] = ddt.decodedDateTime; dataTable.getDataVariables().get(columnCounter).setFormat(ddt.format); } else { dataRow[columnCounter] = float_datum; // This may be temporary - but for now (as in, while I'm testing // 4.0 ingest against 3.* ingest, I need to be able to tell if a // floating point value was a single, or double float in the // original STATA file: -- L.A. Jul. 2014 dataTable.getDataVariables().get(columnCounter).setFormat("float"); } } byte_offset += 4; break; case -1: // double case // note: 8-byte ByteBuffer double_buffer = ByteBuffer.wrap(dataRowBytes, byte_offset, 8); if (isLittleEndian) { double_buffer.order(ByteOrder.LITTLE_ENDIAN); } double double_datum = double_buffer.getDouble(); if (DOUBLE_MISSING_VALUE_SET.contains(double_datum)) { if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column double missing value=" + double_datum); } dataRow[columnCounter] = MissingValueForTabDelimitedFile; } else { if (isDateTimeDatum) { DecodedDateTime ddt = decodeDateTimeData("double", variableFormat, doubleNumberFormatter.format(double_datum)); if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row , decodedDateTime " + ddt.decodedDateTime + ", format=" + ddt.format); } dataRow[columnCounter] = ddt.decodedDateTime; dataTable.getDataVariables().get(columnCounter).setFormat(ddt.format); } else { dataRow[columnCounter] = doubleNumberFormatter.format(double_datum); } } byte_offset += 8; break; case 0: // String case int strVarLength = StringLengthTable.get(columnCounter); String raw_datum = new String( Arrays.copyOfRange(dataRowBytes, byte_offset, (byte_offset + strVarLength)), "ISO-8859-1"); // TODO: // is it the right thing to do, to default to "ISO-8859-1"? // (it may be; since there's no mechanism for specifying // alternative encodings in Stata, this may be their default; // it just needs to be verified. -- L.A. Jul. 2014) String string_datum = getNullStrippedString(raw_datum); if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column string =" + string_datum); } if (string_datum.isEmpty()) { if (dbgLog.isLoggable(Level.FINER)) { dbgLog.finer(i + "-th row " + columnCounter + "=th column string missing value=" + string_datum); } // TODO: /* Is this really a missing value case? * Or is it an honest empty string? * Is there such a thing as a missing value for a String in Stata? * -- L.A. 4.0 */ dataRow[columnCounter] = MissingValueForTabDelimitedFile; } else { /* * Some special characters, like new lines and tabs need to * be escaped - otherwise they will break our TAB file * structure! * But before we escape anything, all the back slashes * already in the string need to be escaped themselves. 
*/ String escapedString = string_datum.replace("\\", "\\\\"); // escape quotes: escapedString = escapedString.replaceAll("\"", Matcher.quoteReplacement("\\\"")); // escape tabs and new lines: escapedString = escapedString.replaceAll("\t", Matcher.quoteReplacement("\\t")); escapedString = escapedString.replaceAll("\n", Matcher.quoteReplacement("\\n")); escapedString = escapedString.replaceAll("\r", Matcher.quoteReplacement("\\r")); // the escaped version of the string is stored in the tab file // enclosed in double-quotes; this is in order to be able // to differentiate between an empty string (tab-delimited empty string in // double quotes) and a missing value (tab-delimited empty string). // Although the question still remains - is it even possible // to store an empty string, that's not a missing value, in Stata? // - see the comment in the missing value case above. -- L.A. 4.0 dataRow[columnCounter] = "\"" + escapedString + "\""; } byte_offset += strVarLength; break; default: dbgLog.fine("unknown variable type found"); String errorMessage = "unknow variable Type found at data section"; throw new InvalidObjectException(errorMessage); } // switch } // for-columnCounter // Dump the row of data to the tab-delimited file we are producing: pwout.println(StringUtils.join(dataRow, "\t")); if (dbgLog.isLoggable(Level.FINE)) { //dbgLog.fine(i + "-th row's data={" + StringUtils.join(dataRow, ",") + "};"); } } // for- i (row) pwout.close(); if (dbgLog.isLoggable(Level.FINE)) { dbgLog.fine("variableTypes:\n" + Arrays.deepToString(variableTypes)); } dbgLog.fine("DTA Ingest: decodeData(): end."); }
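Each Stata data row above is decoded by wrapping small slices of the row's byte[] at a running byte_offset and applying ByteOrder.LITTLE_ENDIAN before getShort()/getInt()/getFloat()/getDouble(). A stripped-down sketch of that fixed-width row walk; the two-column record layout here is invented for illustration:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class FixedWidthRowSketch {
    public static void main(String[] args) {
        // hypothetical row: a 2-byte short followed by a 4-byte float, both little-endian
        ByteBuffer out = ByteBuffer.allocate(6).order(ByteOrder.LITTLE_ENDIAN);
        out.putShort((short) 7).putFloat(2.5f);
        byte[] row = out.array();

        int byteOffset = 0;
        short col0 = ByteBuffer.wrap(row, byteOffset, 2).order(ByteOrder.LITTLE_ENDIAN).getShort();
        byteOffset += 2;
        float col1 = ByteBuffer.wrap(row, byteOffset, 4).order(ByteOrder.LITTLE_ENDIAN).getFloat();
        byteOffset += 4;

        System.out.println(col0 + "\t" + col1); // 7    2.5
    }
}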
From source file:edu.hawaii.soest.kilonalu.dvp2.DavisWxParser.java
/**
 * get the value from the timeOfSunset field
 *
 * @return timeOfSunset - the timeOfSunset value as a String
 */
public String getTimeOfSunset() {
    this.timeOfSunset.flip();
    short timeOfSunset = this.timeOfSunset.order(ByteOrder.LITTLE_ENDIAN).getShort();
    double timeOfSunsetAsDouble = (double) timeOfSunset;
    timeOfSunsetAsDouble = timeOfSunsetAsDouble / 100;
    String hour = "";
    String minute = "";
    String timeOfSunsetString = "";

    // use the modulo to get the fraction and integer of the time
    double fraction = timeOfSunsetAsDouble % 1;
    double integral = timeOfSunsetAsDouble - fraction;
    fraction = fraction * 100;
    int integralInt = (new Double(integral)).intValue();

    // convert the integral part to an hour string
    hour = String.format("%02d", (Object) integralInt);

    // convert the fractional part to a minute string
    minute = String.format("%02d", (Object) Math.round(fraction));

    timeOfSunsetString = hour + ":" + minute;
    return timeOfSunsetString;
}