List of usage examples for java.io.DataInputStream.skip
public long skip(long n) throws IOException
Skips over and discards n bytes of data from the input stream. The skip method may, for a variety of reasons, end up skipping over some smaller number of bytes, possibly 0; the actual number of bytes skipped is returned.
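Because a single call to skip may consume fewer bytes than requested, code that must advance an exact distance on a file- or network-backed stream loops until the full count is gone. Below is a minimal sketch of that idiom; the skipFully helper name is ours, not part of the JDK (DataInputStream does ship skipBytes(int), but that method returns the possibly smaller number of bytes actually skipped rather than failing):

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public final class SkipUtil {

    /** Skips exactly n bytes, looping because skip() may advance less than requested. */
    public static void skipFully(DataInputStream dis, long n) throws IOException {
        long remaining = n;
        while (remaining > 0) {
            long skipped = dis.skip(remaining);
            if (skipped > 0) {
                remaining -= skipped;
            } else if (dis.read() >= 0) {
                // skip() made no progress; consume one byte by reading to
                // distinguish a stubborn stream from end-of-stream
                remaining--;
            } else {
                throw new EOFException(remaining + " bytes left to skip at end of stream");
            }
        }
    }
}

Several of the examples below use exactly this pattern inline; others rely on stream-specific guarantees that make a single call safe.

From source file:com.facebook.infrastructure.db.SuperColumn.java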
public void skip(DataInputStream dis) throws IOException {
    defreezeSuperColumn(dis);
    /* read the number of columns stored */
    dis.readInt();
    /* read the size of all columns to skip */
    int size = dis.readInt();
    dis.skip(size);
}
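The serializer this skip method pairs with writes the total byte size of the columns immediately after the column count, so a reader that does not care about the contents can discard the entire region with one readInt plus one dis.skip(size). The Column and SuperColumn deserializers below lean on the same length-prefixed layout.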
From source file:com.facebook.infrastructure.db.Column.java
/**
 * Here we need to get the column and apply the filter.
 */
public IColumn deserialize(DataInputStream dis, IFilter filter) throws IOException {
    if (dis.available() == 0)
        return null;

    String name = dis.readUTF();
    IColumn column = new Column(name);
    column = filter.filter(column, dis);
    if (column != null) {
        column = defreeze(dis, name);
    } else {
        /* Skip a boolean and the timestamp */
        dis.skip(DBConstants.boolSize_ + DBConstants.tsSize_);
        int size = dis.readInt();
        dis.skip(size);
    }
    return column;
}
From source file:org.apache.cassandra.db.SuperColumn.java
public IColumn deserialize(DataInputStream dis, IFilter filter) throws IOException {
    if (dis.available() == 0)
        return null;

    IColumn superColumn = defreezeSuperColumn(dis);
    superColumn = filter.filter(superColumn, dis);
    if (superColumn != null) {
        fillSuperColumn(superColumn, dis);
        return superColumn;
    } else {
        /* read the number of columns stored */
        dis.readInt();
        /* read the size of all columns to skip */
        int size = dis.readInt();
        dis.skip(size);
        return null;
    }
}
From source file:org.kse.crypto.filetype.CryptoFileUtil.java
/**
 * Detect the KeyStore type contained in the supplied file.
 *
 * @param is Input stream to detect type for
 * @return KeyStore type or null if none matched
 * @throws IOException If an I/O problem occurred
 */
public static KeyStoreType detectKeyStoreType(InputStream is) throws IOException {
    byte[] contents = ReadUtil.readFully(is);

    DataInputStream dis = null;
    try {
        dis = new DataInputStream(new ByteArrayInputStream(contents));

        // If less than 4 bytes are available it isn't a KeyStore
        if (dis.available() < 4) {
            return null;
        }

        // Read first integer (4 bytes)
        int i1 = dis.readInt();

        // Test for JKS - starts with appropriate magic number
        if (i1 == JKS_MAGIC_NUMBER) {
            return JKS;
        }

        // Test for JCEKS - starts with appropriate magic number
        if (i1 == JCEKS_MAGIC_NUMBER) {
            return JCEKS;
        }

        // Test for BKS and UBER
        // Both start with a version number of 0, 1 or 2
        if ((i1 == 0) || (i1 == 1) || (i1 == 2)) {
            /*
             * For BKS and UBER the last 20 bytes of the file are the SHA-1
             * hash while the byte before that is an ASN1Null (0) indicating
             * the end of the store. UBER, however, encrypts the store
             * content making it highly unlikely that the ASN1Null end byte
             * will be preserved. Therefore if the 21st byte from the end of
             * the file is an ASN1Null then the KeyStore is BKS.
             */
            if (contents.length < 26) {
                // Insufficient bytes to be BKS or UBER
                return null;
            }

            // Skip to 21st from last byte (file length minus 21 and the 4 bytes already read)
            dis.skip(contents.length - 25);

            // Read what may be the null byte
            if (dis.readByte() == 0) {
                // Found null byte - BKS/BKS-V1
                if (i1 == 1) {
                    return BKS_V1;
                } else {
                    return BKS;
                }
            } else {
                // No null byte - UBER
                return UBER;
            }
        }
    } finally {
        IOUtils.closeQuietly(dis);
    }

    // @formatter:off
    /*
     * Test for PKCS #12. ASN.1 should look like this:
     *
     * PFX ::= ASN1Sequence { version ASN1Integer {v3(3)}(v3,...),
     * authSafe ContentInfo, macData MacData OPTIONAL }
     */
    // @formatter:on

    ASN1Primitive pfx = null;
    try {
        pfx = ASN1Primitive.fromByteArray(contents);
    } catch (IOException e) {
        // If it cannot be parsed as ASN.1, it is certainly not a PKCS #12 key store
        return null;
    }

    // Is a sequence...
    if ((pfx != null) && (pfx instanceof ASN1Sequence)) {
        // Has two or three components...
        ASN1Sequence sequence = (ASN1Sequence) pfx;
        if ((sequence.size() == 2) || (sequence.size() == 3)) {
            // ...the first of which is a version of 3
            ASN1Encodable firstComponent = sequence.getObjectAt(0);
            if (firstComponent instanceof ASN1Integer) {
                ASN1Integer version = (ASN1Integer) firstComponent;
                if (version.getValue().intValue() == 3) {
                    return PKCS12;
                }
            }
        }
    }

    // KeyStore type not recognised
    return null;
}
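A single dis.skip(...) call is enough here because the stream wraps an in-memory ByteArrayInputStream, whose skip implementation always advances by the full requested amount (capped only by the bytes remaining), so no retry loop is required.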
From source file:com.facebook.infrastructure.db.SuperColumn.java
public IColumn deserialize(DataInputStream dis, IFilter filter) throws IOException {
    if (dis.available() == 0)
        return null;

    IColumn superColumn = defreezeSuperColumn(dis);
    superColumn = filter.filter(superColumn, dis);
    if (superColumn != null) {
        if (!superColumn.isMarkedForDelete())
            fillSuperColumn(superColumn, dis);
        return superColumn;
    } else {
        /* read the number of columns stored */
        dis.readInt();
        /* read the size of all columns to skip */
        int size = dis.readInt();
        dis.skip(size);
        return null;
    }
}
From source file:net.sf.keystore_explorer.crypto.filetype.CryptoFileUtil.java
/**
 * Detect the KeyStore type contained in the supplied file.
 *
 * @param is Input stream to detect type for
 * @return KeyStore type or null if none matched
 * @throws IOException If an I/O problem occurred
 */
public static KeyStoreType detectKeyStoreType(InputStream is) throws IOException {
    byte[] contents = ReadUtil.readFully(is);

    DataInputStream dis = null;
    try {
        dis = new DataInputStream(new ByteArrayInputStream(contents));

        // If less than 4 bytes are available it isn't a KeyStore
        if (dis.available() < 4) {
            return null;
        }

        // Read first integer (4 bytes)
        int i1 = dis.readInt();

        // Test for JKS - starts with appropriate magic number
        if (i1 == JKS_MAGIC_NUMBER) {
            return JKS;
        }

        // Test for HTKS - starts with appropriate magic number
        if (i1 == HTKS_MAGIC_NUMBER) {
            return HTKS;
        }

        // Test for JCEKS - starts with appropriate magic number
        if (i1 == JCEKS_MAGIC_NUMBER) {
            return JCEKS;
        }

        // Test for BKS and UBER
        // Both start with a version number of 0, 1 or 2
        if ((i1 == 0) || (i1 == 1) || (i1 == 2)) {
            /*
             * For BKS and UBER the last 20 bytes of the file are the SHA-1
             * hash while the byte before that is an ASN1Null (0) indicating
             * the end of the store. UBER, however, encrypts the store
             * content making it highly unlikely that the ASN1Null end byte
             * will be preserved. Therefore if the 21st byte from the end of
             * the file is an ASN1Null then the KeyStore is BKS.
             */
            if (contents.length < 26) {
                // Insufficient bytes to be BKS or UBER
                return null;
            }

            // Skip to 21st from last byte (file length minus 21 and the 4 bytes already read)
            dis.skip(contents.length - 25);

            // Read what may be the null byte
            if (dis.readByte() == 0) {
                // Found null byte - BKS/BKS-V1
                if (i1 == 1) {
                    return BKS_V1;
                } else {
                    return BKS;
                }
            } else {
                // No null byte - UBER
                return UBER;
            }
        }
    } finally {
        IOUtils.closeQuietly(dis);
    }

    // @formatter:off
    /*
     * Test for PKCS #12. ASN.1 should look like this:
     *
     * PFX ::= ASN1Sequence { version ASN1Integer {v3(3)}(v3,...),
     * authSafe ContentInfo, macData MacData OPTIONAL }
     */
    // @formatter:on

    ASN1Primitive pfx = null;
    try {
        pfx = ASN1Primitive.fromByteArray(contents);
    } catch (IOException e) {
        // If it cannot be parsed as ASN.1, it is certainly not a PKCS #12 key store
        return null;
    }

    // Is a sequence...
    if ((pfx != null) && (pfx instanceof ASN1Sequence)) {
        // Has two or three components...
        ASN1Sequence sequence = (ASN1Sequence) pfx;
        if ((sequence.size() == 2) || (sequence.size() == 3)) {
            // ...the first of which is a version of 3
            ASN1Encodable firstComponent = sequence.getObjectAt(0);
            if (firstComponent instanceof ASN1Integer) {
                ASN1Integer version = (ASN1Integer) firstComponent;
                if (version.getValue().intValue() == 3) {
                    return PKCS12;
                }
            }
        }
    }

    // KeyStore type not recognised
    return null;
}
From source file:com.facebook.infrastructure.db.Column.java
/**
 * We know the name of the column here so just return it.
 * Filter is pretty much useless in this call and is ignored.
 */
public IColumn deserialize(DataInputStream dis, String columnName, IFilter filter) throws IOException {
    if (dis.available() == 0)
        return null;

    IColumn column = null;
    String name = dis.readUTF();
    if (name.equals(columnName)) {
        column = defreeze(dis, name);
        if (filter instanceof IdentityFilter) {
            /*
             * If this is being called with an identity filter, then since a
             * column name is passed in we know that this is a final call.
             * Hence if the column is found, set the filter to done so that
             * we do not look for the column in further files.
             */
            IdentityFilter f = (IdentityFilter) filter;
            f.setDone();
        }
    } else {
        /* Skip a boolean and the timestamp */
        dis.skip(DBConstants.boolSize_ + DBConstants.tsSize_);
        int size = dis.readInt();
        dis.skip(size);
    }
    return column;
}
From source file:org.carbondata.processing.util.LevelSortIndexWriterThread.java
private MemberSortModel[] getLevelData() throws IOException {
    DataInputStream fileChannel = null;
    long currPositionIndex = 0;
    long size = 0;
    ByteBuffer buffer = null;
    boolean enableEncoding = Boolean.valueOf(
            CarbonProperties.getInstance().getProperty(CarbonCommonConstants.ENABLE_BASE64_ENCODING,
                    CarbonCommonConstants.ENABLE_BASE64_ENCODING_DEFAULT));
    try {
        fileChannel = FileFactory.getDataInputStream(levelFilePath, FileFactory.getFileType(levelFilePath));
        CarbonFile memberFile = FileFactory.getCarbonFile(levelFilePath,
                FileFactory.getFileType(levelFilePath));
        size = memberFile.getSize() - 4;
        // skip() may advance by fewer bytes than requested, so loop until the
        // full distance to the trailing int has been covered
        long actualSkipSize = 0;
        while (actualSkipSize != size) {
            actualSkipSize += fileChannel.skip(size - actualSkipSize);
        }
        maxSurrogate = fileChannel.readInt();
    } catch (IOException e) {
        LOGGER.error(e, "problem while reading the level file");
        throw e;
    } finally {
        CarbonUtil.closeStreams(fileChannel);
    }
    try {
        fileChannel = FileFactory.getDataInputStream(levelFilePath, FileFactory.getFileType(levelFilePath));
        buffer = ByteBuffer.allocate((int) size);
        fileChannel.readFully(buffer.array());
        buffer.rewind();
    } catch (IOException e) {
        LOGGER.error(e, "problem while reading the level file");
        throw e;
    } finally {
        CarbonUtil.closeStreams(fileChannel);
    }
    minSurrogate = buffer.getInt();
    MemberSortModel[] surogateKeyArrays = new MemberSortModel[maxSurrogate - minSurrogate + 1];
    int surrogateKeyIndex = minSurrogate;
    currPositionIndex += 4;
    int current = 0;
    while (currPositionIndex < size) {
        int len = buffer.getInt();
        currPositionIndex += 4;
        byte[] rowBytes = new byte[len];
        buffer.get(rowBytes);
        currPositionIndex += len;
        String memberName = null;
        if (!memberDataType.equals(DataType.STRING)) {
            if (enableEncoding) {
                memberName = new String(Base64.decodeBase64(rowBytes), Charset.defaultCharset());
            } else {
                memberName = new String(rowBytes, Charset.defaultCharset());
            }
            surogateKeyArrays[current] = new MemberSortModel(surrogateKeyIndex, memberName, null,
                    memberDataType);
        } else {
            if (enableEncoding) {
                rowBytes = Base64.decodeBase64(rowBytes);
            }
            surogateKeyArrays[current] = new MemberSortModel(surrogateKeyIndex, null, rowBytes,
                    memberDataType);
        }
        surrogateKeyIndex++;
        current++;
    }
    return surogateKeyArrays;
}
From source file:org.apache.giraph.graph.BspServiceWorker.java
@Override
public void loadCheckpoint(long superstep) {
    // Algorithm:
    // Examine all the partition owners and load the ones that match my
    // hostname and id from the master designated checkpoint prefixes.
    long startPos = 0;
    int loadedPartitions = 0;
    for (PartitionOwner partitionOwner : workerGraphPartitioner.getPartitionOwners()) {
        if (partitionOwner.getWorkerInfo().equals(getWorkerInfo())) {
            String metadataFile = partitionOwner.getCheckpointFilesPrefix() + CHECKPOINT_METADATA_POSTFIX;
            String partitionsFile = partitionOwner.getCheckpointFilesPrefix() + CHECKPOINT_VERTICES_POSTFIX;
            try {
                int partitionId = -1;
                DataInputStream metadataStream = getFs().open(new Path(metadataFile));
                int partitions = metadataStream.readInt();
                for (int i = 0; i < partitions; ++i) {
                    startPos = metadataStream.readLong();
                    partitionId = metadataStream.readInt();
                    if (partitionId == partitionOwner.getPartitionId()) {
                        break;
                    }
                }
                if (partitionId != partitionOwner.getPartitionId()) {
                    throw new IllegalStateException("loadCheckpoint: " + partitionOwner + " not found!");
                }
                metadataStream.close();
                Partition<I, V, E, M> partition = new Partition<I, V, E, M>(getConfiguration(), partitionId);
                DataInputStream partitionsStream = getFs().open(new Path(partitionsFile));
                if (partitionsStream.skip(startPos) != startPos) {
                    throw new IllegalStateException(
                            "loadCheckpoint: Failed to skip " + startPos + " on " + partitionsFile);
                }
                partition.readFields(partitionsStream);
                partitionsStream.close();
                if (LOG.isInfoEnabled()) {
                    LOG.info("loadCheckpoint: Loaded partition " + partition);
                }
                if (getPartitionMap().put(partitionId, partition) != null) {
                    throw new IllegalStateException(
                            "loadCheckpoint: Already has partition owner " + partitionOwner);
                }
                ++loadedPartitions;
            } catch (IOException e) {
                throw new RuntimeException(
                        "loadCheckpoint: Failed to get partition owner " + partitionOwner, e);
            }
        }
    }
    if (LOG.isInfoEnabled()) {
        LOG.info("loadCheckpoint: Loaded " + loadedPartitions + " partitions out of "
                + workerGraphPartitioner.getPartitionOwners().size() + " total.");
    }
    // Communication service needs to set up the connections prior to
    // processing vertices
    commService.setup();
}
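Unlike most examples on this page, this one checks the return value of skip and treats a short skip as a fatal error rather than retrying. That is a pragmatic choice for the HDFS streams used here, but on streams where partial skips are routine, the retry loop sketched at the top of this page is the safer idiom.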
From source file:org.lemurproject.galago.core.parse.WARCRecord.java
private static byte[] readNextRecord(DataInputStream in, StringBuffer headerBuffer) throws IOException {
    if (in == null) {
        return null;
    }
    if (headerBuffer == null) {
        return null;
    }

    // NOTE: cannot use a buffered reader here; the header must be read
    // directly from the stream
    String line = null;
    byte[] retContent = null;
    boolean foundMark = findNextWARCRecord(in);

    // no WARC mark?
    if (!foundMark) {
        return null;
    }

    int contentLength = -1;
    // read until we see contentLength then an empty line
    // (to handle malformed ClueWeb09 headers that have blank lines)
    // get the content length and set our retContent
    for (line = readLineFromInputStream(in).trim(); line.length() > 0
            || contentLength < 0; line = readLineFromInputStream(in).trim()) {
        if (line.length() > 0) {
            headerBuffer.append(line);
            headerBuffer.append(LINE_ENDING);

            // find the content length designated by Content-Length: <length>
            String[] parts = line.split(":", 2);
            if (parts.length == 2 && parts[0].equals("Content-Length")) {
                try {
                    contentLength = Integer.parseInt(parts[1].trim());
                    // if this document is too long, skip its content and
                    // move on to the next record
                    if (contentLength > MAX_CONTENT_LENGTH) {
                        in.skip(contentLength);
                        if (!findNextWARCRecord(in)) {
                            return null;
                        }
                        headerBuffer.delete(0, headerBuffer.length());
                    }
                } catch (NumberFormatException nfEx) {
                    contentLength = -1;
                }
            }
        }
    }

    // now read the bytes of the content
    retContent = new byte[contentLength];
    int totalWant = contentLength;
    int totalRead = 0;

    // strip a leading CR + LF so the last few characters of the content are
    // not cut off when reading
    while ((totalRead == 0) && (totalRead < contentLength)) {
        byte CR = in.readByte();
        byte LF = in.readByte();
        if ((CR != 13) && (LF != 10)) {
            retContent[0] = CR;
            retContent[1] = LF;
            totalRead = 2;
            totalWant = contentLength - totalRead;
        }
    }

    while (totalRead < contentLength) {
        try {
            int numRead = in.read(retContent, totalRead, totalWant);
            if (numRead < 0) {
                return null;
            } else {
                totalRead += numRead;
                totalWant = contentLength - totalRead;
            }
        } catch (EOFException eofEx) {
            // resize to what we have
            if (totalRead > 0) {
                byte[] newReturn = new byte[totalRead];
                System.arraycopy(retContent, 0, newReturn, 0, totalRead);
                return newReturn;
            } else {
                return null;
            }
        }
    }
    return retContent;
}