List of usage examples for java.io.DataInputStream.mark(int readlimit)
public synchronized void mark(int readlimit)
From source file:cn.xiongyihui.wificar.MjpegStream.java
// Reads one MJPEG frame from the stream and decodes it into a Bitmap.
// Relies on mark()/reset(): the position is remembered before the header is
// scanned so the frame bytes can be re-read from the same point afterwards.
public Bitmap readFrame(DataInputStream in) throws IOException {
    int mContentLength = -1;
    // Remember the current position; FRAME_MAX_LENGTH is the read limit
    // within which reset() stays valid.
    in.mark(FRAME_MAX_LENGTH);
    // Distance to the JPEG start-of-image marker = header length.
    // NOTE(review): presumably getStartOfSequence scans forward from the
    // current position -- confirm against its definition.
    int headerLen = getStartOfSequence(in, SOI_MARKER);
    in.reset(); // rewind to the marked position
    byte[] header = new byte[headerLen];
    in.readFully(header);
    try {
        // Preferred path: frame size taken from the Content-Length header.
        mContentLength = parseContentLength(header);
    } catch (NumberFormatException nfe) {
        // No parseable Content-Length; fall back to scanning for the
        // end-of-frame marker. ("Seqeunce" misspelling is the helper's
        // actual name in this class.)
        mContentLength = getEndOfSeqeunce(in, EOF_MARKER);
    }
    in.reset(); // rewind again so the skip below is measured from the mark
    byte[] frameData = new byte[mContentLength];
    in.skipBytes(headerLen); // jump past the header to the JPEG payload
    in.readFully(frameData);
    return BitmapFactory.decodeStream(new ByteArrayInputStream(frameData));
}
From source file:org.apache.axiom.util.blob.WritableBlobTestBase.java
public void testMarkReset() throws IOException { byte[] sourceData1 = new byte[2000]; byte[] sourceData2 = new byte[2000]; random.nextBytes(sourceData1);// ww w . j ava 2 s . c o m random.nextBytes(sourceData2); WritableBlob blob = createBlob(); try { OutputStream out = blob.getOutputStream(); out.write(sourceData1); out.write(sourceData2); out.close(); DataInputStream in = new DataInputStream(blob.getInputStream()); byte[] data1 = new byte[sourceData1.length]; byte[] data2 = new byte[sourceData2.length]; in.readFully(data1); in.mark(sourceData2.length); in.readFully(data2); in.reset(); in.readFully(data2); assertTrue(Arrays.equals(sourceData1, data1)); assertTrue(Arrays.equals(sourceData2, data2)); } finally { releaseBlob(blob); } }
From source file:org.apache.fop.fonts.type1.PFBParser.java
/**
 * Parses a PFB file into a PFBData object.
 * @param in InputStream to load the PFB file from
 * @return PFBData memory representation of the font
 * @throws IOException In case of an I/O problem
 */
public PFBData parsePFB(InputStream in) throws IOException {
    PFBData pfb = new PFBData();
    BufferedInputStream buffered = new BufferedInputStream(in);
    DataInputStream data = new DataInputStream(buffered);
    // Peek at the first byte without consuming it: 0x80 (128) introduces a
    // PC-style segment header, anything else is treated as raw PFB data.
    data.mark(32);
    int leadByte = data.readUnsignedByte();
    data.reset();
    if (leadByte == 128) {
        pfb.setPFBFormat(PFBData.PFB_PC);
        parsePCFormat(pfb, data);
    } else {
        pfb.setPFBFormat(PFBData.PFB_RAW);
        // Raw parsing works on the buffered stream directly.
        parseRAWFormat(pfb, buffered);
    }
    return pfb;
}
From source file:org.apache.hadoop.hbase.HRegionInfo.java
/**
 * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was
 * serialized to the stream with {@link #toDelimitedByteArray()}.
 * @param in stream carrying either a protobuf-delimited or Writable-serialized region info
 * @return An instance of HRegionInfo.
 * @throws IOException if the magic prefix cannot be fully read
 */
public static HRegionInfo parseFrom(final DataInputStream in) throws IOException {
    // I need to be able to move back in the stream if this is not a pb serialization so I can
    // do the Writable decoding instead.
    int pblen = ProtobufUtil.lengthOfPBMagic();
    byte[] pbuf = new byte[pblen];
    if (in.markSupported()) { //read it with mark()
        in.mark(pblen);
    }
    int read = in.read(pbuf);
    //assumption: if Writable serialization, it should be longer than pblen.
    if (read != pblen)
        throw new IOException("read=" + read + ", wanted=" + pblen);
    if (ProtobufUtil.isPBMagicPrefix(pbuf)) {
        // pb serialization: the magic has been consumed, rest is delimited pb.
        return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in));
    } else {
        // Presume Writables. Need to reset the stream since it didn't start w/ pb.
        if (in.markSupported()) {
            in.reset();
            HRegionInfo hri = new HRegionInfo();
            hri.readFields(in);
            return hri;
        } else {
            //we cannot use BufferedInputStream, it consumes more than we read from the underlying IS
            // Instead, stitch the already-consumed prefix bytes back in front
            // of the remaining stream.
            ByteArrayInputStream bais = new ByteArrayInputStream(pbuf);
            SequenceInputStream sis = new SequenceInputStream(bais, in); //concatenate input streams
            HRegionInfo hri = new HRegionInfo();
            hri.readFields(new DataInputStream(sis));
            return hri;
        }
    }
}
From source file:org.apache.hadoop.hdfs.qjournal.client.URLLogInputStream.java
/** * Read the header of fsedit log/*ww w .j av a2 s. c o m*/ * @param in fsedit stream * @return the edit log version number * @throws IOException if error occurs */ static int readLogVersion(DataInputStream in) throws IOException, LogHeaderCorruptException { int logVersion = 0; // Read log file version. Could be missing. in.mark(4); // If edits log is greater than 2G, available method will return negative // numbers, so we avoid having to call available boolean available = true; try { logVersion = in.readByte(); } catch (EOFException e) { available = false; } if (available) { in.reset(); logVersion = in.readInt(); if (logVersion < FSConstants.LAYOUT_VERSION) { // future version throw new LogHeaderCorruptException("Unexpected version of the file system log file: " + logVersion + ". Current version = " + FSConstants.LAYOUT_VERSION + "."); } } return logVersion; }
From source file:org.apache.hadoop.hdfs.server.namenode.bookkeeper.BookKeeperEditLogInputStream.java
/** * Safely reads the log version from the stream. Logic is exactly the same * as in the equivalent {@link EditLogFileInputStream} method. * @see EditLogFileInputStream#readLogVersion(DataInputStream) * @return The log version or 0 if stream is empty *///from w w w. j ava 2 s. c o m private static int readLogVersion(DataInputStream in) throws IOException { int logVersion = 0; in.mark(4); // See comments in EditLogFileInputStream as to why readLogVersion is // implemented in this way boolean available = true; try { logVersion = in.readByte(); } catch (EOFException e) { available = false; } if (available) { in.reset(); logVersion = in.readInt(); if (logVersion < FSConstants.LAYOUT_VERSION) { throw new LedgerHeaderCorruptException("Unexpected version of the log segment in the ledger: " + logVersion + ". Current version is " + FSConstants.LAYOUT_VERSION + "."); } } return logVersion; }
From source file:org.apache.hadoop.hdfs.server.namenode.IngestLocal.java
/** * Load an edit log, and continue applying the changes to the in-memory * structure. This is where we ingest transactions into the standby. *//*from w w w. j av a 2s . c om*/ private int loadFSEdits(File edits) throws IOException { FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem(); FSDirectory fsDir = fsNamesys.dir; int numEdits = 0; int logVersion = 0; String clientName = null; String clientMachine = null; String path = null; int numOpAdd = 0, numOpClose = 0, numOpDelete = 0, numOpRename = 0, numOpSetRepl = 0, numOpMkDir = 0, numOpSetPerm = 0, numOpSetOwner = 0, numOpSetGenStamp = 0, numOpTimes = 0, numOpOther = 0; long startTime = FSNamesystem.now(); LOG.info("Ingest: Consuming transactions from file " + edits + " of size " + edits.length()); rp = new RandomAccessFile(edits, "r"); fp = new FileInputStream(rp.getFD()); // open for reads fc = rp.getChannel(); DataInputStream in = new DataInputStream(fp); try { // Read log file version. Could be missing. in.mark(4); // If edits log is greater than 2G, available method will return negative // numbers, so we avoid having to call available boolean available = true; try { logVersion = in.readByte(); } catch (EOFException e) { available = false; } if (available) { fc.position(0); // reset in = new DataInputStream(fp); logVersion = in.readInt(); if (logVersion != FSConstants.LAYOUT_VERSION) // future version throw new IOException("Ingest: Unexpected version of the file system log file: " + logVersion + ". 
Current version = " + FSConstants.LAYOUT_VERSION + "."); } assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION : "Unsupported version " + logVersion; currentPosition = fc.position(); numEdits = ingestFSEdits(edits, in, logVersion); // continue to ingest } finally { LOG.info("Ingest: Closing transactions file " + edits); fp.close(); } LOG.info("Ingest: Edits file " + edits.getName() + " of size " + edits.length() + " edits # " + numEdits + " loaded in " + (FSNamesystem.now() - startTime) / 1000 + " seconds."); if (LOG.isDebugEnabled()) { LOG.debug("Ingest: numOpAdd = " + numOpAdd + " numOpClose = " + numOpClose + " numOpDelete = " + numOpDelete + " numOpRename = " + numOpRename + " numOpSetRepl = " + numOpSetRepl + " numOpMkDir = " + numOpMkDir + " numOpSetPerm = " + numOpSetPerm + " numOpSetOwner = " + numOpSetOwner + " numOpSetGenStamp = " + numOpSetGenStamp + " numOpTimes = " + numOpTimes + " numOpOther = " + numOpOther); } if (logVersion != FSConstants.LAYOUT_VERSION) // other version numEdits++; // save this image asap return numEdits; }
From source file:org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer.java
/** * Check an fsimage datainputstream's version number. * * The datainput stream is returned at the same point as it was passed in; * this method has no effect on the datainputstream's read pointer. * * @param in Datainputstream of fsimage// w w w .j a va 2 s . co m * @return Filesystem layout version of fsimage represented by stream * @throws IOException If problem reading from in */ private int findImageVersion(DataInputStream in) throws IOException { in.mark(42); // arbitrary amount, resetting immediately int version = in.readInt(); in.reset(); return version; }
From source file:org.apache.synapse.util.TemporaryDataTest.java
public void testMarkReset() throws IOException { byte[] sourceData1 = new byte[2000]; byte[] sourceData2 = new byte[2000]; random.nextBytes(sourceData1);// w w w .ja v a 2 s . c om random.nextBytes(sourceData2); TemporaryData tmp = new TemporaryData(16, 512, "test", ".dat"); OutputStream out = tmp.getOutputStream(); out.write(sourceData1); out.write(sourceData2); out.close(); DataInputStream in = new DataInputStream(tmp.getInputStream()); byte[] data1 = new byte[sourceData1.length]; byte[] data2 = new byte[sourceData2.length]; in.readFully(data1); in.mark(sourceData2.length); in.readFully(data2); in.reset(); in.readFully(data2); assertTrue(Arrays.equals(sourceData1, data1)); assertTrue(Arrays.equals(sourceData2, data2)); }
From source file:org.apache.xmlgraphics.image.codec.png.PNGChunk.java
/**
 * Returns the PNG chunk type, a four letter case sensitive ASCII type/name.
 * @param distream the input stream
 * @return a four letter case sensitive ASCII type/name, or null on error
 */
public static String getChunkType(DataInputStream distream) {
    try {
        // A chunk starts with a 4-byte length followed by the 4-byte type;
        // mark/reset leaves the stream untouched for the actual chunk reader.
        distream.mark(8);
        distream.readInt(); // chunk length -- not needed here, but must be consumed
        int typeCode = distream.readInt();
        distream.reset();
        return typeIntToString(typeCode);
    } catch (Exception e) {
        // NOTE(review): broad catch + stack trace + null return preserved
        // exactly from the original contract; presumably callers treat null
        // as "no chunk" -- verify before tightening.
        e.printStackTrace();
        return null;
    }
}