Usage examples for java.io.RandomAccessFile.readFully(byte[], int, int)
public final void readFully(byte b[], int off, int len) throws IOException
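Unlike read(byte[], int, int), which may legally transfer fewer bytes than requested, readFully blocks until exactly len bytes have been stored in b starting at offset off. It throws EOFException if end-of-file is reached first, and IndexOutOfBoundsException if off or len describes an invalid range. A minimal sketch of that contract (the file name demo.bin is arbitrary):

import java.io.EOFException;
import java.io.IOException;
import java.io.RandomAccessFile;

public class ReadFullyContract {
    public static void main(String[] args) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile("demo.bin", "rw")) {
            raf.setLength(0);                  // start from an empty file
            raf.write(new byte[] { 1, 2, 3, 4 }); // four bytes in the file
            raf.seek(0);
            byte[] buf = new byte[8];
            try {
                raf.readFully(buf, 0, 8);      // asks for eight, only four exist
            } catch (EOFException expected) {
                System.out.println("EOFException: fewer than len bytes remained");
            }
        }
    }
}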
From source file:Main.java
public static void main(String[] args) {
    try {
        String s = "Hello world from java2s.com";
        RandomAccessFile raf = new RandomAccessFile("c:/test.txt", "rw");

        // write the string in modified UTF-8, preceded by a two-byte length
        raf.writeUTF(s);

        // set the file pointer back to position 0
        raf.seek(0);

        // destination buffer; readFully will fill indices 3..9
        byte[] arr = new byte[10];

        // read 7 bytes from the file into arr, starting at array offset 3
        raf.readFully(arr, 3, 7);

        // create a new string based on arr and print it
        String s2 = new String(arr);
        System.out.println(s2);

        raf.close();
    } catch (IOException ex) {
        ex.printStackTrace();
    }
}
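A note on the output: writeUTF first writes a two-byte length prefix, so the seven bytes read here are the prefix followed by "Hello". They land in arr[3]..arr[9], leaving arr[0]..arr[2] as zero bytes in the printed string.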
From source file:Main.java
public static void main(String args[]) throws Exception {
    RandomAccessFile fh1 = new RandomAccessFile("a.txt", "r");
    RandomAccessFile fh2 = new RandomAccessFile("b.txt", "r");
    long filesize1 = fh1.length();
    long filesize2 = fh2.length();

    // allocate two buffers large enough to hold the smaller file
    int bufsize = (int) Math.min(filesize1, filesize2);
    byte[] buffer1 = new byte[bufsize];
    byte[] buffer2 = new byte[bufsize];

    fh1.readFully(buffer1, 0, bufsize);
    fh2.readFully(buffer2, 0, bufsize);

    for (int i = 0; i < bufsize; i++) {
        if (buffer1[i] != buffer2[i]) {
            System.out.println("Files differ at offset " + i);
            break;
        }
    }
    fh1.close();
    fh2.close();
}
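readFully is the right call here because a single read(byte[], int, int) may return fewer bytes than requested; readFully either fills the requested range completely or throws EOFException, so the comparison loop never inspects uninitialized buffer contents.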
From source file:Diff.java
public static void main(String args[]) {
    RandomAccessFile fh1 = null;
    RandomAccessFile fh2 = null;
    int bufsize; // size of the smaller file
    long filesize1 = -1;
    long filesize2 = -1;
    byte buffer1[]; // the two file caches
    byte buffer2[];

    // validate the command-line arguments
    if (args.length < 2 || args[0].equals("?")) {
        System.err.println("USAGE: java Diff <file1> <file2> | ?");
        System.exit(0);
    }

    // open file ONE for reading
    try {
        fh1 = new RandomAccessFile(args[0], "r");
        filesize1 = fh1.length();
    } catch (IOException ioErr) {
        System.err.println("Could not find " + args[0]);
        System.err.println(ioErr);
        System.exit(100);
    }

    // open file TWO for reading
    try {
        fh2 = new RandomAccessFile(args[1], "r");
        filesize2 = fh2.length();
    } catch (IOException ioErr) {
        System.err.println("Could not find " + args[1]);
        System.err.println(ioErr);
        System.exit(100);
    }

    if (filesize1 != filesize2) {
        System.out.println("Files differ in size !");
        System.out.println("'" + args[0] + "' is " + filesize1 + " bytes");
        System.out.println("'" + args[1] + "' is " + filesize2 + " bytes");
    }

    // allocate two buffers large enough to hold the common prefix
    bufsize = (int) Math.min(filesize1, filesize2);
    buffer1 = new byte[bufsize];
    buffer2 = new byte[bufsize];

    try {
        fh1.readFully(buffer1, 0, bufsize);
        fh2.readFully(buffer2, 0, bufsize);

        for (int i = 0; i < bufsize; i++) {
            if (buffer1[i] != buffer2[i]) {
                System.out.println("Files differ at offset " + i);
                break;
            }
        }
    } catch (IOException ioErr) {
        System.err.println("ERROR: An exception occurred while processing the files");
        System.err.println(ioErr);
    } finally {
        try {
            fh1.close();
            fh2.close();
        } catch (IOException ignored) {
        }
    }
}
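Both comparison examples buffer the whole common prefix in memory, which fails for files larger than 2 GB (the cast to int) and is wasteful for big ones. Below is a chunked sketch of the same idea; the class name, method name, and CHUNK size are illustrative choices, not part of the original Diff example:

import java.io.IOException;
import java.io.RandomAccessFile;

class ChunkedDiff {
    // Sketch: compare two files window-by-window so memory use stays
    // constant even for multi-gigabyte inputs. CHUNK is an arbitrary size.
    static long firstDifference(RandomAccessFile a, RandomAccessFile b) throws IOException {
        final int CHUNK = 8192;
        byte[] ba = new byte[CHUNK];
        byte[] bb = new byte[CHUNK];
        long common = Math.min(a.length(), b.length());
        long pos = 0;
        a.seek(0);
        b.seek(0);
        while (pos < common) {
            int n = (int) Math.min(CHUNK, common - pos);
            // readFully guarantees each window is filled completely
            a.readFully(ba, 0, n);
            b.readFully(bb, 0, n);
            for (int i = 0; i < n; i++) {
                if (ba[i] != bb[i]) {
                    return pos + i; // offset of the first mismatch
                }
            }
            pos += n;
        }
        return -1; // identical over the common length
    }
}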
From source file:org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader.java
/**
 * Read the header at the beginning of the given block meta file.
 * The current file position will be altered by this method.
 * If an error occurs, the file is <em>not</em> closed.
 */
public static BlockMetadataHeader readHeader(RandomAccessFile raf) throws IOException {
    byte[] buf = new byte[getHeaderSize()];
    raf.seek(0);
    raf.readFully(buf, 0, buf.length);
    return readHeader(new DataInputStream(new ByteArrayInputStream(buf)));
}
From source file:org.apache.sling.commons.log.logback.internal.Tailer.java
/**
 * Returns the starting position of UNIX "tail -n".
 */
private long getTailStartPos(RandomAccessFile file, int n) throws IOException {
    int newlineCount = 0;
    long length = file.length();
    long pos = length - BUFFER_SIZE;
    int buffLength = BUFFER_SIZE;
    if (pos < 0) {
        pos = 0;
        buffLength = (int) length;
    }

    while (true) {
        file.seek(pos);
        file.readFully(buffer, 0, buffLength);

        // count newlines from the end of the window backwards
        for (int i = buffLength - 1; i >= 0; i--) {
            if ((char) buffer[i] == '\n') {
                newlineCount++;
                if (newlineCount >= n) {
                    pos += (i + 1);
                    return pos;
                }
            }
        }
        if (pos == 0) {
            break;
        }
        if (pos - BUFFER_SIZE < 0) {
            buffLength = (int) pos;
            pos = 0;
        } else {
            pos -= BUFFER_SIZE;
        }
    }
    return pos;
}
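The method scans the file backwards in BUFFER_SIZE windows (buffer and BUFFER_SIZE are fields of the Tailer class, not shown here), counting '\n' bytes from the end. As soon as the n-th newline from the end is found, it returns the position immediately after it, which is where "tail -n" output begins. readFully matters here because each window must be completely filled before it is scanned.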
From source file:org.openengsb.connector.promreport.internal.ProcessFileStore.java
private long findLastProcess(RandomAccessFile raf) throws IOException {
    final String proc = "</Process>";
    final byte[] bproc = proc.getBytes();
    final int len = proc.length();

    // scan backwards from just before the trailing "</WorkflowLog>" tag
    for (long i = raf.length() - "</Process></WorkflowLog>".length(); i >= 0; i--) {
        byte[] buf = new byte[len];
        raf.seek(i);
        raf.readFully(buf, 0, len);

        // compare the window against "</Process>" byte by byte
        int b;
        for (b = 0; b < len; b++) {
            if (buf[b] != bproc[b]) {
                break;
            }
        }
        if (b == len) {
            return i;
        }
    }
    return -1;
}
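The loop re-reads a len-byte window at every candidate offset until the window matches "</Process>" exactly, presumably so new entries can be spliced in before the closing "</WorkflowLog>" tag. Re-seeking and re-reading per position is O(n·len) in the worst case, which is acceptable for locating a tag near the end of the file.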
From source file:com.limegroup.gnutella.metadata.MP3DataEditor.java
/**
 * Actually writes the ID3 tags out to the ID3V1 section of the mp3 file.
 */
private int writeID3V1DataToDisk(RandomAccessFile file) {
    byte[] buffer = new byte[30]; // max field length; reused for every field

    // see if there are ID3 tags in the file
    String tag = "";
    try {
        file.readFully(buffer, 0, 3);
        tag = new String(buffer, 0, 3);
    } catch (EOFException e) {
        return LimeXMLReplyCollection.RW_ERROR;
    } catch (IOException e) {
        return LimeXMLReplyCollection.RW_ERROR;
    }

    // We are sure this is an MP3 file; otherwise this method would never
    // be called.
    if (!tag.equals("TAG")) {
        // write the TAG marker
        try {
            byte[] tagBytes = "TAG".getBytes(); // has to be length 3
            file.seek(file.length() - 128);     // reset the file pointer
            file.write(tagBytes, 0, 3);         // write these three bytes into the file
        } catch (IOException ioe) {
            return LimeXMLReplyCollection.BAD_ID3;
        }
    }

    LOG.debug("about to start writing to file");
    boolean b;
    b = toFile(title_, 30, file, buffer);
    if (!b)
        return LimeXMLReplyCollection.FAILED_TITLE;
    b = toFile(artist_, 30, file, buffer);
    if (!b)
        return LimeXMLReplyCollection.FAILED_ARTIST;
    b = toFile(album_, 30, file, buffer);
    if (!b)
        return LimeXMLReplyCollection.FAILED_ALBUM;
    b = toFile(year_, 4, file, buffer);
    if (!b)
        return LimeXMLReplyCollection.FAILED_YEAR;

    // comment and track (a little bit tricky)
    b = toFile(comment_, 28, file, buffer); // 28 bytes for the comment
    if (!b)
        return LimeXMLReplyCollection.FAILED_COMMENT;

    byte trackByte = (byte) -1; // initialize
    try {
        if (track_ == null || track_.equals(""))
            trackByte = (byte) 0;
        else
            trackByte = Byte.parseByte(track_);
    } catch (NumberFormatException nfe) {
        return LimeXMLReplyCollection.FAILED_TRACK;
    }
    try {
        file.write(0); // separator between comment and track (track is optional)
        file.write(trackByte);
    } catch (IOException e) {
        return LimeXMLReplyCollection.FAILED_TRACK;
    }

    // genre
    byte genreByte = getGenreByte();
    try {
        file.write(genreByte);
    } catch (IOException e) {
        return LimeXMLReplyCollection.FAILED_GENRE;
    }

    // getting this far means we are OK
    return LimeXMLReplyCollection.NORMAL;
}
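The writer above targets the ID3v1 block, which by convention occupies the final 128 bytes of an MP3 file: "TAG" (3 bytes), title (30), artist (30), album (30), year (4), comment (30), genre (1), with ID3v1.1 reusing the last two comment bytes as a zero separator plus a track number, exactly as the comment/track handling above does. A minimal companion sketch that reads such a block back with readFully (the class and method names are illustrative):

import java.io.IOException;
import java.io.RandomAccessFile;
import static java.nio.charset.StandardCharsets.ISO_8859_1;

class Id3v1Dump {
    static void dump(RandomAccessFile file) throws IOException {
        // the ID3v1 block, when present, is the final 128 bytes of the file
        byte[] tag = new byte[128];
        file.seek(file.length() - 128);
        file.readFully(tag, 0, 128);
        if (!"TAG".equals(new String(tag, 0, 3, ISO_8859_1))) {
            System.out.println("no ID3v1 tag");
            return;
        }
        System.out.println("title : " + new String(tag, 3, 30, ISO_8859_1).trim());
        System.out.println("artist: " + new String(tag, 33, 30, ISO_8859_1).trim());
        System.out.println("album : " + new String(tag, 63, 30, ISO_8859_1).trim());
        System.out.println("year  : " + new String(tag, 93, 4, ISO_8859_1).trim());
        // ID3v1.1: a zero byte at offset 125 marks byte 126 as the track number
        if (tag[125] == 0 && tag[126] != 0) {
            System.out.println("track : " + (tag[126] & 0xFF));
        }
        System.out.println("genre : " + (tag[127] & 0xFF));
    }
}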
From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.java
static private void truncateBlock(File blockFile, File metaFile, long oldlen, long newlen) throws IOException {
    LOG.info("truncateBlock: blockFile=" + blockFile + ", metaFile=" + metaFile + ", oldlen=" + oldlen
            + ", newlen=" + newlen);

    if (newlen == oldlen) {
        return;
    }
    if (newlen > oldlen) {
        throw new IOException(
                "Cannot truncate block from oldlen (=" + oldlen + ") to newlen (=" + newlen + ")");
    }

    DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum();
    int checksumsize = dcs.getChecksumSize();
    int bpc = dcs.getBytesPerChecksum();
    long n = (newlen - 1) / bpc + 1;
    long newmetalen = BlockMetadataHeader.getHeaderSize() + n * checksumsize;
    long lastchunkoffset = (n - 1) * bpc;
    int lastchunksize = (int) (newlen - lastchunkoffset);
    byte[] b = new byte[Math.max(lastchunksize, checksumsize)];

    RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
    try {
        // truncate blockFile
        blockRAF.setLength(newlen);

        // read the last chunk
        blockRAF.seek(lastchunkoffset);
        blockRAF.readFully(b, 0, lastchunksize);
    } finally {
        blockRAF.close();
    }

    // compute the checksum of the last chunk
    dcs.update(b, 0, lastchunksize);
    dcs.writeValue(b, 0, false);

    // update metaFile
    RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
    try {
        metaRAF.setLength(newmetalen);
        metaRAF.seek(newmetalen - checksumsize);
        metaRAF.write(b, 0, checksumsize);
    } finally {
        metaRAF.close();
    }
}
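A concrete walk-through of the chunk arithmetic: with bytesPerChecksum = 512 and newlen = 1000, n = (1000 - 1) / 512 + 1 = 2 chunks, lastchunkoffset = 512 and lastchunksize = 488. Only that final, now-partial chunk needs its checksum recomputed, which is why readFully re-reads exactly those 488 bytes after the block file has been truncated.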
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
void resetSubDomainCounts() throws IOException {
    LOG.info("*** LIST:" + getListId() + " Reset SubDomain Queued Counts.");

    if (_subDomainMetadataFile.exists()) {
        LOG.info("*** LIST:" + getListId() + " FILE EXISTS .");

        RandomAccessFile file = new RandomAccessFile(_subDomainMetadataFile, "rw");
        DataInputBuffer inputBuffer = new DataInputBuffer();
        DataOutputBuffer outputBuffer = new DataOutputBuffer(CrawlListMetadata.Constants.FixedDataSize);

        try {
            // skip version
            file.read();
            // read item count
            int itemCount = file.readInt();

            LOG.info("*** LIST:" + getListId() + " SUBDOMAIN ITEM COUNT:" + itemCount);

            CrawlListMetadata newMetadata = new CrawlListMetadata();

            for (int i = 0; i < itemCount; ++i) {
                long originalPos = file.getFilePointer();
                file.readFully(outputBuffer.getData(), 0, CrawlListMetadata.Constants.FixedDataSize);
                inputBuffer.reset(outputBuffer.getData(), CrawlListMetadata.Constants.FixedDataSize);
                try {
                    newMetadata.deserialize(inputBuffer, new BinaryProtocol());
                } catch (Exception e) {
                    LOG.error("-----Failed to Deserialize Metadata at Index:" + i + " Exception:"
                            + CCStringUtils.stringifyException(e));
                }

                // ok, reset everything except hashes and first/last url pointers
                int urlCount = newMetadata.getUrlCount();
                long firstRecordOffset = newMetadata.getFirstRecordOffset();
                long lastRecordOffset = newMetadata.getLastRecordOffset();
                String domainName = newMetadata.getDomainName();
                long domainHash = newMetadata.getDomainHash();

                // reset
                newMetadata.clear();
                // restore
                newMetadata.setUrlCount(urlCount);
                newMetadata.setFirstRecordOffset(firstRecordOffset);
                newMetadata.setLastRecordOffset(lastRecordOffset);
                newMetadata.setDomainName(domainName);
                newMetadata.setDomainHash(domainHash);

                // serialize it ...
                outputBuffer.reset();
                newMetadata.serialize(outputBuffer, new BinaryProtocol());

                // seek back to where the record started ...
                file.seek(originalPos);
                // and rewrite it ...
                file.write(outputBuffer.getData(), 0, CrawlListMetadata.Constants.FixedDataSize);
            }
        } finally {
            file.close();
        }
        LOG.info("*** LIST:" + getListId() + " DONE RESETTING SUBDOMAIN METADATA QUEUE COUNTS");
    }
}
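The in-place rewrite works only because every record is exactly CrawlListMetadata.Constants.FixedDataSize bytes: the code remembers the record's start with getFilePointer(), lets readFully advance past it, then seeks back and overwrites the same number of bytes, so no other record moves.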
From source file:org.commoncrawl.service.listcrawler.CrawlList.java
void loadSubDomainMetadataFromDisk() throws IOException {
    LOG.info("*** LIST:" + getListId() + " LOAD SUBDOMAIN METADATA FROM DISK ... ");
    if (_subDomainMetadataFile.exists()) {
        LOG.info("*** LIST:" + getListId() + " FILE EXISTS LOADING SUBDOMAIN DATA FROM DISK.");

        RandomAccessFile file = new RandomAccessFile(_subDomainMetadataFile, "rw");
        DataInputBuffer inputBuffer = new DataInputBuffer();
        byte fixedDataBlock[] = new byte[CrawlListMetadata.Constants.FixedDataSize];

        try {
            // skip version
            file.read();
            // read item count
            int itemCount = file.readInt();

            LOG.info("*** LIST:" + getListId() + " SUBDOMAIN ITEM COUNT:" + itemCount);

            CrawlListMetadata newMetadata = new CrawlListMetadata();

            TreeMap<Long, Integer> idToOffsetMap = new TreeMap<Long, Integer>();
            for (int i = 0; i < itemCount; ++i) {
                long originalPos = file.getFilePointer();
                file.readFully(fixedDataBlock, 0, fixedDataBlock.length);
                inputBuffer.reset(fixedDataBlock, fixedDataBlock.length);
                try {
                    newMetadata.deserialize(inputBuffer, new BinaryProtocol());
                } catch (Exception e) {
                    LOG.error("-----Failed to Deserialize Metadata at Index:" + i + " Exception:"
                            + CCStringUtils.stringifyException(e));
                }
                idToOffsetMap.put(newMetadata.getDomainHash(), (int) originalPos);
            }

            // write lookup table
            _offsetLookupTable = new DataOutputBuffer(idToOffsetMap.size() * OFFSET_TABLE_ENTRY_SIZE);
            for (Map.Entry<Long, Integer> entry : idToOffsetMap.entrySet()) {
                _offsetLookupTable.writeLong(entry.getKey());
                _offsetLookupTable.writeInt(entry.getValue());
            }
        } finally {
            file.close();
        }
        LOG.info("*** LIST:" + getListId() + " DONE LOADING SUBDOMAIN DATA FROM DISK");
    } else {
        LOG.info("*** LIST:" + getListId() + " SUBDOMAIN METADATA DOES NOT EXIST! LOADING FROM SCRATCH");

        RandomAccessFile fixedDataReader = new RandomAccessFile(_fixedDataFile, "rw");
        RandomAccessFile stringDataReader = new RandomAccessFile(_variableDataFile, "rw");

        try {
            // ok, rebuild top level metadata as well
            _metadata.clear();

            OnDiskCrawlHistoryItem item = new OnDiskCrawlHistoryItem();

            int processedCount = 0;
            while (fixedDataReader.getFilePointer() != fixedDataReader.length()) {
                long position = fixedDataReader.getFilePointer();

                // store offset in item
                item._fileOffset = position;
                // load from disk
                item.deserialize(fixedDataReader);
                try {
                    // seek to string data
                    stringDataReader.seek(item._stringsOffset);
                    // and skip buffer length
                    WritableUtils.readVInt(stringDataReader);
                    // and read primary string
                    String url = stringDataReader.readUTF();

                    // get metadata object for subdomain
                    CrawlListMetadata subDomainMetadata = getTransientSubDomainMetadata(url);

                    // increment url count
                    subDomainMetadata.setUrlCount(subDomainMetadata.getUrlCount() + 1);
                    // increment top level metadata count
                    _metadata.setUrlCount(_metadata.getUrlCount() + 1);

                    // update top level metadata ..
                    updateMetadata(item, _metadata, 0);
                    // update sub-domain metadata object from item data
                    updateMetadata(item, subDomainMetadata, 0);

                    ++processedCount;
                } catch (IOException e) {
                    LOG.error("Exception Reading String Data For Item:" + (processedCount + 1));
                    LOG.error("Exception:" + CCStringUtils.stringifyException(e));
                    LOG.error("File Position:" + fixedDataReader.getFilePointer() + " StringsPointer:"
                            + stringDataReader.getFilePointer());
                }

                if (processedCount % 10000 == 0) {
                    LOG.info("*** LIST:" + getListId() + " Processed:" + processedCount + " Items");
                }
            }

            // ok, commit top level metadata to disk as well
            writeMetadataToDisk();
        } catch (IOException e) {
            LOG.error("Encountered Exception Queueing Items for List:" + _listId + " Exception:"
                    + CCStringUtils.stringifyException(e));
            LOG.error("File Position:" + fixedDataReader.getFilePointer() + " StringsPointer:"
                    + stringDataReader.getFilePointer());
            _queueState = QueueState.QUEUED;
        } finally {
            fixedDataReader.close();
            stringDataReader.close();
        }

        LOG.info("*** LIST:" + getListId() + " SUBDOMAIN METADATA REBUILT FROM LIST DATA . WRITING TO DISK");
        // write metadata to disk
        writeInitialSubDomainMetadataToDisk();
        LOG.info("*** LIST:" + getListId() + " SUBDOMAIN METADATA REBUILT FROM LIST DATA . WRITE COMPLETE");
    }
}
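When the metadata file is absent, the rebuild path streams every fixed-size record from _fixedDataFile, resolves each item's URL through a seek into the variable-length string file, and accumulates per-subdomain URL counts before persisting the freshly built metadata with writeInitialSubDomainMetadataToDisk().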