List of usage examples for the java.io.RandomAccessFile#length() method
public native long length() throws IOException;
From source file:org.apache.sling.commons.log.logback.internal.Tailer.java
/** * Returns the starting position of UNIX "tail -n". *//*from w w w .j av a2s.c om*/ private long getTailStartPos(RandomAccessFile file, int n) throws IOException { int newlineCount = 0; long length = file.length(); long pos = length - BUFFER_SIZE; int buffLength = BUFFER_SIZE; if (pos < 0) { pos = 0; buffLength = (int) length; } while (true) { file.seek(pos); file.readFully(buffer, 0, buffLength); for (int i = buffLength - 1; i >= 0; i--) { if ((char) buffer[i] == '\n') { newlineCount++; if (newlineCount >= n) { pos += (i + 1); return pos; } } } if (pos == 0) { break; } if (pos - BUFFER_SIZE < 0) { buffLength = (int) pos; pos = 0; } else { pos -= BUFFER_SIZE; } } return pos; }
From source file:com.polarion.pso.license.FetchUserLicenseTypeServlet.java
@Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) throws ServletException, IOException { ISecurityService securityService = (ISecurityService) PlatformContext.getPlatform() .lookupService(ISecurityService.class); ITrackerService trackerService = (ITrackerService) PlatformContext.getPlatform() .lookupService(ITrackerService.class); String userId = securityService.getCurrentUser(); String userName = trackerService.getTrackerUser(userId).getName(); long cutoff = System.currentTimeMillis() - (1000); File directory = new File("./logs/main"); FileFilter wcFilter = new WildcardFileFilter("log4j-licensing-*.log"); AgeFileFilter ageFilter = new AgeFileFilter(cutoff); FileFilter andFilter = new org.apache.commons.io.filefilter.AndFileFilter((IOFileFilter) ageFilter, (IOFileFilter) wcFilter);/*from w w w . j a v a 2 s . co m*/ Collection<File> matches = FileUtils.listFiles(directory, (IOFileFilter) andFilter, null); if (!matches.isEmpty()) { File myFile = matches.iterator().next(); RandomAccessFile licFile = new RandomAccessFile(myFile, "r"); // Read the last 1024 bytes Long fileLength = licFile.length(); Long offSet = fileLength - 1024; byte[] bStr = new byte[1024]; licFile.seek(offSet); licFile.read(bStr); licFile.close(); String logString = new java.lang.String(bStr); String[] lineArray = logString.split("\n"); String searchString = "INFO PolarionLicensing - User \'" + userId + "\' logged in"; Boolean found = false; Integer size = lineArray.length - 1; String licType = directory.toString(); for (int i = size; i >= 0; i--) { String line = lineArray[i]; if (line.contains(searchString) && found == false) { found = true; i = -1; Integer startIndex = line.indexOf(searchString) + searchString.length() + 6; licType = line.substring(startIndex); licType = licType.replace("\r", ""); licType = licType.trim(); } } req.setAttribute("userId", userName); req.setAttribute("licType", licType); } else { req.setAttribute("userId", userName); 
req.setAttribute("licType", "Not Found"); } getServletContext().getRequestDispatcher("/currentUserLicenseType.jsp").forward(req, resp); }
From source file:hudson.util.TextFile.java
/** * Efficiently reads the last N characters (or shorter, if the whole file is shorter than that.) * * <p>/*from www.ja v a 2 s. c o m*/ * This method first tries to just read the tail section of the file to get the necessary chars. * To handle multi-byte variable length encoding (such as UTF-8), we read a larger than * necessary chunk. * * <p> * Some multi-byte encoding, such as Shift-JIS (http://en.wikipedia.org/wiki/Shift_JIS) doesn't * allow the first byte and the second byte of a single char to be unambiguously identified, * so it is possible that we end up decoding incorrectly if we start reading in the middle of a multi-byte * character. All the CJK multi-byte encodings that I know of are self-correcting; as they are ASCII-compatible, * any ASCII characters or control characters will bring the decoding back in sync, so the worst * case we just have some garbage in the beginning that needs to be discarded. To accommodate this, * we read additional 1024 bytes. * * <p> * Other encodings, such as UTF-8, are better in that the character boundary is unambiguous, * so there can be at most one garbage char. For dealing with UTF-16 and UTF-32, we read at * 4 bytes boundary (all the constants and multipliers are multiples of 4.) * * <p> * Note that it is possible to construct a contrived input that fools this algorithm, and in this method * we are willing to live with a small possibility of that to avoid reading the whole text. In practice, * such an input is very unlikely. * * <p> * So all in all, this algorithm should work decently, and it works quite efficiently on a large text. */ public @Nonnull String fastTail(int numChars, Charset cs) throws IOException { RandomAccessFile raf = new RandomAccessFile(file, "r"); try { long len = raf.length(); // err on the safe side and assume each char occupies 4 bytes // additional 1024 byte margin is to bring us back in sync in case we started reading from non-char boundary. 
long pos = Math.max(0, len - (numChars * 4 + 1024)); raf.seek(pos); byte[] tail = new byte[(int) (len - pos)]; raf.readFully(tail); String tails = cs.decode(java.nio.ByteBuffer.wrap(tail)).toString(); return new String(tails.substring(Math.max(0, tails.length() - numChars))); // trim the baggage of substring by allocating a new String } finally { raf.close(); } }
From source file:com.example.android.vault.EncryptedDocumentTest.java
public void testBitTwiddle() throws Exception { final EncryptedDocument doc = new EncryptedDocument(4, mFile, mDataKey, mMacKey); // write some metadata final JSONObject before = new JSONObject(); before.put("twiddle", "twiddle"); doc.writeMetadataAndContent(before, null); final RandomAccessFile f = new RandomAccessFile(mFile, "rw"); f.seek(f.length() - 4); f.write(0x00);/* ww w .j a v a2s .c o m*/ f.close(); try { doc.readMetadata(); fail("somehow passed hmac"); } catch (DigestException expected) { } }
From source file:com.datasayer.meerkat.MeerJobRunner.java
@SuppressWarnings("unchecked") @Override//from w w w. ja v a2s . co m public void bsp(final BSPPeer<Writable, Writable, Writable, Writable, Writable> peer) throws IOException, SyncException, InterruptedException { while (true) { try { long currentTime = System.currentTimeMillis(); FileSystem fs = FileSystem.get(conf); if (!fs.isFile(logPath)) { System.out.println("can not read input file"); return; } RandomAccessFile file = new RandomAccessFile(logPath.toString(), "r"); long fileLength = file.length(); if (fileLength > filePointer) { file.seek(filePointer); String line = null; while (file.length() > file.getFilePointer()) { line = file.readLine(); line = new String(line.getBytes("8859_1"), "utf-8"); guardMeer.observe(line); } filePointer = file.getFilePointer(); } else { // nothing to do } file.close(); long timeDiff = currentTime - this.lastAggregatedTime; if (timeDiff >= this.aggregationInterval) { peer.sync(); if (peer.getPeerName().equals(masterName)) { bossMeer.masterCompute(new Iterator<Writable>() { private final int producedMessages = peer.getNumCurrentMessages(); private int consumedMessages = 0; @Override public boolean hasNext() { return producedMessages > consumedMessages; } @Override public Writable next() throws NoSuchElementException { if (consumedMessages >= producedMessages) { throw new NoSuchElementException(); } try { consumedMessages++; return peer.getCurrentMessage(); } catch (IOException e) { throw new NoSuchElementException(); } } @Override public void remove() { // BSPPeer.getCurrentMessage originally deletes a message. // Thus, it doesn't need to throw exception. // throw new UnsupportedOperationException(); } }, signalMeer); this.lastAggregatedTime = currentTime; } } } catch (IOException e) { e.printStackTrace(); } } }
From source file:com.baidu.terminator.manager.service.LogServiceImpl.java
@Override public Log readLog(int linkId, long offset) throws IOException { String logFileLocation = LinkLogger.getLogFileLocation(linkId); FileUtils.createFile(logFileLocation); RandomAccessFile raf = null; List<String> lines = new ArrayList<String>(); long length = 0; try {// ww w .jav a 2s . c om raf = new RandomAccessFile(logFileLocation, "r"); raf.seek(offset); length = raf.length(); long point = raf.getFilePointer(); while (point < length) { String line = raf.readLine(); String utf8Line = new String(line.getBytes("8859_1"), "utf-8"); lines.add(utf8Line); if (point - offset >= MAX_READ_BYTES) { length = point; break; } point = raf.getFilePointer(); } } finally { if (raf != null) { raf.close(); } } Log log = new Log(); log.setLogLocation(logFileLocation); log.setOffset(length); log.setContent(lines); return log; }
From source file:com.koda.integ.hbase.storage.FIFOStorageRecycler.java
public void run() { LOG.info(Thread.currentThread().getName() + " started."); while (needContinue()) { // Get oldest file int minId = storage.getMinId().get(); String fileName = storage.getFilePath(minId); RandomAccessFile raf = storage.getFile(minId); try {//from ww w .ja v a 2 s.c om long size = raf.length(); raf.close(); // STATISTICS totalFilesSize.addAndGet(size); LOG.info("Processing file: " + fileName + " size=" + size); // STATISTICS totalScannedFiles.incrementAndGet(); // Update current storage size storage.deleteOldestFile(); } catch (Exception e) { LOG.error(fileName, e); } } LOG.info(Thread.currentThread().getName() + " stopped."); }
From source file:org.commoncrawl.service.crawler.CrawlSegmentLog.java
public static int reconcileLogFile(FileSystem fs, Path logFilePath, int listId, int segmentId, CrawlSegmentFPMap segment, File consolidationFile) throws IOException { RandomAccessFile consolidationStream = null; int consolidationFileItemCount = 0; if (consolidationFile != null) { consolidationStream = new RandomAccessFile(consolidationFile, "rw"); consolidationFileItemCount = readerHeader(consolidationFile); consolidationStream.seek(consolidationStream.length()); }//from w ww . j av a 2 s . co m int processedItemCount = 0; FSDataInputStream hdfsInputStream = null; try { // get the file size on disk long fileSize = fs.getFileStatus(logFilePath).getLen(); // allocate an array that can hold up to the list size of items ... byte[] buffer = new byte[DEFAULT_LOGITEM_LIST_SIZE * LogItem.ItemSize_Bytes]; // calcuate item count int totalItemCount = (int) ((fileSize - getHeaderSize()) / LogItem.ItemSize_Bytes); // get a reader ... hdfsInputStream = fs.open(logFilePath); int headerItemCount = readHeader(hdfsInputStream); if (headerItemCount != totalItemCount) { LOG.warn("CrawlSegmentLog - header item count for log file:" + logFilePath.toString() + " is:" + headerItemCount + " file size indicates:" + totalItemCount); totalItemCount = headerItemCount; } int remainingItemCount = totalItemCount; LogItemBuffer itemList = new LogItemBuffer(listId, segmentId); while (remainingItemCount != 0) { int blockItemCount = Math.min(remainingItemCount, DEFAULT_LOGITEM_LIST_SIZE); // and read the data hdfsInputStream.read(buffer, 0, (int) blockItemCount * LogItem.ItemSize_Bytes); // and if consolidation stream is valid ... if (consolidationStream != null) { // add entries to that stream ... consolidationStream.write(buffer, 0, (int) blockItemCount * LogItem.ItemSize_Bytes); } // if not a dry run... 
if (segment != null) { // populate the item list itemList.loadFromStream(buffer, blockItemCount); // reconcile the list against the segment processedItemCount += reconcileItemList(itemList, segment); } // reduce item count remainingItemCount -= blockItemCount; } // finally if consolidation stream is valid ... if (consolidationStream != null) { // update the file's header .. writeHeader(consolidationFile, consolidationFileItemCount + totalItemCount); } } finally { if (consolidationStream != null) { consolidationStream.close(); } if (hdfsInputStream != null) { hdfsInputStream.close(); } } return processedItemCount; }
From source file:org.xdi.util.FileUtil.java
public long findLastPosition(String filePath, String searchStr) { try {// w w w .jav a 2 s.com File f = new File(filePath); RandomAccessFile raf; raf = new RandomAccessFile(f, "r"); long position = -1; while (raf.getFilePointer() < raf.length()) { String line = raf.readLine(); if (line.contains(searchStr)) { position = raf.getFilePointer() + line.indexOf(searchStr) - line.length(); continue; } } return position; } catch (FileNotFoundException e) { e.printStackTrace(); return -1; } catch (IOException e) { e.printStackTrace(); return -1; } }
From source file:org.xdi.util.FileUtil.java
/** * returns the first occurrence of specified string in a file. * /* w w w . j a v a2 s.c o m*/ * @param filePath * @param searchStr * @return */ public long findFirstPosition(String filePath, String searchStr) { try { File f = new File(filePath); RandomAccessFile raf; raf = new RandomAccessFile(f, "r"); long position = -1; while (raf.getFilePointer() < raf.length()) { String line = raf.readLine(); if (line.contains(searchStr)) { position = raf.getFilePointer() + line.indexOf(searchStr) - (line.length() + 1); break; } } return position; } catch (FileNotFoundException e) { e.printStackTrace(); return -1; } catch (IOException e) { e.printStackTrace(); return -1; } }