List of usage examples for java.io.RandomAccessFile.seek
public void seek(long pos) throws IOException
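seek(long pos) sets the file-pointer offset, measured in bytes from the beginning of the file, at which the next read or write occurs. The offset may be set beyond the end of the file; the file length only changes once something is written at that position. Before the real-world examples below, here is a minimal self-contained sketch (the file name data.bin is just a placeholder):

import java.io.IOException;
import java.io.RandomAccessFile;

public class SeekDemo {
    public static void main(String[] args) throws IOException {
        // data.bin is a placeholder path; any readable file will do.
        try (RandomAccessFile raf = new RandomAccessFile("data.bin", "r")) {
            long middle = raf.length() / 2;
            raf.seek(middle);       // jump to the midpoint of the file
            int b = raf.read();     // the next read starts at that offset (-1 if at EOF)
            System.out.println("Byte at offset " + middle + ": " + b);
        }
    }
}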
From source file:aarddict.Volume.java
Article readArticle(long pointer) throws IOException {
    Article a = articleCache.get(pointer);
    if (a != null)
        return a;
    Header h = this.header;
    long pos = h.articleOffset + pointer;
    RandomAccessFile f = this.file;
    f.seek(pos);
    long articleLength = f.readSpec(h.articleLengthSpec);
    byte[] articleBytes = new byte[(int) articleLength];
    f.read(articleBytes);
    String serializedArticle = decompress(articleBytes);
    a = Article.fromJsonStr(serializedArticle);
    a.dictionaryUUID = h.uuid;
    a.volumeId = h.sha1sum;
    a.pointer = pointer;
    articleCache.put(pointer, a);
    return a;
}
From source file:hydrograph.ui.perspective.dialog.PreStartActivity.java
private boolean updateINIFile(String javaHome) {
    logger.debug("Updating JAVA_HOME in ini file ::" + javaHome);
    javaHome = "-vm\n" + javaHome + "\n";
    RandomAccessFile file = null;
    boolean isUpdated = false;
    try {
        file = new RandomAccessFile(new File(HYDROGRAPH_INI), "rw");
        byte[] text = new byte[(int) file.length()];
        file.readFully(text);
        file.seek(0);
        file.writeBytes(javaHome);
        file.write(text);
        isUpdated = true;
    } catch (IOException ioException) {
        logger.error("IOException occurred while updating " + HYDROGRAPH_INI + " file", ioException);
    } finally {
        try {
            if (file != null) {
                file.close();
            }
        } catch (IOException ioException) {
            logger.error("IOException occurred while updating " + HYDROGRAPH_INI + " file", ioException);
        }
    }
    return isUpdated;
}
From source file:org.openengsb.connector.promreport.internal.ProcessFileStore.java
private long findLastProcess(RandomAccessFile raf) throws IOException {
    final String proc = "</Process>";
    final byte[] bproc = proc.getBytes();
    final int len = proc.length();
    for (long i = raf.length() - "</Process></WorkflowLog>".length(); i >= 0; i--) {
        byte[] buf = new byte[len];
        raf.seek(i);
        raf.readFully(buf, 0, len);
        int b;
        for (b = 0; b < len; b++) {
            if (buf[b] != bproc[b]) {
                break;
            }
        }
        if (b == len) {
            return i;
        }
    }
    return -1;
}
From source file:com.metamx.druid.merger.coordinator.ForkingTaskRunner.java
@Override
public Optional<InputSupplier<InputStream>> streamTaskLog(final String taskid, final long offset) {
    final ProcessHolder processHolder;

    synchronized (tasks) {
        final TaskInfo taskInfo = tasks.get(taskid);
        if (taskInfo != null && taskInfo.processHolder != null) {
            processHolder = taskInfo.processHolder;
        } else {
            return Optional.absent();
        }
    }

    return Optional.<InputSupplier<InputStream>>of(new InputSupplier<InputStream>() {
        @Override
        public InputStream getInput() throws IOException {
            final RandomAccessFile raf = new RandomAccessFile(processHolder.logFile, "r");
            final long rafLength = raf.length();
            if (offset > 0) {
                raf.seek(offset);
            } else if (offset < 0 && offset < rafLength) {
                raf.seek(rafLength + offset);
            }
            return Channels.newInputStream(raf.getChannel());
        }
    });
}
From source file:com.zimbra.cs.redolog.util.RedoLogVerify.java
public boolean scanLog(File logfile) throws IOException {
    boolean good = false;
    FileLogReader logReader = new FileLogReader(logfile, false);
    logReader.open();
    if (!mParams.quiet) {
        FileHeader header = logReader.getHeader();
        mOut.println("HEADER");
        mOut.println("------");
        mOut.print(header);
        mOut.println("------");
    }

    boolean hasMailboxIdsFilter = !mParams.mboxIds.isEmpty();

    RedoableOp op = null;
    long lastPosition = 0;
    long lastOpStartOffset = 0;
    try {
        while ((op = logReader.getNextOp()) != null) {
            lastOpStartOffset = logReader.getLastOpStartOffset();
            lastPosition = logReader.position();
            if (hasMailboxIdsFilter) {
                int mboxId = op.getMailboxId();
                if (op instanceof StoreIncomingBlob) {
                    List<Integer> list = ((StoreIncomingBlob) op).getMailboxIdList();
                    if (list != null) {
                        boolean match = false;
                        for (Integer mid : list) {
                            if (mParams.mboxIds.contains(mid)) {
                                match = true;
                                break;
                            }
                        }
                        if (!match)
                            continue;
                    }
                    // If list==null, it's a store incoming blob op targeted at an unknown set of mailboxes.
                    // It applies to our filtered mailboxes.
                } else if (!mParams.mboxIds.contains(mboxId)) {
                    continue;
                }
            }
            if (!mParams.quiet) {
                printOp(mOut, op, mParams.hideOffset, lastOpStartOffset, lastPosition - lastOpStartOffset);
                if (mParams.showBlob) {
                    InputStream dataStream = op.getAdditionalDataStream();
                    if (dataStream != null) {
                        mOut.println("<START OF BLOB>");
                        ByteUtil.copy(dataStream, true, mOut, false);
                        mOut.println();
                        mOut.println("<END OF BLOB>");
                    }
                }
            }
        }
        good = true;
    } catch (IOException e) {
        // The IOException could be a real I/O problem or it could mean
        // there was a server crash previously and there were half-written
        // log entries.
        mOut.println();
        mOut.printf("Error while parsing data starting at offset 0x%08x", lastPosition);
        mOut.println();
        long size = logReader.getSize();
        long diff = size - lastPosition;
        mOut.printf("%d bytes remaining in the file", diff);
        mOut.println();
        mOut.println();
        if (op != null) {
            mOut.println("Last successfully parsed redo op:");
            printOp(mOut, op, false, lastOpStartOffset, lastPosition - lastOpStartOffset);
            mOut.println();
        }

        // hexdump data around the bad bytes
        int bytesPerLine = 16;
        int linesBefore = 10;
        int linesAfter = 10;
        long startPos = Math.max(lastPosition - (lastPosition % bytesPerLine) - linesBefore * bytesPerLine, 0);
        int count = (int) Math.min((linesBefore + linesAfter + 1) * bytesPerLine, lastPosition - startPos + diff);
        RandomAccessFile raf = null;
        try {
            raf = new RandomAccessFile(logfile, "r");
            raf.seek(startPos);
            byte buf[] = new byte[count];
            raf.read(buf, 0, count);
            mOut.printf("Data near error offset %08x:", lastPosition);
            mOut.println();
            hexdump(mOut, buf, 0, count, startPos, lastPosition);
            mOut.println();
        } catch (IOException eh) {
            mOut.println("Error opening log file " + logfile.getAbsolutePath() + " for hexdump");
            eh.printStackTrace(mOut);
        } finally {
            if (raf != null)
                raf.close();
        }
        throw e;
    } finally {
        logReader.close();
    }
    return good;
}
From source file:pydio.sdk.java.http.AjxpFileBody.java
public void writeTo(OutputStream out) {
    InputStream in;
    //int bufsize = Integer.parseInt(StateHolder.getInstance().getLocalConfig(Pydio.LOCAL_CONFIG_BUFFER_SIZE));
    try {
        if (this.chunkSize > 0) {
            RandomAccessFile raf = new RandomAccessFile(getFile(), "r");
            int start = chunkIndex * this.chunkSize;
            int count = 0;
            int limit = chunkSize;
            byte[] buffer = new byte[bufsize];

            if (chunkIndex == (totalChunks - 1)) {
                limit = lastChunkSize;
            }
            raf.seek(start);
            while (count < limit) {
                if (count + bufsize > limit) {
                    if (count == 0) {
                        bufsize = limit;
                    } else {
                        bufsize = limit - count;
                    }
                }
                raf.read(buffer, 0, bufsize);
                out.write(buffer, 0, bufsize);
                count += bufsize;
            }
            raf.close();
        } else {
            in = new FileInputStream(getFile());
            byte[] buf = new byte[bufsize];
            int len;
            while ((len = in.read(buf)) > 0) {
                out.write(buf, 0, len);
            }
            in.close();
        }
        this.chunkIndex++;
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
From source file:org.apache.sshd.server.filesystem.NativeSshFile.java
/**
 * Create input stream for reading.
 */
public InputStream createInputStream(final long offset) throws IOException {
    // permission check
    if (!isReadable()) {
        throw new IOException("No read permission : " + file.getName());
    }

    // move to the appropriate offset and create input stream
    final RandomAccessFile raf = new RandomAccessFile(file, "r");
    raf.seek(offset);

    // The IBM jre needs to have both the stream and the random access file
    // objects closed to actually close the file
    return new FileInputStream(raf.getFD()) {
        public void close() throws IOException {
            super.close();
            raf.close();
        }
    };
}
From source file:org.slc.sli.api.resources.FileResource.java
/**
 * Send a range of content.
 *
 * @param raf the file from which the content is sent
 * @param output the output to which the content is sent
 * @param r the range
 * @return total length sent
 * @throws IOException
 */
private long sendRangeContent(RandomAccessFile raf, OutputStream output, Range r) throws IOException {
    raf.seek(r.start);
    byte[] buffer = new byte[4 * 1024];
    long left = r.getLength();
    long total = 0;
    int n = -1;
    while (left > 0 && (n = raf.read(buffer, 0, (int) Math.min(left, buffer.length))) > -1) {
        output.write(buffer, 0, n);
        output.flush();
        left -= n;
        total += n;
    }
    return total;
}
From source file:com.cloud.storage.template.HttpTemplateDownloader.java
@Override
public long download(boolean resume, DownloadCompleteCallback callback) {
    switch (status) {
    case ABORTED:
    case UNRECOVERABLE_ERROR:
    case DOWNLOAD_FINISHED:
        return 0;
    default:
    }
    int bytes = 0;
    File file = new File(toFile);
    try {
        long localFileSize = 0;
        if (file.exists() && resume) {
            localFileSize = file.length();
            s_logger.info("Resuming download to file (current size)=" + localFileSize);
        }

        Date start = new Date();

        int responseCode = 0;

        if (localFileSize > 0) {
            // require partial content support for resume
            request.addRequestHeader("Range", "bytes=" + localFileSize + "-");
            if (client.executeMethod(request) != HttpStatus.SC_PARTIAL_CONTENT) {
                errorString = "HTTP Server does not support partial get";
                status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
                return 0;
            }
        } else if ((responseCode = client.executeMethod(request)) != HttpStatus.SC_OK) {
            status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
            errorString = " HTTP Server returned " + responseCode + " (expected 200 OK) ";
            return 0; //FIXME: retry?
        }

        Header contentLengthHeader = request.getResponseHeader("Content-Length");
        boolean chunked = false;
        long remoteSize2 = 0;
        if (contentLengthHeader == null) {
            Header chunkedHeader = request.getResponseHeader("Transfer-Encoding");
            if (chunkedHeader == null || !"chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
                status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
                errorString = " Failed to receive length of download ";
                return 0; //FIXME: what status do we put here? Do we retry?
            } else if ("chunked".equalsIgnoreCase(chunkedHeader.getValue())) {
                chunked = true;
            }
        } else {
            remoteSize2 = Long.parseLong(contentLengthHeader.getValue());
        }

        if (remoteSize == 0) {
            remoteSize = remoteSize2;
        }

        if (remoteSize > MAX_TEMPLATE_SIZE_IN_BYTES) {
            s_logger.info("Remote size is too large: " + remoteSize + " , max=" + MAX_TEMPLATE_SIZE_IN_BYTES);
            status = Status.UNRECOVERABLE_ERROR;
            errorString = "Download file size is too large";
            return 0;
        }

        if (remoteSize == 0) {
            remoteSize = MAX_TEMPLATE_SIZE_IN_BYTES;
        }

        InputStream in = !chunked ? new BufferedInputStream(request.getResponseBodyAsStream())
                : new ChunkedInputStream(request.getResponseBodyAsStream());

        RandomAccessFile out = new RandomAccessFile(file, "rwd");
        out.seek(localFileSize);

        s_logger.info("Starting download from " + getDownloadUrl() + " to " + toFile + " remoteSize=" + remoteSize
                + " , max size=" + MAX_TEMPLATE_SIZE_IN_BYTES);

        byte[] block = new byte[CHUNK_SIZE];
        long offset = 0;
        boolean done = false;
        status = TemplateDownloader.Status.IN_PROGRESS;
        while (!done && status != Status.ABORTED && offset <= remoteSize) {
            if ((bytes = in.read(block, 0, CHUNK_SIZE)) > -1) {
                out.write(block, 0, bytes);
                offset += bytes;
                out.seek(offset);
                totalBytes += bytes;
            } else {
                done = true;
            }
        }
        Date finish = new Date();
        String downloaded = "(incomplete download)";
        if (totalBytes >= remoteSize) {
            status = TemplateDownloader.Status.DOWNLOAD_FINISHED;
            downloaded = "(download complete remote=" + remoteSize + "bytes)";
        }
        errorString = "Downloaded " + totalBytes + " bytes " + downloaded;
        downloadTime += finish.getTime() - start.getTime();
        out.close();

        return totalBytes;
    } catch (HttpException hte) {
        status = TemplateDownloader.Status.UNRECOVERABLE_ERROR;
        errorString = hte.getMessage();
    } catch (IOException ioe) {
        status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; // probably a file write error?
        errorString = ioe.getMessage();
    } finally {
        if (status == Status.UNRECOVERABLE_ERROR && file.exists() && !file.isDirectory()) {
            file.delete();
        }
        request.releaseConnection();
        if (callback != null) {
            callback.downloadComplete(status);
        }
    }
    return 0;
}
From source file:com.metamx.druid.indexing.coordinator.ForkingTaskRunner.java
@Override
public Optional<InputSupplier<InputStream>> streamTaskLog(final String taskid, final long offset) {
    final ProcessHolder processHolder;

    synchronized (tasks) {
        final ForkingTaskRunnerWorkItem taskWorkItem = tasks.get(taskid);
        if (taskWorkItem != null && taskWorkItem.processHolder != null) {
            processHolder = taskWorkItem.processHolder;
        } else {
            return Optional.absent();
        }
    }

    return Optional.<InputSupplier<InputStream>>of(new InputSupplier<InputStream>() {
        @Override
        public InputStream getInput() throws IOException {
            final RandomAccessFile raf = new RandomAccessFile(processHolder.logFile, "r");
            final long rafLength = raf.length();
            if (offset > 0) {
                raf.seek(offset);
            } else if (offset < 0 && offset < rafLength) {
                raf.seek(rafLength + offset);
            }
            return Channels.newInputStream(raf.getChannel());
        }
    });
}