List of usage examples for java.io.RandomAccessFile.seek
public void seek(long pos) throws IOException
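seek(long pos) sets the file-pointer offset, measured in bytes from the beginning of the file, at which the next read or write occurs. Before the project examples below, here is a minimal self-contained sketch of the typical write/append/re-read pattern; the file name demo.bin and the values written are purely illustrative:

import java.io.IOException;
import java.io.RandomAccessFile;

public class SeekDemo {
    public static void main(String[] args) throws IOException {
        // "rw" opens the file for both reading and writing; demo.bin is an illustrative name
        try (RandomAccessFile raf = new RandomAccessFile("demo.bin", "rw")) {
            raf.setLength(0);           // truncate any previous contents so the offsets below are predictable
            raf.writeInt(42);           // written at offset 0
            raf.seek(raf.length());     // jump to the end of the file to append
            raf.writeInt(7);

            raf.seek(0);                // rewind and read the first value back
            System.out.println(raf.readInt());   // prints 42

            raf.seek(4);                // jump directly to the second int (4 bytes in)
            System.out.println(raf.readInt());   // prints 7
        }
    }
}

Seeking past the current end of file is allowed and does not change the file length by itself; the file only grows once something is written at that position.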
From source file:org.ms123.common.git.GitServiceImpl.java
protected static String getFileType(File file) {
    if (file.isDirectory()) {
        return "sw.directory";
    } else if (file.toString().endsWith(".txt")) {
        return "text/plain";
    } else if (file.toString().endsWith(".jpeg") || file.toString().endsWith(".jpg")) {
        return "image/jpeg";
    } else if (file.toString().endsWith(".png")) {
        return "image/png";
    } else if (file.toString().endsWith(".svg")) {
        return "image/svg+xml";
    } else if (file.toString().endsWith(".xml")) {
        return "text/xml";
    } else if (file.toString().endsWith(".woff") || file.toString().endsWith(".woff.gz")) {
        return "application/x-font-woff";
    } else if (file.toString().endsWith(".js") || file.toString().endsWith(".js.gz")) {
        return "text/javascript";
    } else if (file.toString().endsWith(".adoc") || file.toString().endsWith(".adoc.gz")) {
        return "text/x-asciidoc";
    } else if (file.toString().endsWith(".html") || file.toString().endsWith(".html.gz")) {
        return "text/html";
    } else if (file.toString().endsWith(".css") || file.toString().endsWith(".css.gz")) {
        return "text/css";
    } else if (file.toString().endsWith(".yaml") || file.toString().endsWith(".yml")) {
        return "text/x-yaml";
    } else if (file.toString().endsWith(".json") || file.toString().endsWith(".json.gz")) {
        return "application/json";
    } else if (file.toString().endsWith(".odt")) {
        return "application/vnd.oasis.opendocument.text";
    } else if (file.toString().endsWith(".groovy")) {
        return "sw.groovy";
    } else if (file.toString().endsWith(".java")) {
        return "sw.java";
    }
    RandomAccessFile r = null;
    try {
        int lnr = 0;
        r = new RandomAccessFile(file, "r");
        int i = r.readInt();
        if (i == 1534293853) { // first four bytes spell "[sw]"
            r.seek(0); // rewind and re-read the header line by line
            while (true) {
                String line = r.readLine();
                if (line == null) {
                    break;
                }
                if (lnr == 0 && !line.startsWith("[sw]"))
                    break;
                if (line.trim().startsWith("type")) {
                    int eq = line.indexOf("=");
                    if (eq != -1) {
                        return line.substring(eq + 1).trim();
                    }
                }
                lnr++;
                if (lnr > 20)
                    break;
            }
        } else if (i == -2555936) { // JPEG magic number (0xFFD8FFE0)
            return "image/jpeg";
        } else if (i == -1991225785) { // PNG magic number (0x89504E47)
            return "image/png";
        }
    } catch (Exception e) {
        return "sw.unknown";
    } finally {
        closeFile(r);
    }
    return detectFileType(file);
}
From source file:org.exist.util.VirtualTempFile.java
/**
 * The method <code>getChunk</code>.
 *
 * @param offset a <code>long</code> value
 * @return a <code>byte[]</code> value
 * @exception IOException if an error occurs
 */
public byte[] getChunk(long offset) throws IOException {
    byte[] data = null;
    if (os != null) {
        close();
    }
    if (tempFile != null) {
        final RandomAccessFile raf = new RandomAccessFile(tempFile, "r");
        raf.seek(offset);
        long remaining = raf.length() - offset;
        if (remaining > maxChunkSize) {
            remaining = maxChunkSize;
        } else if (remaining < 0) {
            remaining = 0;
        }
        data = new byte[(int) remaining];
        raf.readFully(data);
        raf.close();
    } else if (tempBuffer != null) {
        long remaining = tempBuffer.length - offset;
        if (remaining > maxChunkSize) {
            remaining = maxChunkSize;
        } else if (remaining < 0) {
            remaining = 0;
        }
        data = new byte[(int) remaining];
        if (remaining > 0) {
            System.arraycopy(tempBuffer, (int) offset, data, 0, (int) remaining);
        }
    }
    return data;
}
From source file:hudson.util.TextFile.java
/**
 * Efficiently reads the last N characters (or shorter, if the whole file is shorter than that.)
 *
 * <p>
 * This method first tries to just read the tail section of the file to get the necessary chars.
 * To handle multi-byte variable length encoding (such as UTF-8), we read a larger than
 * necessary chunk.
 *
 * <p>
 * Some multi-byte encodings, such as Shift-JIS (http://en.wikipedia.org/wiki/Shift_JIS), don't
 * allow the first byte and the second byte of a single char to be unambiguously identified,
 * so it is possible that we end up decoding incorrectly if we start reading in the middle of a multi-byte
 * character. All the CJK multi-byte encodings that I know of are self-correcting; as they are ASCII-compatible,
 * any ASCII characters or control characters will bring the decoding back in sync, so in the worst
 * case we just have some garbage at the beginning that needs to be discarded. To accommodate this,
 * we read an additional 1024 bytes.
 *
 * <p>
 * Other encodings, such as UTF-8, are better in that the character boundary is unambiguous,
 * so there can be at most one garbage char. For dealing with UTF-16 and UTF-32, we read at
 * a 4-byte boundary (all the constants and multipliers are multiples of 4.)
 *
 * <p>
 * Note that it is possible to construct a contrived input that fools this algorithm, and in this method
 * we are willing to live with a small possibility of that to avoid reading the whole text. In practice,
 * such an input is very unlikely.
 *
 * <p>
 * So all in all, this algorithm should work decently, and it works quite efficiently on large text.
 */
public @Nonnull String fastTail(int numChars, Charset cs) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(file, "r");
    try {
        long len = raf.length();
        // err on the safe side and assume each char occupies 4 bytes
        // the additional 1024-byte margin is to bring us back in sync in case we started reading from a non-char boundary
        long pos = Math.max(0, len - (numChars * 4 + 1024));
        raf.seek(pos);

        byte[] tail = new byte[(int) (len - pos)];
        raf.readFully(tail);

        String tails = cs.decode(java.nio.ByteBuffer.wrap(tail)).toString();

        // trim the baggage of substring by allocating a new String
        return new String(tails.substring(Math.max(0, tails.length() - numChars)));
    } finally {
        raf.close();
    }
}
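Stripped of the multi-byte margin handling above, the underlying pattern is simply: seek close to the end, read the remaining bytes, and decode them. A minimal sketch of that pattern, assuming a UTF-8 file and tolerating a possibly truncated first character; the class name, method name, and parameters are illustrative, not Hudson's API:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class TailReader {
    // Read roughly the last maxBytes of a file and decode them as UTF-8.
    static String tail(String path, int maxBytes) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(path, "r")) {
            long len = raf.length();
            long pos = Math.max(0, len - maxBytes);   // don't seek before the start of the file
            raf.seek(pos);                            // jump to the tail section
            byte[] buf = new byte[(int) (len - pos)];
            raf.readFully(buf);                       // read everything from pos to EOF
            return StandardCharsets.UTF_8.decode(ByteBuffer.wrap(buf)).toString();
        }
    }
}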
From source file:org.apache.hadoop.hdfs.server.datanode.DataStorage.java
public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
    File oldF = new File(sd.getRoot(), "storage");
    if (!oldF.exists())
        return false;
    // check the layout version inside the storage file
    // Lock and Read old storage file
    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
    FileLock oldLock = oldFile.getChannel().tryLock();
    try {
        oldFile.seek(0);
        int oldVersion = oldFile.readInt();
        if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
            return false;
    } finally {
        oldLock.release();
        oldFile.close();
    }
    return true;
}
From source file:com.jk.framework.util.FakeRunnable.java
/**
 * Writes a string to a file at a given position using random access.
 *
 * @author Mohamde Kiswani
 * @param file
 *            the file
 * @param string
 *            the string
 * @param lineIndex
 *            the byte offset to write at
 * @throws IOException
 *             Signals that an I/O exception has occurred.
 * @since 28-1-2010
 */
public static void writeOnFile(final File file, final String string, final long lineIndex) throws IOException {
    final RandomAccessFile rand = new RandomAccessFile(file, "rw");
    rand.seek(lineIndex); // Seek to lineIndex of file
    rand.writeBytes(string); // Write the string at that position
    rand.close();
}
From source file:org.apache.sshd.server.filesystem.NativeSshFile.java
/**
 * Create output stream for writing.
 */
public OutputStream createOutputStream(final long offset) throws IOException {

    // permission check
    if (!isWritable()) {
        throw new IOException("No write permission : " + file.getName());
    }

    // create output stream
    final RandomAccessFile raf = new RandomAccessFile(file, "rw");
    raf.setLength(offset);
    raf.seek(offset);

    // The IBM jre needs to have both the stream and the random access file
    // objects closed to actually close the file
    return new FileOutputStream(raf.getFD()) {
        public void close() throws IOException {
            super.close();
            raf.close();
        }
    };
}
From source file:org.patientview.monitoring.ImportMonitor.java
/**
 * Returns the last N lines of a file. Assumes lines are terminated by the \n ASCII character.
 */
private static List<String> getLastNLinesOfFile(int numberOfLinesToReturn) {
    List<String> lastNLines = new ArrayList<String>();
    java.io.RandomAccessFile fileHandler = null;

    try {
        File file = getTodaysCountFile();
        fileHandler = new java.io.RandomAccessFile(file, "r");

        long totalNumberOfCharactersInFile = file.length() - 1;
        StringBuilder sb = new StringBuilder();
        int numberOfLinesRead = 0;

        /**
         * loop through characters in file, construct lines out of characters, add lines to a list
         */
        for (long currentCharacter = totalNumberOfCharactersInFile; currentCharacter != -1; currentCharacter--) {
            fileHandler.seek(currentCharacter);
            int readByte = fileHandler.readByte();

            if (readByte == LINE_FEED || readByte == CARRIAGE_RETURN) {
                if (numberOfLinesRead == numberOfLinesToReturn) {
                    break;
                }
                numberOfLinesRead++;

                /**
                 * add line to line list
                 */
                String currentLine = sb.reverse().toString();
                sb = new StringBuilder();

                if (StringUtils.isNotBlank(currentLine)) {
                    lastNLines.add(currentLine);
                } else {
                    LOGGER.error("Read line does not contain any data");
                    continue;
                }
            } else {
                sb.append((char) readByte);
            }
        }

        /**
         * add the last line
         */
        lastNLines.add(sb.reverse().toString());
    } catch (Exception e) {
        LOGGER.error("Can not find today's file", e);
    } finally {
        if (fileHandler != null) {
            try {
                fileHandler.close();
            } catch (IOException e) {
                fileHandler = null;
            }
        }
    }

    return lastNLines;
}
From source file:org.apache.hadoop.hdfs.TestReplication.java
public void testPendingReplicationRetry() throws IOException {

    MiniDFSCluster cluster = null;
    int numDataNodes = 4;
    String testFile = "/replication-test-file";
    Path testPath = new Path(testFile);

    byte buffer[] = new byte[1024];
    for (int i = 0; i < buffer.length; i++) {
        buffer[i] = '1';
    }

    try {
        Configuration conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        //first time format
        cluster = new MiniDFSCluster(0, conf, numDataNodes, true, true, null, null);
        cluster.waitActive();
        DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
                conf);

        OutputStream out = cluster.getFileSystem().create(testPath);
        out.write(buffer);
        out.close();

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

        // get first block of the file.
        String block = dfsClient.namenode.getBlockLocations(testFile, 0, Long.MAX_VALUE).get(0).getBlock()
                .getBlockName();

        cluster.shutdown();
        cluster = null;

        //Now mess up some of the replicas.
        //Delete the first and corrupt the next two.
        File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
        for (int i = 0; i < 25; i++) {
            buffer[i] = '0';
        }

        int fileCount = 0;
        for (int i = 0; i < 6; i++) {
            File blockFile = new File(baseDir, "data" + (i + 1) + "/current/" + block);
            LOG.info("Checking for file " + blockFile);

            if (blockFile.exists()) {
                if (fileCount == 0) {
                    LOG.info("Deleting file " + blockFile);
                    assertTrue(blockFile.delete());
                } else {
                    // corrupt it.
                    LOG.info("Corrupting file " + blockFile);
                    long len = blockFile.length();
                    assertTrue(len > 50);
                    RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
                    try {
                        blockOut.seek(len / 3);
                        blockOut.write(buffer, 0, 25);
                    } finally {
                        blockOut.close();
                    }
                }
                fileCount++;
            }
        }
        assertEquals(3, fileCount);

        /* Start the MiniDFSCluster with more datanodes since once a writeBlock
         * to a datanode node fails, same block can not be written to it
         * immediately. In our case some replication attempts will fail.
         */
        LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
        conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
        conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
        conf.set("dfs.safemode.threshold.pct", "0.75f"); // only 3 copies exist

        cluster = new MiniDFSCluster(0, conf, numDataNodes * 2, false, true, null, null);
        cluster.waitActive();

        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
From source file:edu.harvard.i2b2.patientMapping.ui.PatientIDConversionJFrame.java
private void append(RandomAccessFile f, String outString) throws IOException {
    try {
        f.seek(f.length()); // position the file pointer at the end of the file
        f.writeBytes(outString);
    } catch (IOException e) {
        throw new IOException("trouble writing to random access file.");
    }
    return;
}
From source file:com.microsoft.azure.management.datalake.store.uploader.SingleSegmentUploader.java
/**
 * Opens the input stream.
 *
 * @return A {@link RandomAccessFile} stream of the file being uploaded.
 * @throws IOException Thrown if the input stream cannot be opened due to file accessibility or existence.
 */
private RandomAccessFile openInputStream() throws IOException {
    RandomAccessFile stream = new RandomAccessFile(metadata.getInputFilePath(), "r");

    if (segmentMetadata.getOffset() >= stream.length()) {
        throw new IllegalArgumentException("StartOffset is beyond the end of the input file");
    }

    // always seek from the beginning of the file
    stream.seek(0);
    stream.seek(segmentMetadata.getOffset());

    return stream;
}