List of usage examples for java.io.RandomAccessFile.getFD()
public final FileDescriptor getFD() throws IOException
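getFD() returns the FileDescriptor object backing the open RandomAccessFile. Two patterns recur throughout the examples below: calling sync() on the descriptor to force buffered writes out to the storage device, and handing the descriptor to a FileInputStream or FileOutputStream so stream-based APIs can reuse a file that is already open and positioned. A minimal sketch of both patterns (the file name demo.txt is illustrative, not taken from any example below):

import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

public class GetFDDemo {
    public static void main(String[] args) throws IOException {
        RandomAccessFile raf = new RandomAccessFile("demo.txt", "rw");
        try {
            raf.writeBytes("hello");

            // Pattern 1: force buffered writes to the storage device.
            FileDescriptor fd = raf.getFD();
            fd.sync();

            // Pattern 2: wrap the same open file in a stream. The stream and
            // the RandomAccessFile share one descriptor, so the read starts
            // at the file position set by seek().
            raf.seek(0);
            FileInputStream in = new FileInputStream(raf.getFD());
            byte[] buf = new byte[5];
            int n = in.read(buf);
            System.out.println(new String(buf, 0, n)); // prints "hello"
            // "in" is not closed separately; raf.close() below releases the
            // shared descriptor.
        } finally {
            raf.close();
        }
    }
}

Note that a stream built from getFD() and the RandomAccessFile share a single open file: closing either one invalidates the other, a caveat several of the examples below work around explicitly.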
From source file:org.apache.hadoop.mapred.FadvisedChunkedFile.java
public FadvisedChunkedFile(RandomAccessFile file, long position, long count, int chunkSize,
        boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
        String identifier) throws IOException {
    super(file, position, count, chunkSize);
    this.manageOsCache = manageOsCache;
    this.readaheadLength = readaheadLength;
    this.readaheadPool = readaheadPool;
    this.fd = file.getFD();
    this.identifier = identifier;
}
From source file:org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream.java
/**
 * Creates output buffers and file object.
 *
 * @param conf
 *          Configuration object
 * @param name
 *          File name to store edit log
 * @param size
 *          Size of flush buffer
 * @throws IOException
 */
public EditLogFileOutputStream(Configuration conf, File name, int size) throws IOException {
    super();
    shouldSyncWritesAndSkipFsync = conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH,
            DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT);
    file = name;
    doubleBuf = new EditsDoubleBuffer(size);
    RandomAccessFile rp;
    if (shouldSyncWritesAndSkipFsync) {
        rp = new RandomAccessFile(name, "rws");
    } else {
        rp = new RandomAccessFile(name, "rw");
    }
    fp = new FileOutputStream(rp.getFD()); // open for append
    fc = rp.getChannel();
    fc.position(fc.size());
}
From source file:fr.gael.dhus.server.ftp.DHuSFtpProduct.java
@Override
public InputStream createInputStream(long offset) throws IOException {
    File file = new File(product.getDownloadablePath());
    logger.debug("Retrieving File stream from " + file.getPath());
    /*
    return new FileInputStream(file);
    */
    // permission check
    if (!doesExist()) {
        throw new IOException("No read permission : " + file.getName());
    }
    // move to the appropriate offset and create input stream
    final RandomAccessFile raf = new RandomAccessFile(file, "r");
    try {
        raf.seek(offset);
        // The IBM jre needs to have both the stream and the random access file
        // objects closed to actually close the file
        return new RegulatedInputStream.Builder(new FileInputStream(raf.getFD()) {
            public void close() throws IOException {
                super.close();
                raf.close();
            }
        }, TrafficDirection.OUTBOUND).userName(user.getName())
                .copyStreamListener(new DownloadActionRecordListener(product.getUuid(),
                        product.getIdentifier(), vfsService.getDhusUserFromFtpUser(user)))
                .build();
    } catch (IOException e) {
        raf.close();
        throw e;
    }
}
From source file:org.apache.hadoop.mapred.FadvisedFileRegion.java
public FadvisedFileRegion(RandomAccessFile file, long position, long count,
        boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
        String identifier, int shuffleBufferSize,
        boolean shuffleTransferToAllowed) throws IOException {
    super(file.getChannel(), position, count);
    this.manageOsCache = manageOsCache;
    this.readaheadLength = readaheadLength;
    this.readaheadPool = readaheadPool;
    this.fd = file.getFD();
    this.identifier = identifier;
    this.fileChannel = file.getChannel();
    this.count = count;
    this.position = position;
    this.shuffleBufferSize = shuffleBufferSize;
    this.shuffleTransferToAllowed = shuffleTransferToAllowed;
}
From source file:org.apache.hadoop.hdfs.tools.offlineImageViewer.LsrPBImage.java
public void visit(RandomAccessFile file) throws IOException {
    if (!FSImageUtil.checkFileFormat(file)) {
        throw new IOException("Unrecognized FSImage");
    }
    FileSummary summary = FSImageUtil.loadSummary(file);
    FileInputStream fin = null;
    try {
        fin = new FileInputStream(file.getFD());
        ArrayList<FileSummary.Section> sections = Lists.newArrayList(summary.getSectionsList());
        Collections.sort(sections, new Comparator<FileSummary.Section>() {
            @Override
            public int compare(FileSummary.Section s1, FileSummary.Section s2) {
                SectionName n1 = SectionName.fromString(s1.getName());
                SectionName n2 = SectionName.fromString(s2.getName());
                if (n1 == null) {
                    return n2 == null ? 0 : -1;
                } else if (n2 == null) {
                    return -1;
                } else {
                    return n1.ordinal() - n2.ordinal();
                }
            }
        });
        for (FileSummary.Section s : sections) {
            fin.getChannel().position(s.getOffset());
            InputStream is = FSImageUtil.wrapInputStreamForCompression(conf, summary.getCodec(),
                    new BufferedInputStream(new LimitInputStream(fin, s.getLength())));
            switch (SectionName.fromString(s.getName())) {
            case STRING_TABLE:
                loadStringTable(is);
                break;
            case INODE:
                loadINodeSection(is);
                break;
            case INODE_REFERENCE:
                loadINodeReferenceSection(is);
                break;
            case INODE_DIR:
                loadINodeDirectorySection(is);
                break;
            default:
                break;
            }
        }
        list("", INodeId.ROOT_INODE_ID);
    } finally {
        IOUtils.cleanup(null, fin);
    }
}
From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java
@Test(expected = BadCheckpointException.class)
public void testCheckpointVersionNotEqualToMeta() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        // Overwrite the version slot in the checkpoint file, then force the
        // change to disk through the file descriptor so the next load sees it.
        writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
        writer.writeLong(2L);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}
From source file:org.wso2.carbon.apimgt.everywhere.startup.publisher.APIManagerStartupPublisher.java
private InputStream getImageInputStream(String imageAbsolutePath) throws IOException {
    RandomAccessFile file1 = new RandomAccessFile(imageAbsolutePath, "r");
    // The returned stream shares file1's descriptor; closing the stream
    // closes the underlying file.
    return new FileInputStream(file1.getFD());
}
From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java
@Test(expected = BadCheckpointException.class)
public void testCheckpointBadVersion() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
        writer.writeLong(94L);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}
From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java
@Test(expected = BadCheckpointException.class)
public void testCheckpointOrderIdNotEqualToMeta() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_WRITE_ORDER_ID * Serialization.SIZE_OF_LONG);
        writer.writeLong(2L);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}
From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java
@Test(expected = BadCheckpointException.class)
public void testIncompleteCheckpoint() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_CHECKPOINT_MARKER * Serialization.SIZE_OF_LONG);
        writer.writeLong(EventQueueBackingStoreFile.CHECKPOINT_INCOMPLETE);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}