Example usage for java.io RandomAccessFile getFD

Introduction

This page collects usage examples for java.io.RandomAccessFile.getFD(), drawn from open-source projects.

Prototype

public final FileDescriptor getFD() throws IOException 

Document

Returns the opaque file descriptor object associated with this stream.
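
The examples below use getFD() for two things: forcing written data to the storage device via FileDescriptor.sync(), and constructing streams that operate on the already-open file. The following minimal sketch (not taken from any of the projects listed below; the file name example.dat is arbitrary) illustrates both patterns.

import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

public class GetFDExample {
    public static void main(String[] args) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "rw")) {
            raf.writeInt(42);

            // Force the bytes written above through the OS buffers to the device.
            FileDescriptor fd = raf.getFD();
            fd.sync();

            // A FileInputStream built from the descriptor reads the same open file
            // and shares its file position with the RandomAccessFile.
            raf.seek(0);
            try (FileInputStream in = new FileInputStream(fd)) {
                System.out.println("first byte: " + in.read());
            }
        }
    }
}

Because the stream and the RandomAccessFile share a single descriptor (and a single file position), closing either one effectively invalidates the other.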

Usage

From source file:org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.java
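The offline image viewer wraps the fsimage's descriptor in a FileInputStream, then repositions the stream's channel to each section listed in the file summary and dumps that section as XML.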

public void visit(RandomAccessFile file) throws IOException {
    if (!FSImageUtil.checkFileFormat(file)) {
        throw new IOException("Unrecognized FSImage");
    }

    FileSummary summary = FSImageUtil.loadSummary(file);
    try (FileInputStream fin = new FileInputStream(file.getFD())) {
        out.print("<?xml version=\"1.0\"?>\n<fsimage>");

        out.print("<version>");
        o("layoutVersion", summary.getLayoutVersion());
        o("onDiskVersion", summary.getOndiskVersion());
        // Output the version of OIV (which is not necessarily the version of
        // the fsimage file).  This could be helpful in the case where a bug
        // in OIV leads to information loss in the XML-- we can quickly tell
        // if a specific fsimage XML file is affected by this bug.
        o("oivRevision", VersionInfo.getRevision());
        out.print("</version>\n");

        ArrayList<FileSummary.Section> sections = Lists.newArrayList(summary.getSectionsList());
        Collections.sort(sections, new Comparator<FileSummary.Section>() {
            @Override
            public int compare(FileSummary.Section s1, FileSummary.Section s2) {
                SectionName n1 = SectionName.fromString(s1.getName());
                SectionName n2 = SectionName.fromString(s2.getName());
                if (n1 == null) {
                    return n2 == null ? 0 : -1;
                } else if (n2 == null) {
                    return 1; // unrecognized sections sort first, mirroring the n1 == null case
                } else {
                    return n1.ordinal() - n2.ordinal();
                }
            }
        });

        for (FileSummary.Section s : sections) {
            fin.getChannel().position(s.getOffset());
            InputStream is = FSImageUtil.wrapInputStreamForCompression(conf, summary.getCodec(),
                    new BufferedInputStream(new LimitInputStream(fin, s.getLength())));

            switch (SectionName.fromString(s.getName())) {
            case NS_INFO:
                dumpNameSection(is);
                break;
            case STRING_TABLE:
                loadStringTable(is);
                break;
            case INODE:
                dumpINodeSection(is);
                break;
            case INODE_REFERENCE:
                dumpINodeReferenceSection(is);
                break;
            case INODE_DIR:
                dumpINodeDirectorySection(is);
                break;
            case FILES_UNDERCONSTRUCTION:
                dumpFileUnderConstructionSection(is);
                break;
            case SNAPSHOT:
                dumpSnapshotSection(is);
                break;
            case SNAPSHOT_DIFF:
                dumpSnapshotDiffSection(is);
                break;
            case SECRET_MANAGER:
                dumpSecretManagerSection(is);
                break;
            case CACHE_MANAGER:
                dumpCacheManagerSection(is);
                break;
            default:
                break;
            }
        }
        out.print("</fsimage>\n");
    }
}

From source file:org.apache.flume.channel.file.TestFileChannelRestart.java
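This Flume test corrupts the checkpoint's version field, calls getFD().sync() so the corruption reaches disk, and then verifies that the restarted channel performs a full replay.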

@Test
public void testCorruptCheckpointVersionMostSignificant4Bytes() throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    channel.stop();
    File checkpoint = new File(checkpointDir, "checkpoint");
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
    writer.write(new byte[] { (byte) 1, (byte) 5 });
    writer.getFD().sync();
    writer.close();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> out = consumeChannel(channel);
    Assert.assertTrue(channel.didFullReplayDueToBadCheckpointException());
    compareInputAndOut(in, out);
}

From source file:org.apache.flume.channel.file.TestFileChannelRestart.java
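The same scenario as above, but the corruption targets the checkpoint's completion marker rather than its version field.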

@Test
public void testCorruptCheckpointCompleteMarkerMostSignificant4Bytes() throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    channel.stop();
    File checkpoint = new File(checkpointDir, "checkpoint");
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    writer.seek(EventQueueBackingStoreFile.INDEX_CHECKPOINT_MARKER * Serialization.SIZE_OF_LONG);
    writer.write(new byte[] { (byte) 1, (byte) 5 });
    writer.getFD().sync();
    writer.close();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> out = consumeChannel(channel);
    Assert.assertTrue(channel.didFullReplayDueToBadCheckpointException());
    compareInputAndOut(in, out);
}

From source file:com.thoughtworks.go.config.GoConfigDataSource.java
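GoCD's config data source locks the configuration file's channel, truncates the file, and writes the updated XML through a FileOutputStream constructed from the same descriptor, so the write lands in the locked file.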

public synchronized GoConfigSaveResult writeWithLock(UpdateConfigCommand updatingCommand,
        GoConfigHolder configHolder) {
    FileChannel channel = null;
    FileOutputStream outputStream = null;
    FileLock lock = null;
    try {
        RandomAccessFile randomAccessFile = new RandomAccessFile(fileLocation(), "rw");
        channel = randomAccessFile.getChannel();
        lock = channel.lock();

        // Need to convert to xml before we try to write it to the config file.
        // If our cruiseConfig fails XSD validation, we don't want to write it incorrectly.
        String configAsXml = getModifiedConfig(updatingCommand, configHolder);

        randomAccessFile.seek(0);
        randomAccessFile.setLength(0);
        outputStream = new FileOutputStream(randomAccessFile.getFD());
        LOGGER.info("[Configuration Changed] Saving updated configuration.");
        IOUtils.write(configAsXml, outputStream);
        ConfigSaveState configSaveState = shouldMergeConfig(updatingCommand, configHolder)
                ? ConfigSaveState.MERGED
                : ConfigSaveState.UPDATED;
        return new GoConfigSaveResult(internalLoad(configAsXml, getConfigUpdatingUser(updatingCommand)),
                configSaveState);
    } catch (ConfigFileHasChangedException e) {
        LOGGER.warn("Configuration file could not be merged successfully after a concurrent edit: "
                + e.getMessage(), e);
        throw e;
    } catch (GoConfigInvalidException e) {
        LOGGER.warn("Configuration file is invalid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } catch (Exception e) {
        LOGGER.error("Configuration file is not valid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } finally {
        if (channel != null && lock != null) {
            try {
                lock.release();
                channel.close();
                IOUtils.closeQuietly(outputStream);
            } catch (IOException e) {
                LOGGER.error("Error occured when releasing file lock and closing file.", e);
            }
        }
        LOGGER.debug("[Config Save] Done writing with lock");
    }
}

From source file:org.apache.flume.channel.file.TestFileChannelRestart.java
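This test writes the CHECKPOINT_INCOMPLETE marker into the checkpoint file and syncs the descriptor before restarting the channel; with dual checkpoints enabled, the backup checkpoint should be restored.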

private void doTestIncompleteCheckpoint(boolean backup) throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    if (backup) {
        Thread.sleep(2000);
    }
    channel.stop();
    File checkpoint = new File(checkpointDir, "checkpoint");
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    writer.seek(EventQueueBackingStoreFile.INDEX_CHECKPOINT_MARKER * Serialization.SIZE_OF_LONG);
    writer.writeLong(EventQueueBackingStoreFile.CHECKPOINT_INCOMPLETE);
    writer.getFD().sync();
    writer.close();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Assert.assertTrue(!backup || channel.checkpointBackupRestored());
    Set<String> out = consumeChannel(channel);
    compareInputAndOut(in, out);
}

From source file:org.apache.hadoop.dfs.StorageInfo.java
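Hadoop's storage layer deliberately writes corrupted data into an old-layout file and syncs the descriptor so that pre-0.13 releases, which cannot read the new directory layout, refuse to start.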

protected void writeCorruptedData(RandomAccessFile file) throws IOException {
    final String messageForPreUpgradeVersion = "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
            + "of Hadoop prior to 0.13 (which are incompatible\n"
            + "with this directory layout) will fail to start.\n";

    file.seek(0);
    file.writeInt(FSConstants.LAYOUT_VERSION);
    org.apache.hadoop.io.UTF8.writeString(file, "");
    file.writeBytes(messageForPreUpgradeVersion);
    file.getFD().sync();
}

From source file:org.apache.jxtadoop.hdfs.server.common.Storage.java
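The jxtadoop port of the same writeCorruptedData routine, again finishing with getFD().sync().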

protected void writeCorruptedData(RandomAccessFile file) throws IOException {
    final String messageForPreUpgradeVersion = "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
            + "of Hadoop prior to 0.13 (which are incompatible\n"
            + "with this directory layout) will fail to start.\n";

    file.seek(0);
    file.writeInt(FSConstants.LAYOUT_VERSION);
    org.apache.jxtadoop.io.UTF8.writeString(file, "");
    file.writeBytes(messageForPreUpgradeVersion);
    file.getFD().sync();
}

From source file:hotbeans.support.FileSystemHotBeanModuleRepository.java
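This repository lock helper opens the lock file in "rws" mode and retries FileChannel.tryLock() until a lock is obtained or the timeout expires; an exclusive locker that finds the file empty writes a marker string and syncs the descriptor.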

/**
 * Obtains a file lock on the repository lock file.
 */
protected RepositoryFileLock obtainRepositoryFileLock(final boolean shared, final int timeout)
        throws IOException {
    Log logger = this.getLog();

    if (logger.isDebugEnabled())
        logger.debug("Obtaining repository file lock (shared: " + shared + ").");

    RepositoryFileLock repositoryFileLock = null;
    FileLock lock = null;
    final long beginWait = System.currentTimeMillis();

    while (repositoryFileLock == null) {
        try {
            RandomAccessFile lockFile = new RandomAccessFile(
                    new File(moduleRepositoryDirectory, LOCK_FILE_NAME), "rws");
            FileChannel channel = lockFile.getChannel();

            // Attempt to obtain a lock on the file
            lock = channel.tryLock(0L, Long.MAX_VALUE, shared);
            if (!shared && (lockFile.length() == 0)) {
                lockFile.write("LOCK".getBytes());
                lockFile.getFD().sync();
            }
            repositoryFileLock = new RepositoryFileLock(lockFile, lock);
        } catch (IOException ioe) {
            if (logger.isDebugEnabled())
                logger.debug("Error obtaining repository file lock (shared: " + shared + ").", ioe);
            if (timeout < 0)
                throw ioe;
        } catch (OverlappingFileLockException ofle) {
            if (logger.isDebugEnabled())
                logger.debug("Error obtaining repository file lock (shared: " + shared + ").", ofle);
            if (timeout < 0)
                throw ofle;
        }

        if (repositoryFileLock == null) // This statement shouldn't be reached if timeout is < 0
        {
            if ((System.currentTimeMillis() - beginWait) > timeout) // Wait a maximum of timeout milliseconds on lock
            {
                throw new IOException("Timeout while waiting for file lock on repository lock file!");
            } else {
                // Otherwise - wait a while before trying to obtain a lock again
                try {
                    Thread.sleep(Math.min(250, timeout - (System.currentTimeMillis() - beginWait)));
                } catch (InterruptedException ie) {
                }
            }
        }
    }

    if (logger.isDebugEnabled())
        logger.debug("Repository file lock (shared: " + shared + ") obtained.");

    return repositoryFileLock;
}

From source file:com.koda.integ.hbase.storage.FileExtStorage.java
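Here the descriptor is handed to native code to obtain the raw fd, which is then used to ask the operating system to bypass the page cache for this file.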

/**
 * Open file.
 *
 * @param id the id
 * @param mode the mode
 * @return the random access file
 */
private RandomAccessFile openFile(int id, String mode) {
    String path = getFilePath(id);
    RandomAccessFile file = null;
    try {
        file = new RandomAccessFile(path, mode);
        if (noPageCache) {
            int fd = CLibrary.getfd(file.getFD());
            CLib.trySkipCache(fd, 0, 0);
        }
    } catch (FileNotFoundException e) {
        // The file does not exist yet; fall through and return null.
    } catch (IOException e) {
        LOG.error(e.getMessage(), e);
    }
    return file;
}

From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.java
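openAndSeek positions a RandomAccessFile at the requested offset and returns a FileInputStream over its descriptor; since the two share a file position, the stream starts reading at that offset.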

private static FileInputStream openAndSeek(File file, long offset) throws IOException {
    RandomAccessFile raf = null;
    try {
        raf = new RandomAccessFile(file, "r");
        if (offset > 0) {
            raf.seek(offset);
        }
        return new FileInputStream(raf.getFD());
    } catch (IOException ioe) {
        IOUtils.cleanup(null, raf);
        throw ioe;
    }
}