Example usage for java.io RandomAccessFile seek

Introduction

On this page you can find usage examples for java.io.RandomAccessFile.seek, collected from open source projects.

Prototype

public void seek(long pos) throws IOException 

Documentation

Sets the file-pointer offset, measured from the beginning of this file, at which the next read or write occurs.
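
For orientation before the project examples below, here is a minimal, self-contained sketch of the usual pattern: open a RandomAccessFile, seek to an absolute offset, then read or write at that position. The file name example.dat, the offsets, and the class name are arbitrary choices made for this illustration.

import java.io.IOException;
import java.io.RandomAccessFile;

public class SeekExample {
    public static void main(String[] args) throws IOException {
        // "example.dat" is an arbitrary file name; mode "rw" creates it if it does not exist.
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "rw")) {
            raf.writeBytes("hello world");          // write starting at offset 0
            raf.seek(6);                            // move the file pointer to offset 6
            raf.writeBytes("there");                // overwrite "world" with "there"
            raf.seek(0);                            // rewind before reading back
            byte[] buffer = new byte[(int) raf.length()];
            raf.readFully(buffer);                  // read the whole file
            System.out.println(new String(buffer)); // "hello there" when the file starts out empty
        }
    }
}

Seeking only moves the file pointer; setting it beyond the end of the file does not change the file length until a subsequent write occurs at that offset.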

Usage

From source file:phex.update.UpdateCheckRunner.java

private String getErrorLogFileTail() {
    try {
        File logFile = Environment.getInstance().getPhexConfigFile("phex.error.log");
        if (!logFile.exists()) {
            return null;
        }
        try (RandomAccessFile raf = new RandomAccessFile(logFile, "r")) {
            // read at most the last 10 KB of the error log
            long pos = Math.max(raf.length() - 10 * 1024, 0);
            raf.seek(pos);
            byte[] buffer = new byte[(int) Math.min(10 * 1024, raf.length())];
            int lenRead = raf.read(buffer);
            return new String(buffer, 0, lenRead);
        }
    } catch (IOException exp) {
        NLogger.error(UpdateCheckRunner.class, exp, exp);
        return exp.toString();
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestFSEditLogLoader.java

@Test
public void testValidateEditLogWithCorruptHeader() throws IOException {
    File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptHeader");
    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
    File logFile = prepareUnfinalizedTestEditLog(testDir, 2, offsetToTxId);
    RandomAccessFile rwf = new RandomAccessFile(logFile, "rw");
    try {
        rwf.seek(0);
        rwf.writeLong(42); // corrupt header
    } finally {
        rwf.close();
    }
    EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
    assertTrue(validation.hasCorruptHeader());
}

From source file:org.apache.hadoop.io.TestIOUtils.java

@Test
public void testWriteFully() throws IOException {
    final int INPUT_BUFFER_LEN = 10000;
    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
    byte[] input = new byte[INPUT_BUFFER_LEN];
    for (int i = 0; i < input.length; i++) {
        input[i] = (byte) (i & 0xff);
    }
    byte[] output = new byte[input.length];

    try {
        RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
        FileChannel fc = raf.getChannel();
        ByteBuffer buf = ByteBuffer.wrap(input);
        IOUtils.writeFully(fc, buf);
        raf.seek(0);
        raf.read(output);
        for (int i = 0; i < input.length; i++) {
            assertEquals(input[i], output[i]);
        }
        buf.rewind();
        IOUtils.writeFully(fc, buf, HALFWAY);
        for (int i = 0; i < HALFWAY; i++) {
            assertEquals(input[i], output[i]);
        }
        raf.seek(0);
        raf.read(output);
        for (int i = HALFWAY; i < input.length; i++) {
            assertEquals(input[i - HALFWAY], output[i]);
        }
    } finally {
        File f = new File(TEST_FILE_NAME);
        if (f.exists()) {
            f.delete();
        }
    }
}

From source file:org.apache.cassandra.db.VerifyTest.java

@Test
public void testVerifyCorruptRowCorrectDigest() throws IOException, WriteTimeoutException {
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CORRUPT_CF2);

    fillCF(cfs, KEYSPACE, CORRUPT_CF2, 2);

    List<Row> rows = cfs.getRangeSlice(Util.range("", ""), null, new IdentityQueryFilter(), 1000);

    SSTableReader sstable = cfs.getSSTables().iterator().next();

    // overwrite one row with garbage
    long row0Start = sstable.getPosition(RowPosition.ForKey.get(ByteBufferUtil.bytes("0"), sstable.partitioner),
            SSTableReader.Operator.EQ).position;
    long row1Start = sstable.getPosition(RowPosition.ForKey.get(ByteBufferUtil.bytes("1"), sstable.partitioner),
            SSTableReader.Operator.EQ).position;
    long startPosition = row0Start < row1Start ? row0Start : row1Start;
    long endPosition = row0Start < row1Start ? row1Start : row0Start;

    RandomAccessFile file = new RandomAccessFile(sstable.getFilename(), "rw");
    file.seek(startPosition);
    file.writeBytes(StringUtils.repeat('z', 2));
    file.close();

    // Update the Digest to have the right Checksum
    writeChecksum(simpleFullChecksum(sstable.getFilename()), sstable.descriptor.filenameFor(Component.DIGEST));

    Verifier verifier = new Verifier(cfs, sstable, false);

    // First a simple verify checking digest, which should succeed
    try {
        verifier.verify(false);
    } catch (CorruptSSTableException err) {
        fail("Simple verify should have succeeded as digest matched");
    }

    // Now try extended verify
    try {
        verifier.verify(true);

    } catch (CorruptSSTableException err) {
        return;
    }
    fail("Expected a CorruptSSTableException to be thrown");

}

From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java

@Test(expected = BadCheckpointException.class)
public void testCheckpointVersionNotEqualToMeta() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
        writer.writeLong(2L);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}

From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java

@Test(expected = BadCheckpointException.class)
public void testCheckpointOrderIdNotEqualToMeta() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_WRITE_ORDER_ID * Serialization.SIZE_OF_LONG);
        writer.writeLong(2L);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}

From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java

@Test(expected = BadCheckpointException.class)
public void testCheckpointBadVersion() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");
    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_VERSION * Serialization.SIZE_OF_LONG);
        writer.writeLong(94L);
        writer.getFD().sync();

        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}

From source file:org.apache.flume.channel.file.TestEventQueueBackingStoreFactory.java

@Test(expected = BadCheckpointException.class)
public void testIncompleteCheckpoint() throws Exception {
    RandomAccessFile writer = new RandomAccessFile(checkpoint, "rw");

    try {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
        backingStore.close();
        writer.seek(EventQueueBackingStoreFile.INDEX_CHECKPOINT_MARKER * Serialization.SIZE_OF_LONG);
        writer.writeLong(EventQueueBackingStoreFile.CHECKPOINT_INCOMPLETE);
        writer.getFD().sync();
        backingStore = EventQueueBackingStoreFactory.get(checkpoint, 10, "test");
    } finally {
        writer.close();
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestFSEditLogLoader.java

@Test
public void testDisplayRecentEditLogOpCodes() throws IOException {
    // start a cluster 
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
            .enableManagedDfsDirsRedundancy(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    FSImage fsimage = namesystem.getFSImage();
    for (int i = 0; i < 20; i++) {
        fileSys.mkdirs(new Path("/tmp/tmp" + i));
    }
    StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
    cluster.shutdown();

    File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
    assertTrue("Should exist: " + editFile, editFile.exists());

    // Corrupt the edits file.
    long fileLen = editFile.length();
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    rwf.seek(fileLen - 40);
    for (int i = 0; i < 20; i++) {
        rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
    }
    rwf.close();

    StringBuilder bld = new StringBuilder();
    bld.append("^Error replaying edit log at offset \\d+.  ");
    bld.append("Expected transaction ID was \\d+\n");
    bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
                .enableManagedDfsDirsRedundancy(false).format(false).build();
        fail("should not be able to start");
    } catch (IOException e) {
        assertTrue("error message contains opcodes message", e.getMessage().matches(bld.toString()));
    }
}

From source file:org.apache.hadoop.dfs.TestReplication.java

public void testPendingReplicationRetry() throws IOException {

    MiniDFSCluster cluster = null;
    int numDataNodes = 4;
    String testFile = "/replication-test-file";
    Path testPath = new Path(testFile);

    byte buffer[] = new byte[1024];
    for (int i = 0; i < buffer.length; i++) {
        buffer[i] = '1';
    }

    try {
        Configuration conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        //first time format
        cluster = new MiniDFSCluster(0, conf, numDataNodes, true, true, null, null);
        cluster.waitActive();
        DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
                conf);

        OutputStream out = cluster.getFileSystem().create(testPath);
        out.write(buffer);
        out.close();

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

        // get first block of the file.
        String block = dfsClient.namenode.getBlockLocations(testFile, 0, Long.MAX_VALUE).get(0).getBlock()
                .getBlockName();

        cluster.shutdown();
        cluster = null;

        //Now mess up some of the replicas.
        //Delete the first and corrupt the next two.
        File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
        for (int i = 0; i < 25; i++) {
            buffer[i] = '0';
        }

        int fileCount = 0;
        for (int i = 0; i < 6; i++) {
            File blockFile = new File(baseDir, "data" + (i + 1) + "/current/" + block);
            LOG.info("Checking for file " + blockFile);

            if (blockFile.exists()) {
                if (fileCount == 0) {
                    LOG.info("Deleting file " + blockFile);
                    assertTrue(blockFile.delete());
                } else {
                    // corrupt it.
                    LOG.info("Corrupting file " + blockFile);
                    long len = blockFile.length();
                    assertTrue(len > 50);
                    RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
                    blockOut.seek(len / 3);
                    blockOut.write(buffer, 0, 25);
                }
                fileCount++;
            }
        }
        assertEquals(3, fileCount);

        /* Start the MiniDFSCluster with more datanodes since once a writeBlock
         * to a datanode node fails, same block can not be written to it
         * immediately. In our case some replication attempts will fail.
         */

        LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
        conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
        conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
        conf.set("dfs.safemode.threshold.pct", "0.75f"); // only 3 copies exist

        cluster = new MiniDFSCluster(0, conf, numDataNodes * 2, false, true, null, null);
        cluster.waitActive();

        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}