Example usage for java.io RandomAccessFile seek

List of usage examples for java.io RandomAccessFile seek

Introduction

On this page you can find example usage for java.io RandomAccessFile.seek.

Prototype

public void seek(long pos) throws IOException 

Source Link

Document

Sets the file-pointer offset, measured from the beginning of this file, at which the next read or write occurs.

Usage

From source file:org.apache.flume.channel.file.TestFileChannelRestart.java

/**
 * Corrupts the checkpoint metadata file after a forced checkpoint, then
 * restarts the channel and verifies all events are still recoverable
 * (via the backup checkpoint when dual checkpoints are enabled).
 *
 * @param backup whether dual (backup) checkpoints are enabled
 * @throws Exception if channel setup, corruption, or verification fails
 */
private void doTestCorruptCheckpointMeta(boolean backup) throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    if (backup) {
        // Give the backup checkpoint time to be written before stopping.
        Thread.sleep(2000);
    }
    channel.stop();
    File checkpoint = new File(checkpointDir, "checkpoint");
    RandomAccessFile writer = new RandomAccessFile(Serialization.getMetaDataFile(checkpoint), "rw");
    try {
        // Overwrite 8 bytes at offset 10 with random garbage to corrupt the metadata.
        writer.seek(10);
        writer.writeLong(new Random().nextLong());
        writer.getFD().sync();
    } finally {
        // Previously the file leaked if seek/writeLong/sync threw.
        writer.close();
    }
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Assert.assertTrue(!backup || channel.checkpointBackupRestored());
    Set<String> out = consumeChannel(channel);
    compareInputAndOut(in, out);
}

From source file:org.apache.hadoop.hive.ql.io.TestRCFile.java

/**
 * Writes an RCFile of random records, injects junk bytes into the middle of
 * the file, and verifies that reading succeeds when the
 * "hive.io.rcfile.tolerate.corruptions" option is enabled.
 */
@Test
public void testReadCorruptFile() throws IOException, SerDeException {
    cleanup();

    byte[][] record = { null, null, null, null, null, null, null, null };

    RCFileOutputFormat.setColumnNumber(conf, expectedFieldsData.length);
    RCFile.Writer writer = new RCFile.Writer(fs, conf, file, null, new DefaultCodec());
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(record.length);
    final int recCount = 100;
    Random rand = new Random();
    for (int recIdx = 0; recIdx < recCount; recIdx++) {
        for (int i = 0; i < record.length; i++) {
            // Integer.toString avoids the deprecated new Integer(...) boxing.
            record[i] = Integer.toString(rand.nextInt()).getBytes("UTF-8");
        }
        for (int i = 0; i < record.length; i++) {
            BytesRefWritable cu = new BytesRefWritable(record[i], 0, record[i].length);
            bytes.set(i, cu);
        }
        writer.append(bytes);
        bytes.clear();
    }
    writer.close();

    // Insert junk in middle of file. Assumes file is on local disk.
    RandomAccessFile raf = new RandomAccessFile(file.toUri().getPath(), "rw");
    try {
        long corruptOffset = raf.length() / 2;
        LOG.info("corrupting " + raf + " at offset " + corruptOffset);
        raf.seek(corruptOffset);
        raf.writeBytes("junkjunkjunkjunkjunkjunkjunkjunk");
    } finally {
        // Previously the file handle leaked if seek/writeBytes threw.
        raf.close();
    }

    // Set the option for tolerating corruptions. The read should succeed.
    Configuration tmpConf = new Configuration(conf);
    tmpConf.setBoolean("hive.io.rcfile.tolerate.corruptions", true);
    RCFile.Reader reader = new RCFile.Reader(fs, file, tmpConf);
    try {
        LongWritable rowID = new LongWritable();

        // Drain the whole file; any unhandled corruption would throw here.
        while (true) {
            boolean more = reader.next(rowID);
            if (!more) {
                break;
            }
            BytesRefArrayWritable cols = new BytesRefArrayWritable();
            reader.getCurrentRow(cols);
            cols.resetValid(8);
        }
    } finally {
        // Previously the reader leaked if next()/getCurrentRow() threw.
        reader.close();
    }
}

From source file:org.apache.flume.channel.file.TestFileChannelRestart.java

/**
 * Restarts the channel with fast replay optionally enabled, optionally
 * corrupting the checkpoint metadata first, and verifies that fast replay
 * is used exactly when (useFastReplay && shouldCorruptCheckpoint).
 *
 * @param shouldCorruptCheckpoint whether to corrupt checkpoint metadata before restart
 * @param useFastReplay           whether the channel is configured for fast replay
 * @throws Exception if channel setup, corruption, or verification fails
 */
private void testFastReplay(boolean shouldCorruptCheckpoint, boolean useFastReplay) throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_FAST_REPLAY, String.valueOf(useFastReplay));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = putEvents(channel, "restart", 10, 100);
    Assert.assertEquals(100, in.size());
    forceCheckpoint(channel);
    channel.stop();
    if (shouldCorruptCheckpoint) {
        File checkpoint = new File(checkpointDir, "checkpoint");
        RandomAccessFile writer = new RandomAccessFile(Serialization.getMetaDataFile(checkpoint), "rw");
        try {
            // Overwrite 8 bytes at offset 10 with random garbage.
            writer.seek(10);
            writer.writeLong(new Random().nextLong());
            writer.getFD().sync();
        } finally {
            // Previously the file leaked if seek/writeLong/sync threw.
            writer.close();
        }
    }
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> out = consumeChannel(channel);
    if (useFastReplay && shouldCorruptCheckpoint) {
        Assert.assertTrue(channel.didFastReplay());
    } else {
        Assert.assertFalse(channel.didFastReplay());
    }
    compareInputAndOut(in, out);
}

From source file:org.mhisoft.wallet.service.AttachmentService.java

/**
 * Reads the encrypted attachment content at the entry's recorded position,
 * decrypts it, and (for newer store versions) gunzips it. The decrypted
 * content and its size are also stored back onto the entry.
 *
 * @param decompress        whether to gunzip the decrypted payload
 * @param storeVersion      on-disk store format version
 * @param fileStoreDataFile path of the attachment data file
 * @param entry             locates the content (position + encrypted size)
 * @param encryptor         performs the decryption
 * @return the decrypted (and possibly decompressed) bytes, or null on I/O error
 */
private byte[] _readFileContent(boolean decompress, final int storeVersion, String fileStoreDataFile,
        FileAccessEntry entry, PBEEncryptor encryptor) {

    RandomAccessFile fileStore = null;
    try {
        // NOTE(review): opened "rw" although only reads occur here — kept as-is
        // to preserve existing locking/permission behavior.
        fileStore = new RandomAccessFile(fileStoreDataFile, "rw");
        fileStore.seek(entry.getPosOfContent());
        byte[] encryptedBytes = new byte[entry.getEncSize()];
        fileStore.readFully(encryptedBytes);
        byte[] fileContent = encryptor.decrypt(encryptedBytes, entry.getAlgorithmParameters());

        //after decrypt, deflate the compressed data
        if (decompress && storeVersion >= WalletModel.LATEST_DATA_VERSION) {
            GZIPInputStream decompressedStream = null;
            try {
                decompressedStream = new GZIPInputStream(new ByteArrayInputStream(fileContent));
                fileContent = IOUtils.toByteArray(decompressedStream);
            } finally {
                // Previously the stream was never closed.
                if (decompressedStream != null) {
                    decompressedStream.close();
                }
            }
        }

        entry.setFileContent(fileContent);
        entry.setSize(fileContent.length); //decrypted size.

        return fileContent;

    } catch (IOException e) {
        e.printStackTrace();
        DialogUtils.getInstance().error("Can't read " + fileStoreDataFile + ":" + e.toString());
    } finally {
        // Previously the file leaked whenever seek/readFully/decrypt threw.
        if (fileStore != null) {
            try {
                fileStore.close();
            } catch (IOException ignored) {
                // best-effort close; the read already completed or failed above
            }
        }
    }
    return null;
}

From source file:org.apache.james.mailrepository.file.MBoxMailRepository.java

/**
 * Appends the given mail to the mbox file: a "From " separator line built
 * from the sender and current date, followed by the raw message text.
 * If the message cannot be rendered, nothing is written.
 *
 * @see org.apache.james.mailrepository.api.MailRepository#store(Mail)
 */
public void store(Mail mc) {

    if ((getLogger().isDebugEnabled())) {
        String logBuffer = this.getClass().getName() + " Will store message to file " + mboxFile;

        getLogger().debug(logBuffer);
    }
    // Invalidate the cached message list; the file is about to change.
    this.mList = null;
    // Now make up the from header
    String fromHeader = null;
    String message = null;
    try {
        message = getRawMessage(mc.getMessage());
        // check for nullsender
        if (mc.getMessage().getFrom() == null) {
            fromHeader = "From   " + dy.format(Calendar.getInstance().getTime());
        } else {
            fromHeader = "From " + mc.getMessage().getFrom()[0] + " "
                    + dy.format(Calendar.getInstance().getTime());
        }

    } catch (IOException e) {
        getLogger().error("Unable to parse mime message for " + mboxFile, e);
    } catch (MessagingException e) {
        getLogger().error("Unable to parse mime message for " + mboxFile, e);
    }
    // If parsing failed, fromHeader/message are still null. Previously the
    // literal text "null" was appended to the mbox, corrupting it; bail out instead.
    if (fromHeader == null || message == null) {
        return;
    }
    // And save only the new stuff to disk
    RandomAccessFile saveFile = null;
    try {
        saveFile = new RandomAccessFile(mboxFile, "rw");
        saveFile.seek(saveFile.length()); // Move to the end
        saveFile.writeBytes((fromHeader + "\n"));
        saveFile.writeBytes((message + "\n"));

    } catch (FileNotFoundException e) {
        getLogger().error("Unable to save(open) file (File not found) " + mboxFile, e);
    } catch (IOException e) {
        getLogger().error("Unable to write file (General I/O problem) " + mboxFile, e);
    } finally {
        // Previously the file leaked if a write threw after opening.
        if (saveFile != null) {
            try {
                saveFile.close();
            } catch (IOException e) {
                getLogger().error("Unable to close file (General I/O problem) " + mboxFile, e);
            }
        }
    }
}

From source file:com.thoughtworks.go.config.GoConfigDataSource.java

/**
 * Serializes the updated configuration and writes it to the config file
 * under an exclusive OS-level file lock, then reloads it.
 *
 * The XML is produced (and validated via getModifiedConfig) BEFORE the file
 * is truncated, so an invalid configuration never clobbers the on-disk copy.
 *
 * @param updatingCommand the mutation to apply to the current configuration
 * @param configHolder    the currently loaded configuration
 * @return the reloaded configuration plus whether the save was a merge or a plain update
 */
public synchronized GoConfigSaveResult writeWithLock(UpdateConfigCommand updatingCommand,
        GoConfigHolder configHolder) {
    FileChannel channel = null;
    FileOutputStream outputStream = null;
    FileLock lock = null;
    try {
        RandomAccessFile randomAccessFile = new RandomAccessFile(fileLocation(), "rw");
        channel = randomAccessFile.getChannel();
        // Exclusive lock: blocks other processes from writing the file concurrently.
        lock = channel.lock();

        // Need to convert to xml before we try to write it to the config file.
        // If our cruiseConfig fails XSD validation, we don't want to write it incorrectly.
        String configAsXml = getModifiedConfig(updatingCommand, configHolder);

        // Rewind and truncate only after the new XML is known to be good, then
        // write through a stream sharing the locked file's descriptor.
        randomAccessFile.seek(0);
        randomAccessFile.setLength(0);
        outputStream = new FileOutputStream(randomAccessFile.getFD());
        LOGGER.info(String.format("[Configuration Changed] Saving updated configuration."));
        IOUtils.write(configAsXml, outputStream);
        ConfigSaveState configSaveState = shouldMergeConfig(updatingCommand, configHolder)
                ? ConfigSaveState.MERGED
                : ConfigSaveState.UPDATED;
        return new GoConfigSaveResult(internalLoad(configAsXml, getConfigUpdatingUser(updatingCommand)),
                configSaveState);
    } catch (ConfigFileHasChangedException e) {
        LOGGER.warn("Configuration file could not be merged successfully after a concurrent edit: "
                + e.getMessage(), e);
        throw e;
    } catch (GoConfigInvalidException e) {
        LOGGER.warn("Configuration file is invalid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } catch (Exception e) {
        LOGGER.error("Configuration file is not valid: " + e.getMessage(), e);
        throw bomb(e.getMessage(), e);
    } finally {
        // Release the lock, then close. Closing the channel also closes the
        // RandomAccessFile's underlying descriptor.
        if (channel != null && lock != null) {
            try {
                lock.release();
                channel.close();
                IOUtils.closeQuietly(outputStream);
            } catch (IOException e) {
                LOGGER.error("Error occured when releasing file lock and closing file.", e);
            }
        }
        LOGGER.debug("[Config Save] Done writing with lock");
    }
}

From source file:org.hrva.capture.LogTail.java

/**
 * Tail the given file if the size has changed and return a temp filename.
 *
 * <p>This returns a temp filename if the log being tailed has changed.
 * </p>
 * 
 * <p>The supplied target filename is -- actually -- a format string.
 * The available value, <tt>{0}</tt>, is the sequence number
 * that's saved in the history cache.</p>
 *
 * @param source The log filename to tail
 * @param target A temporary filename into which to save the tail piece.
 * @return temp filename, if the file size changed; otherwise null
 * @throws FileNotFoundException
 * @throws IOException
 */
public String tail(String source, String target) throws FileNotFoundException, IOException {
    // The resulting file name (or null if the log did not grow).
    String temp_name = null;

    // Open our last-time-we-looked file.
    String cache_file_name = global.getProperty("logtail.tail_status_filename", "logtail.history");
    String limit_str = global.getProperty("logtail.file_size_limit", "1m"); // 1 * 1024 * 1024;
    int limit;
    // Parse the size limit: optional "m"/"M" (MiB) or "k"/"K" (KiB) suffix.
    if (limit_str.endsWith("m") || limit_str.endsWith("M")) {
        limit = 1024 * 1024 * Integer.parseInt(limit_str.substring(0, limit_str.length() - 1));
    } else if (limit_str.endsWith("k") || limit_str.endsWith("K")) {
        limit = 1024 * Integer.parseInt(limit_str.substring(0, limit_str.length() - 1));
    } else {
        limit = Integer.parseInt(limit_str);
    }

    Properties state = get_state(cache_file_name);

    // Find the previous size and sequence number
    String prev_size_str = state.getProperty("size." + source, "0");
    long prev_size = Long.parseLong(prev_size_str);
    String seq_str = state.getProperty("seq." + source, "0");
    long sequence = Long.parseLong(seq_str);

    Object[] details = { source, target, seq_str, prev_size_str };
    logger.info(MessageFormat.format("Tailing {0} to {1}", details));
    logger.info(MessageFormat.format("Count {2}, Bytes {3}", details));
    sequence += 1;

    // Attempt to seek to the previous position
    long position = 0;
    File log_to_tail = new File(source);
    RandomAccessFile rdr = new RandomAccessFile(log_to_tail, "r");
    try {
        long current_size = rdr.length();
        if (current_size == prev_size) {
            // Same size.  Nothing more to do here.
            position = current_size;
        } else {
            // Changed size.  Either grew or was truncated.
            if (rdr.length() < prev_size) {
                // Got truncated.  Read from beginning.
                sequence = 0;
                prev_size = 0;
            } else {
                // Got bigger.  Read from where we left off.
                rdr.seek(prev_size);
            }
            // Read to EOF or the limit.
            // No reason to get greedy.
            int read_size;
            if (current_size - prev_size > limit) {
                read_size = limit;
                rdr.seek(current_size - limit);
            } else {
                read_size = (int) (current_size - prev_size);
            }
            byte[] buffer = new byte[read_size];
            // read() may return fewer bytes than requested (e.g. a concurrent
            // truncation); previously the full, partly-zeroed buffer was written.
            int bytes_read = rdr.read(buffer);
            position = rdr.getFilePointer();

            // Write temp file
            Object[] args = { sequence };
            temp_name = MessageFormat.format(target, args);

            File extract = new File(temp_name);
            OutputStream wtr = new FileOutputStream(extract);
            try {
                if (bytes_read > 0) {
                    wtr.write(buffer, 0, bytes_read);
                }
            } finally {
                // Previously this stream was never closed, leaking a file handle.
                wtr.close();
            }
        }
    } finally {
        rdr.close();
    }

    // Update our private last-time-we-looked file.
    state.setProperty("size." + source, String.valueOf(position));
    state.setProperty("seq." + source, String.valueOf(sequence));
    save_state(cache_file_name, state);

    Object[] details2 = { source, target, seq_str, prev_size_str, String.valueOf(sequence),
            String.valueOf(position) };
    logger.info(MessageFormat.format("Count {4}, Bytes {5}", details2));

    return temp_name;
}

From source file:com.yifanlu.PSXperiaTool.PSXperiaTool.java

private void patchGame() throws IOException {
    /*/*from w ww .  ja va  2 s.co  m*/
     * Custom patch format (config/game-patch.bin) is as follows:
     * 0x8 byte little endian: Address in game image to start patching
     * 0x8 byte little endian: Length of patch
     * If there are more patches, repeat after reading the length of patch
     * Note that all games will be patched the same way, so if a game is broken before patching, it will still be broken!
     */
    nextStep("Patching game.");
    File gamePatch = new File(mTempDir, "/config/game-patch.bin");
    if (!gamePatch.exists())
        return;
    Logger.info("Making a copy of game.");
    File tempGame = new File(mTempDir, "game.iso");
    FileUtils.copyFile(mInputFile, tempGame);
    RandomAccessFile game = new RandomAccessFile(tempGame, "rw");
    InputStream patch = new FileInputStream(gamePatch);
    while (true) {
        byte[] rawPatchAddr = new byte[8];
        byte[] rawPatchLen = new byte[8];
        if (patch.read(rawPatchAddr) + patch.read(rawPatchLen) < rawPatchAddr.length + rawPatchLen.length)
            break;
        ByteBuffer bb = ByteBuffer.wrap(rawPatchAddr);
        bb.order(ByteOrder.LITTLE_ENDIAN);
        long patchAddr = bb.getLong();
        bb = ByteBuffer.wrap(rawPatchLen);
        bb.order(ByteOrder.LITTLE_ENDIAN);
        long patchLen = bb.getLong();

        game.seek(patchAddr);
        while (patchLen-- > 0) {
            game.write(patch.read());
        }
    }
    mInputFile = tempGame;
    game.close();
    patch.close();
    Logger.debug("Done patching game.");
}

From source file:com.stimulus.archiva.domain.Volume.java

/**
 * Rewrites the volume-info file from scratch with the volume's id, status,
 * and created/closed dates. Skips writing entirely for ejected volumes;
 * promotes NEW volumes to UNUSED before saving.
 *
 * @param out the already-open volume-info file; truncated and rewritten in place
 */
protected void writeVolumeInfoLines(RandomAccessFile out) {
    try {
        logger.debug("writeVolumeInfoLines()");
        // Truncate and rewind so the file is rewritten from the beginning.
        out.setLength(0);
        out.seek(0);
        // don't save to ejected volume
        if (isEjected())
            return;

        // make a new volume unused
        if (getStatus() == Volume.Status.NEW)
            setStatus(Volume.Status.UNUSED);
        out.writeBytes("# Archiva " + Config.getConfig().getApplicationVersion() + " Volume Information\n");
        out.writeBytes("# note: this file is crucial - do not delete it!\n");
        out.writeBytes("version:3\n");
        // Fixed: was "getID() != null || getID().length() > 0", which throws
        // NullPointerException whenever getID() returns null.
        if (getID() != null && getID().length() > 0) {
            out.writeBytes("id:" + getID() + "\n");
        }
        if (getStatus() != null) {
            out.writeBytes("status:" + getStatus() + "\n");
        }
        if (getCreatedDate() != null)
            out.writeBytes("created:" + DateUtil.convertDatetoString(getCreatedDate()) + "\n");
        if (getClosedDate() != null)
            out.writeBytes("closed:" + DateUtil.convertDatetoString(getClosedDate()) + "\n");

    } catch (IOException io) {
        // An unmounted volume is expected to be unwritable; stay quiet then.
        if (getStatus() != Volume.Status.UNMOUNTED)
            logger.error("failed to write volumeinfo. {" + toString() + "} cause:" + io, io);
    } catch (ConfigurationException ce) {
        logger.error("failed to set volume status. {" + toString() + "} cause:" + ce, ce);
    }
}

From source file:org.apache.hadoop.dfs.StorageInfo.java

/**
 * Overwrites the start of the given storage file with the current layout
 * version, an empty UTF8 string, and an explanatory notice, then syncs the
 * descriptor. The notice causes pre-0.13 Hadoop versions, which cannot read
 * this directory layout, to fail at startup rather than misbehave.
 *
 * @param file the storage file to deliberately corrupt in place
 * @throws IOException if writing or syncing the file fails
 */
protected void writeCorruptedData(RandomAccessFile file) throws IOException {
    final String preUpgradeNotice = "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
            + "of Hadoop prior to 0.13 (which are incompatible\n"
            + "with this directory layout) will fail to start.\n";

    // Rewind, lay down version marker + empty string + notice, then force to disk.
    file.seek(0);
    file.writeInt(FSConstants.LAYOUT_VERSION);
    org.apache.hadoop.io.UTF8.writeString(file, "");
    file.writeBytes(preUpgradeNotice);
    file.getFD().sync();
}