Example usage for java.io RandomAccessFile writeInt

Introduction

On this page you can find example usages of the java.io.RandomAccessFile.writeInt method.

Prototype

public final void writeInt(int v) throws IOException 

Document

Writes an int to the file as four bytes, high byte first.
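Since writeInt always emits the four bytes in big-endian order (high byte first), a value it writes can be read back with readInt regardless of platform. Below is a minimal, self-contained sketch of a round trip; the class name and temp-file prefix are illustrative only.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class WriteIntDemo {
    public static void main(String[] args) throws IOException {
        File tmp = File.createTempFile("writeint", ".bin");
        tmp.deleteOnExit();

        try (RandomAccessFile raf = new RandomAccessFile(tmp, "rw")) {
            raf.writeInt(0xCAFEBABE); // four bytes, high byte first
            raf.seek(0);
            System.out.printf("read back:  0x%08X%n", raf.readInt());
            raf.seek(0);
            // the first byte on disk is the high-order byte of the int
            System.out.printf("first byte: 0x%02X%n", raf.readUnsignedByte()); // prints 0xCA
        }
    }
}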

Usage

From source file:org.apache.hadoop.dfs.StorageInfo.java

protected void writeCorruptedData(RandomAccessFile file) throws IOException {
    final String messageForPreUpgradeVersion = "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
            + "of Hadoop prior to 0.13 (which are incompatible\n"
            + "with this directory layout) will fail to start.\n";

    file.seek(0);
    file.writeInt(FSConstants.LAYOUT_VERSION);
    org.apache.hadoop.io.UTF8.writeString(file, "");
    file.writeBytes(messageForPreUpgradeVersion);
    file.getFD().sync();
}

From source file:org.apache.jxtadoop.hdfs.server.common.Storage.java

protected void writeCorruptedData(RandomAccessFile file) throws IOException {
    final String messageForPreUpgradeVersion = "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
            + "of Hadoop prior to 0.13 (which are incompatible\n"
            + "with this directory layout) will fail to start.\n";

    file.seek(0);
    file.writeInt(FSConstants.LAYOUT_VERSION);
    org.apache.jxtadoop.io.UTF8.writeString(file, "");
    file.writeBytes(messageForPreUpgradeVersion);
    file.getFD().sync();
}

From source file:org.mhisoft.wallet.service.AttachmentService.java

/**
 * Abandon the old store and transfer everything to the new store.
 * The same model is used for both the old and new store.
 *
 * @param oldStorefName
 * @param model
 * @param encryptor
 */
protected void compactAttachmentStore(final String oldStorefName, final WalletModel model,
        final PBEEncryptor encryptor) {
    String newStoreName = oldStorefName + ".tmp";
    File newFile = new File(newStoreName);
    if (newFile.exists()) {
        if (!newFile.delete()) {
            DialogUtils.getInstance().error("Can't delete the tmp file:" + newStoreName);
        }
    }

    FileAccessTable t = new FileAccessTable();
    for (WalletItem item : model.getItemsFlatList()) {
        if (item.getAttachmentEntry() == null)
            continue;

        if (item.getAttachmentEntry().getAccessFlag() == FileAccessFlag.None) {
            // no change; the entry still needs to be carried over to the new file.
            t.addEntry(item.getAttachmentEntry());
        } else if (item.getAttachmentEntry().getAccessFlag() == FileAccessFlag.Merge) {
            t.addEntry(item.getAttachmentEntry());
        } else if (FileAccessFlag.Create == item.getAttachmentEntry().getAccessFlag()
                || FileAccessFlag.Update == item.getAttachmentEntry().getAccessFlag()) {
            // prefer the new attachment entry when one exists; otherwise carry over the old one.
            if (item.getAttachmentEntry().getNewEntry() != null)
                t.addEntry(item.getNewAttachmentEntry());
            else
                t.addEntry(item.getAttachmentEntry());
        }
    }

    RandomAccessFile attachmentFileStore = null;
    if (t.getSize() > 0) {
        try {
            attachmentFileStore = new RandomAccessFile(newStoreName, "rw");
            attachmentFileStore.seek(0);
            attachmentFileStore.writeInt(t.getSize());

            // entries begin right after the 4-byte count header, hence the start offset of 4
            writeFileEntries(model, true, oldStorefName, 4, attachmentFileStore, t, model.getEncryptorForRead(),
                    encryptor);

            attachmentFileStore.close();
            attachmentFileStore = null;

            // now swap the new store into place.
            new File(oldStorefName).delete();
            newFile.renameTo(new File(oldStorefName));

        } catch (IOException e) {
            e.printStackTrace();
            DialogUtils.getInstance().error("compactAttachmentStore() failed", e.getMessage());
        } finally {
            if (attachmentFileStore != null)
                try {
                    attachmentFileStore.close();
                } catch (IOException e) {
                    //e.printStackTrace();
                }
        }
    } else {
        // all attachments were deleted; just remove the old store.
        new File(oldStorefName).delete();
    }

}

From source file:org.apache.flume.channel.file.TestFlumeEventQueue.java

@Test(expected = BadCheckpointException.class)
public void testCorruptInflightPuts() throws Exception {
    RandomAccessFile inflight = null;
    try {
        queue = new FlumeEventQueue(backingStore, backingStoreSupplier.getInflightTakes(),
                backingStoreSupplier.getInflightPuts());
        long txnID1 = new Random().nextInt(Integer.MAX_VALUE - 1);
        long txnID2 = txnID1 + 1;
        queue.addWithoutCommit(new FlumeEventPointer(1, 1), txnID1);
        queue.addWithoutCommit(new FlumeEventPointer(2, 1), txnID1);
        queue.addWithoutCommit(new FlumeEventPointer(2, 2), txnID2);
        queue.checkpoint(true);
        TimeUnit.SECONDS.sleep(3L);
        inflight = new RandomAccessFile(backingStoreSupplier.getInflightPuts(), "rw");
        inflight.seek(0);
        inflight.writeInt(new Random().nextInt());
        queue = new FlumeEventQueue(backingStore, backingStoreSupplier.getInflightTakes(),
                backingStoreSupplier.getInflightPuts());
        SetMultimap<Long, Long> deserializedMap = queue.deserializeInflightPuts();
        Assert.assertTrue(deserializedMap.get(txnID1).contains(new FlumeEventPointer(1, 1).toLong()));
        Assert.assertTrue(deserializedMap.get(txnID1).contains(new FlumeEventPointer(2, 1).toLong()));
        Assert.assertTrue(deserializedMap.get(txnID2).contains(new FlumeEventPointer(2, 2).toLong()));
    } finally {
        if (inflight != null) {
            inflight.close();
        }
    }
}

From source file:org.apache.flume.channel.file.TestFlumeEventQueue.java

@Test(expected = BadCheckpointException.class)
public void testCorruptInflightTakes() throws Exception {
    RandomAccessFile inflight = null;
    try {
        queue = new FlumeEventQueue(backingStore, backingStoreSupplier.getInflightTakes(),
                backingStoreSupplier.getInflightPuts());
        long txnID1 = new Random().nextInt(Integer.MAX_VALUE - 1);
        long txnID2 = txnID1 + 1;
        queue.addWithoutCommit(new FlumeEventPointer(1, 1), txnID1);
        queue.addWithoutCommit(new FlumeEventPointer(2, 1), txnID1);
        queue.addWithoutCommit(new FlumeEventPointer(2, 2), txnID2);
        queue.checkpoint(true);
        TimeUnit.SECONDS.sleep(3L);
        inflight = new RandomAccessFile(backingStoreSupplier.getInflightTakes(), "rw");
        inflight.seek(0);
        inflight.writeInt(new Random().nextInt());
        queue = new FlumeEventQueue(backingStore, backingStoreSupplier.getInflightTakes(),
                backingStoreSupplier.getInflightPuts());
        SetMultimap<Long, Long> deserializedMap = queue.deserializeInflightTakes();
        Assert.assertTrue(deserializedMap.get(txnID1).contains(new FlumeEventPointer(1, 1).toLong()));
        Assert.assertTrue(deserializedMap.get(txnID1).contains(new FlumeEventPointer(2, 1).toLong()));
        Assert.assertTrue(deserializedMap.get(txnID2).contains(new FlumeEventPointer(2, 2).toLong()));
    } finally {
        if (inflight != null) {
            inflight.close();
        }
    }
}

From source file:org.mhisoft.wallet.service.AttachmentService.java

/**
 * Append to the existing store for Merged and Created/Updated attachments.
 * @param filename
 * @param model
 * @param encryptor
 */
protected void appendAttachmentStore(final String filename, final WalletModel model,
        final PBEEncryptor encryptor) {

    FileAccessTable t = new FileAccessTable();
    for (WalletItem item : model.getItemsFlatList()) {
        if (item.getAttachmentEntry() == null)
            continue;
        if (item.getAttachmentEntry().getAccessFlag() == FileAccessFlag.Merge) {
            t.addEntry(item.getAttachmentEntry());
        } else if (FileAccessFlag.Create == item.getAttachmentEntry().getAccessFlag()
                || FileAccessFlag.Update == item.getAttachmentEntry().getAccessFlag()) {

            if (item.getNewAttachmentEntry() != null && item.getNewAttachmentEntry().getFile() != null) {
                t.addEntry(item.getNewAttachmentEntry());
            } else if (item.getAttachmentEntry().getFile() != null) {
                t.addEntry(item.getAttachmentEntry());
            }
        }
    }

    RandomAccessFile attachmentFileStore = null;
    try {
        attachmentFileStore = new RandomAccessFile(filename, "rw");

        if (t.getSize() > 0) {

            // read the existing total number of entries from the header
            int entriesCount = attachmentFileStore.readInt();

            // add the number of entries to be appended
            entriesCount += t.getSize();

            // write the updated total back to the header
            attachmentFileStore.seek(0);
            attachmentFileStore.writeInt(entriesCount);

            //seek to the end
            long itemStartPos = attachmentFileStore.length();
            attachmentFileStore.seek(itemStartPos);

            //append new entries to the end of the store.
            writeFileEntries(model, false, filename, itemStartPos, attachmentFileStore, t,
                    model.getEncryptorForRead(), encryptor);

        }

        // mark the deleted entries
        for (WalletItem item : model.getItemsFlatList()) {
            if (item.getAttachmentEntry() == null)
                continue;
            if (FileAccessFlag.Delete == item.getAttachmentEntry().getAccessFlag() // the attachment was deleted
                    // or the entry's stored content has been replaced: the new content was appended to the
                    // end of the file, so the file entry at the old position must be marked as DELETE.
                    || (FileAccessFlag.Update == item.getAttachmentEntry().getAccessFlag()
                            && item.getAttachmentEntry().getEncSize() > 0
                            && item.getAttachmentEntry().getPosition() > 0)) {

                // the access-flag int sits 40 bytes into the entry record in this store's layout
                attachmentFileStore.seek(item.getAttachmentEntry().getPosition() + 40);
                attachmentFileStore.writeInt(FileAccessFlag.Delete.ordinal());
            }
        }

        attachmentFileStore.close();
        attachmentFileStore = null;

    } catch (IOException e) {
        e.printStackTrace();
        DialogUtils.getInstance().error("Error writing attachment entries.", e.getMessage());
    } finally {
        if (attachmentFileStore != null)
            try {
                attachmentFileStore.close();
            } catch (IOException e) {
                //e.printStackTrace();
            }
    }

}
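
The append path above relies on a common RandomAccessFile idiom: an int entry count kept in the first four bytes of the file is read, incremented, and overwritten in place via seek(0), after which new records are appended at the end. A minimal sketch of that header pattern, with a hypothetical helper name:

import java.io.IOException;
import java.io.RandomAccessFile;

// Hypothetical helper: bump the entry count stored at offset 0, then
// leave the file pointer at the end, ready for appending records.
static void bumpHeaderCount(RandomAccessFile store, int added) throws IOException {
    store.seek(0);
    int count = store.readInt();    // existing count, big-endian
    store.seek(0);
    store.writeInt(count + added);  // overwrite the four header bytes in place
    store.seek(store.length());     // position at end for the appends
}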

From source file:com.example.android.vault.EncryptedDocument.java

/**
 * Encrypt and write both the metadata and content sections of this
 * document, reading the content from the given pipe. Internally uses
 * {@link ParcelFileDescriptor#checkError()} to verify that content arrives
 * without errors. Writes to temporary file to keep atomic view of contents,
 * swapping into place only when write is successful.
 * <p/>
 * Pipe is left open, so caller is responsible for calling
 * {@link ParcelFileDescriptor#close()} or
 * {@link ParcelFileDescriptor#closeWithError(String)}.
 *
 * @param contentIn read end of a pipe.
 */
public void writeMetadataAndContent(JSONObject meta, ParcelFileDescriptor contentIn)
        throws IOException, GeneralSecurityException {
    // Write into temporary file to provide an atomic view of existing
    // contents during write, and also to recover from failed writes.
    final String tempName = mFile.getName() + ".tmp_" + Thread.currentThread().getId();
    final File tempFile = new File(mFile.getParentFile(), tempName);

    RandomAccessFile f = new RandomAccessFile(tempFile, "rw");
    try {
        // Truncate any existing data
        f.setLength(0);

        // Write content first to detect size
        if (contentIn != null) {
            f.seek(CONTENT_OFFSET);
            final int plainLength = writeSection(f, new FileInputStream(contentIn.getFileDescriptor()));
            meta.put(Document.COLUMN_SIZE, plainLength);

            // Verify that remote side of pipe finished okay; if they
            // crashed or indicated an error then this throws and we
            // leave the original file intact and clean up temp below.
            contentIn.checkError();
        }

        meta.put(Document.COLUMN_DOCUMENT_ID, mDocId);
        meta.put(Document.COLUMN_LAST_MODIFIED, System.currentTimeMillis());

        // Rewind and write metadata section
        f.seek(0);
        f.writeInt(MAGIC_NUMBER);

        final ByteArrayInputStream metaIn = new ByteArrayInputStream(
                meta.toString().getBytes(StandardCharsets.UTF_8));
        writeSection(f, metaIn);

        if (f.getFilePointer() > CONTENT_OFFSET) {
            throw new IOException("Metadata section was too large");
        }

        // Everything written fine, atomically swap new data into place.
        // fsync() before close would be overkill, since rename() is an
        // atomic barrier.
        f.close();
        tempFile.renameTo(mFile);

    } catch (JSONException e) {
        throw new IOException(e);
    } finally {
        // Regardless of what happens, always try cleaning up.
        f.close();
        tempFile.delete();
    }
}
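
Writing a magic number with writeInt at offset 0, as the method above does with MAGIC_NUMBER, gives readers a cheap format check before they parse anything else. A sketch of the matching read-side validation; the constant value and method name here are illustrative, not the vault sample's actual ones:

import java.io.IOException;
import java.io.RandomAccessFile;

static final int MAGIC = 0x0BADF00D; // illustrative constant only

// Hypothetical validation: reject files whose first four bytes don't match.
static void checkMagic(RandomAccessFile f) throws IOException {
    f.seek(0);
    int magic = f.readInt();
    if (magic != MAGIC) {
        throw new IOException("Unexpected magic number: 0x" + Integer.toHexString(magic));
    }
}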

From source file:org.apache.hadoop.hdfs.server.namenode.TestEditLog.java

/** 
 * Test edit log failover from a corrupt edit log
 */
@Test
public void testEditLogFailOverFromCorrupt() throws IOException {
    File f1 = new File(TEST_DIR + "/failover0");
    File f2 = new File(TEST_DIR + "/failover1");
    List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());

    NNStorage storage = setupEdits(editUris, 3);

    final long startErrorTxId = 1 * TXNS_PER_ROLL + 1;
    final long endErrorTxId = 2 * TXNS_PER_ROLL;

    File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
        public boolean accept(File dir, String name) {
            if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId))) {
                return true;
            }
            return false;
        }
    });
    assertEquals(1, files.length);

    long fileLen = files[0].length();
    LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);
    RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
    rwf.seek(fileLen - 4); // seek to checksum bytes
    int b = rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
    rwf.close();

    FSEditLog editlog = getFSEditLog(storage);
    editlog.initJournalsForWrite();
    long startTxId = 1;
    Collection<EditLogInputStream> streams = null;
    try {
        streams = editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
        readAllEdits(streams, startTxId);
    } catch (IOException e) {
        LOG.error("edit log failover didn't work", e);
        fail("Edit log failover didn't work");
    } finally {
        IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestEditLog.java

@Test
public void testEditChecksum() throws Exception {
    // start a cluster 
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    fileSys.mkdirs(new Path("/tmp"));

    Iterator<StorageDirectory> iter = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
    LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
    while (iter.hasNext()) {
        sds.add(iter.next());
    }
    editLog.close();
    cluster.shutdown();

    for (StorageDirectory sd : sds) {
        File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
        assertTrue(editFile.exists());

        long fileLen = editFile.length();
        LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
        RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
        rwf.seek(fileLen - 4); // seek to checksum bytes
        int b = rwf.readInt();
        rwf.seek(fileLen - 4);
        rwf.writeInt(b + 1);
        rwf.close();
    }

    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
        fail("should not be able to start");
    } catch (IOException e) {
        // expected
        assertNotNull("Cause of exception should be ChecksumException", e.getCause());
        assertEquals("Cause of exception should be ChecksumException", ChecksumException.class,
                e.getCause().getClass());
    }
}
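
Both edit-log tests corrupt the file the same way: read the trailing four checksum bytes with readInt, add one, and write the result back over the same offset with writeInt. Extracted as a standalone sketch (the helper name is ours, not Hadoop's; it assumes the file is at least four bytes long):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

// Hypothetical helper: flip the trailing 4-byte int of a file so that a
// checksum-verifying reader is guaranteed to reject it.
static void corruptTrailingInt(File file) throws IOException {
    try (RandomAccessFile rwf = new RandomAccessFile(file, "rw")) {
        long pos = rwf.length() - 4; // offset of the last int in the file
        rwf.seek(pos);
        int original = rwf.readInt();
        rwf.seek(pos);
        rwf.writeInt(original + 1); // any change here breaks the checksum
    }
}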

From source file:org.commoncrawl.service.listcrawler.CrawlList.java

void writeInitialSubDomainMetadataToDisk() throws IOException {

    RandomAccessFile file = new RandomAccessFile(_subDomainMetadataFile, "rw");

    try {

        file.writeByte(0); // version
        file.writeInt(_transientSubDomainStats.size());

        ArrayList<CrawlListMetadata> sortedMetadata = new ArrayList<CrawlListMetadata>();
        sortedMetadata.addAll(_transientSubDomainStats.values());
        _transientSubDomainStats = null;
        CrawlListMetadata[] metadataArray = sortedMetadata.toArray(new CrawlListMetadata[0]);
        Arrays.sort(metadataArray, new Comparator<CrawlListMetadata>() {

            @Override
            public int compare(CrawlListMetadata o1, CrawlListMetadata o2) {
                int result = ((Integer) o2.getUrlCount()).compareTo(o1.getUrlCount());
                if (result == 0) {
                    result = o1.getDomainName().compareTo(o2.getDomainName());
                }
                return result;
            }
        });

        DataOutputBuffer outputBuffer = new DataOutputBuffer(CrawlListMetadata.Constants.FixedDataSize);

        TreeMap<Long, Integer> idToOffsetMap = new TreeMap<Long, Integer>();

        for (CrawlListMetadata entry : metadataArray) {
            // reset output buffer 
            outputBuffer.reset();
            // write item to disk 
            entry.serialize(outputBuffer, new BinaryProtocol());

            if (outputBuffer.getLength() > CrawlListMetadata.Constants.FixedDataSize) {
                LOG.fatal("Metadata Serialization for List:" + getListId() + " SubDomain:"
                        + entry.getDomainName());
                System.out.println("Metadata Serialization for List:" + getListId() + " SubDomain:"
                        + entry.getDomainName());
            }
            // save offset 
            idToOffsetMap.put(entry.getDomainHash(), (int) file.getFilePointer());
            // write out fixed data size 
            file.write(outputBuffer.getData(), 0, CrawlListMetadata.Constants.FixedDataSize);
        }

        // write lookup table 
        _offsetLookupTable = new DataOutputBuffer(idToOffsetMap.size() * OFFSET_TABLE_ENTRY_SIZE);

        for (Map.Entry<Long, Integer> entry : idToOffsetMap.entrySet()) {
            _offsetLookupTable.writeLong(entry.getKey());
            _offsetLookupTable.writeInt(entry.getValue());
        }
    } finally {
        file.close();
    }
    _transientSubDomainStats = null;
}