Example usage for java.io.RandomAccessFile.close()

List of usage examples for java.io.RandomAccessFile.close()

Introduction

On this page you can find usage examples for java.io.RandomAccessFile.close().

Prototype

public void close() throws IOException 

Document

Closes this random access file stream and releases any system resources associated with the stream.
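
A minimal sketch of the basic pattern, assuming Java 7 or later: RandomAccessFile implements Closeable, so it can be opened in a try-with-resources statement and close() then runs automatically, even when an exception is thrown. The file name below is purely illustrative.

import java.io.IOException;
import java.io.RandomAccessFile;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        // "example.dat" is a hypothetical file name used only for illustration.
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "rw")) {
            raf.writeUTF("hello");             // write a short modified-UTF-8 string
            raf.seek(0);                       // move back to the start of the file
            System.out.println(raf.readUTF());
        }                                      // close() is invoked automatically here
    }
}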

Usage

From source file: org.apache.hcatalog.pig.TestHCatLoader.java

@Test
public void testGetInputBytes() throws Exception {
    File file = new File(TEST_WAREHOUSE_DIR + "/" + SPECIFIC_SIZE_TABLE + "/part-m-00000");
    file.deleteOnExit();
    RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");
    randomAccessFile.setLength(2L * 1024 * 1024 * 1024);
    randomAccessFile.close();
    Job job = new Job();
    HCatLoader hCatLoader = new HCatLoader();
    hCatLoader.setUDFContextSignature("testGetInputBytes");
    hCatLoader.setLocation(SPECIFIC_SIZE_TABLE, job);
    ResourceStatistics statistics = hCatLoader.getStatistics(file.getAbsolutePath(), job);
    assertEquals(2048, (long) statistics.getmBytes());
}
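
The test above only needs a file of a specific on-disk length, so it grows the file with setLength(long) and closes the handle right away. A hedged, self-contained sketch of that pre-allocation idiom (file name and size are made-up values):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class PreallocateSketch {
    public static void main(String[] args) throws IOException {
        File file = new File("preallocated.bin");    // hypothetical target file
        RandomAccessFile raf = new RandomAccessFile(file, "rw");
        try {
            raf.setLength(16L * 1024 * 1024);        // grow (or truncate) to 16 MB
        } finally {
            raf.close();                             // always release the file handle
        }
        System.out.println("Length on disk: " + file.length());
    }
}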

From source file: nl.mpi.handle.util.implementation.HandleUtil.java

/**
 * Gets the website service account private key file from the local file system.
 * @return Returns the private key file as a byte array.
 * @throws java.io.FileNotFoundException Throws <CODE>FileNotFoundException</CODE> if the private key file for the website Handle System service account cannot be found on the local file system.
 * @throws java.io.IOException Throws <CODE>IOException</CODE> if the private key file for the website Handle System service account cannot be accessed.
 */
public byte[] getPrivateKeyFile() throws IOException {
    byte[] fileBytes = null;

    File privKeyFile = new File(handleAdminKeyFilePath);

    if (privKeyFile.exists() == false) {
        throw new IOException("The admin private key file could not be found.");
    }

    if (privKeyFile.canRead() == false) {
        throw new IOException("The admin private key file cannot be read.");
    }

    RandomAccessFile privateKeyContents = new RandomAccessFile(privKeyFile, "r");

    int length = (int) privateKeyContents.length();

    if (length > 0) {
        fileBytes = new byte[length];
        privateKeyContents.read(fileBytes);
        privateKeyContents.close();
    } else {
        throw new IOException("The private key file is empty.");
    }

    return fileBytes;
}
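
Two details of the method above are easy to miss: a single read(byte[]) call is not guaranteed to fill the whole array, and close() is skipped if reading throws. A sketch of a more defensive variant, assuming Java 7+, using readFully(byte[]) and try-with-resources; the helper name is made up:

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class ReadWholeFileSketch {
    // Hypothetical helper: read an entire (small) file into memory.
    static byte[] readAllBytes(File file) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(file, "r")) {
            long length = raf.length();
            if (length == 0 || length > Integer.MAX_VALUE) {
                throw new IOException("Unexpected file size: " + length);
            }
            byte[] bytes = new byte[(int) length];
            raf.readFully(bytes);   // blocks until the array is full or throws EOFException
            return bytes;           // close() still runs, even if readFully fails
        }
    }
}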

From source file: org.apache.hadoop.hdfs.server.namenode.TestFSEditLogLoader.java

@Test
public void testDisplayRecentEditLogOpCodes() throws IOException {
    // start a cluster 
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
            .enableManagedDfsDirsRedundancy(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    FSImage fsimage = namesystem.getFSImage();
    for (int i = 0; i < 20; i++) {
        fileSys.mkdirs(new Path("/tmp/tmp" + i));
    }
    StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
    cluster.shutdown();

    File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
    assertTrue("Should exist: " + editFile, editFile.exists());

    // Corrupt the edits file.
    long fileLen = editFile.length();
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    rwf.seek(fileLen - 40);
    for (int i = 0; i < 20; i++) {
        rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
    }
    rwf.close();

    StringBuilder bld = new StringBuilder();
    bld.append("^Error replaying edit log at offset \\d+.  ");
    bld.append("Expected transaction ID was \\d+\n");
    bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
                .enableManagedDfsDirsRedundancy(false).format(false).build();
        fail("should not be able to start");
    } catch (IOException e) {
        assertTrue("error message contains opcodes message", e.getMessage().matches(bld.toString()));
    }
}

From source file: dk.statsbiblioteket.hadoop.archeaderextractor.ARCHeaderExtractor.java

private static Map<Long, String> parse(File arcFile) throws IOException {
    Map<Long, String> headers = new HashMap<Long, String>();

    /*
    String lastModifiedString = String.valueOf(arcFile.lastModified());
    headers.put((long) 1, lastModifiedString);
    return headers;
    */

    /*  extract the time stamp from the file name of the ARC file
    The time stamp is the third element of the file name when split on "-".
     */

    String timeStamp = arcFile.getName().split("-")[2];

    RandomAccessFile raf;
    RandomAccessFileInputStream rafin;
    ByteCountingPushBackInputStream pbin;

    ArcReader arcReader = null;
    ArcRecordBase arcRecord;
    UriProfile uriProfile = UriProfile.RFC3986;

    boolean bBlockDigestEnabled = true;
    boolean bPayloadDigestEnabled = true;
    int recordHeaderMaxSize = 8192;
    int payloadHeaderMaxSize = 32768;

    raf = new RandomAccessFile(arcFile, "r");
    rafin = new RandomAccessFileInputStream(raf);
    pbin = new ByteCountingPushBackInputStream(new BufferedInputStream(rafin, 8192), 16);

    if (ArcReaderFactory.isArcFile(pbin)) {
        arcReader = ArcReaderFactory.getReaderUncompressed(pbin);
        arcReader.setUriProfile(uriProfile);
        arcReader.setBlockDigestEnabled(bBlockDigestEnabled);
        arcReader.setPayloadDigestEnabled(bPayloadDigestEnabled);
        arcReader.setRecordHeaderMaxSize(recordHeaderMaxSize);
        arcReader.setPayloadHeaderMaxSize(payloadHeaderMaxSize);

        while ((arcRecord = arcReader.getNextRecord()) != null) {
            headers.put(arcRecord.getStartOffset(), extractHeader(arcRecord, timeStamp));
        }
        arcReader.close();
    } else {
        System.err.println("Input file is not an ARC file");
    }

    if (arcReader != null) {
        arcReader.close();
    }
    pbin.close();
    raf.close();

    return headers;

}

From source file: org.apache.hadoop.hdfs.server.datanode.DataStorage.java

protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
    File oldF = new File(rootDir, "storage");
    if (oldF.exists())
        return;
    // recreate old storage file to let pre-upgrade versions fail
    if (!oldF.createNewFile())
        throw new IOException("Cannot create file " + oldF);
    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
    // write new version into old storage file
    try {
        writeCorruptedData(oldFile);
    } finally {
        oldFile.close();
    }
}

From source file: org.apache.flume.tools.TestFileChannelIntegrityTool.java

public void doTestFixCorruptEvents(boolean withCheckpoint) throws Exception {
    Set<String> corruptFiles = new HashSet<String>();
    File[] files = dataDir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            if (name.contains("lock") || name.contains("meta")) {
                return false;
            }
            return true;
        }
    });
    Random random = new Random();
    int corrupted = 0;
    for (File dataFile : files) {
        LogFile.SequentialReader reader = new LogFileV3.SequentialReader(dataFile, null);
        RandomAccessFile handle = new RandomAccessFile(dataFile, "rw");
        long eventPosition1 = reader.getPosition();
        LogRecord rec = reader.next();
        //No point corrupting commits, so ignore them
        if (rec == null || rec.getEvent().getClass().getName().equals("org.apache.flume.channel.file.Commit")) {
            handle.close();
            reader.close();
            continue;
        }
        long eventPosition2 = reader.getPosition();
        rec = reader.next();
        handle.seek(eventPosition1 + 100);
        handle.writeInt(random.nextInt());
        corrupted++;
        corruptFiles.add(dataFile.getName());
        if (rec == null || rec.getEvent().getClass().getName().equals("org.apache.flume.channel.file.Commit")) {
            handle.close();
            reader.close();
            continue;
        }
        handle.seek(eventPosition2 + 100);
        handle.writeInt(random.nextInt());
        corrupted++;
        handle.close();
        reader.close();

    }
    FileChannelIntegrityTool tool = new FileChannelIntegrityTool();
    tool.run(new String[] { "-l", dataDir.toString() });
    FileChannel channel = new FileChannel();
    channel.setName("channel");
    String cp;
    if (withCheckpoint) {
        cp = origCheckpointDir.toString();
    } else {
        FileUtils.deleteDirectory(checkpointDir);
        Assert.assertTrue(checkpointDir.mkdirs());
        cp = checkpointDir.toString();
    }
    ctx.put(FileChannelConfiguration.CHECKPOINT_DIR, cp);
    ctx.put(FileChannelConfiguration.DATA_DIRS, dataDir.toString());
    channel.configure(ctx);
    channel.start();
    Transaction tx = channel.getTransaction();
    tx.begin();
    int i = 0;
    while (channel.take() != null) {
        i++;
    }
    tx.commit();
    tx.close();
    channel.stop();
    Assert.assertEquals(25 - corrupted, i);
    files = dataDir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            if (name.contains(".bak")) {
                return true;
            }
            return false;
        }
    });
    Assert.assertEquals(corruptFiles.size(), files.length);
    for (File file : files) {
        String name = file.getName();
        name = name.replaceAll(".bak", "");
        Assert.assertTrue(corruptFiles.remove(name));
    }
    Assert.assertTrue(corruptFiles.isEmpty());
}

From source file: info.ajaxplorer.client.http.AjxpFileBody.java

public void writeTo(OutputStream out) {
    InputStream in;
    try {
        if (this.chunkSize > 0) {
            //System.out.println("Uploading file part " + this.chunkIndex);
            RandomAccessFile raf = new RandomAccessFile(getFile(), "r");
            int start = chunkIndex * this.chunkSize;
            int count = 0;
            int limit = chunkSize;
            if (chunkIndex == (totalChunks - 1)) {
                limit = lastChunkSize;
            }
            raf.seek(start);
            while (count < limit) {
                int byt = raf.read();
                out.write(byt);
                count++;
            }
            raf.close();
            //System.out.println("Sent " + count);            
        } else {
            in = new FileInputStream(getFile());
            byte[] buf = new byte[1024];
            int len;
            while ((len = in.read(buf)) > 0) {
                out.write(buf, 0, len);
            }
            in.close();
        }
        this.chunkIndex++;
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file: hoot.services.info.ErrorLog.java

public String getErrorlog(long maxLength) throws Exception {

    File file = new File(_errLogPath);
    RandomAccessFile randomAccessFile = new RandomAccessFile(file, "r");
    int lines = 0;
    StringBuilder builder = new StringBuilder();
    long length = file.length();
    //length--;

    long startOffset = 0;
    if (length > maxLength) {
        startOffset = length - maxLength;
    }
    for (long seek = startOffset; seek < length; seek++) {
        randomAccessFile.seek(seek);
        char c = (char) randomAccessFile.read();
        builder.append(c);
    }

    randomAccessFile.close();

    return builder.toString();
}
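
The method above issues one seek(long) and one read() per character, which works but is slow for large logs. A hedged alternative sketch that seeks once, reads the tail into a buffer with a single call, and closes the file in a finally block (the single-byte charset mirrors the char cast above and is an assumption):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;

public class TailReadSketch {
    static String tail(File logFile, long maxLength) throws IOException {
        RandomAccessFile raf = new RandomAccessFile(logFile, "r");
        try {
            long length = raf.length();
            long startOffset = Math.max(0, length - maxLength);
            byte[] buffer = new byte[(int) (length - startOffset)];
            raf.seek(startOffset);    // position once, just before the tail
            raf.readFully(buffer);    // read the whole tail in one call
            return new String(buffer, StandardCharsets.ISO_8859_1);
        } finally {
            raf.close();              // release the handle even if reading fails
        }
    }
}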

From source file: com.btoddb.fastpersitentqueue.MemorySegmentSerializer.java

public boolean searchOffline(MemorySegment seg, FpqEntry target) throws IOException {
    RandomAccessFile raFile = new RandomAccessFile(createPagingFile(seg), "r");
    try {
        // jump over header info - we already have it
        raFile.seek(seg.getEntryListOffsetOnDisk());
        for (int i = 0; i < seg.getNumberOfEntries(); i++) {
            FpqEntry entry = new FpqEntry();
            entry.readFromPaging(raFile);
            if (target.equals(entry)) {
                return true;
            }
        }
        return false;
    } finally {
        raFile.close();
    }
}

From source file: io.github.jeremgamer.preview.actions.Download.java

private void download() {

    GeneralSave gs = new GeneralSave();
    try {
        gs.load(new File("projects/" + Editor.getProjectName() + "/general.rbd"));
    } catch (IOException e1) {
        e1.printStackTrace();
    }
    name = gs.getString("name");
    new Thread(new Runnable() {

        @Override
        public void run() {
            if (url == null) {

            } else {
                File archive = new File(System.getProperty("user.home") + "/AppData/Roaming/.rocketbuilder/"
                        + name + "/data.zip");
                File outputFolder = new File(
                        System.getProperty("user.home") + "/AppData/Roaming/.rocketbuilder/" + name);

                new File(System.getProperty("user.home") + "/AppData/Roaming/.rocketbuilder/" + name).mkdirs();
                URL webFile;
                try {
                    webFile = new URL(url);
                    ReadableByteChannel rbc = Channels.newChannel(webFile.openStream());
                    fos = new FileOutputStream(System.getProperty("user.home")
                            + "/AppData/Roaming/.rocketbuilder/" + name + "/data.zip");
                    HttpURLConnection httpConn = (HttpURLConnection) webFile.openConnection();
                    totalBytes = httpConn.getContentLength();
                    new Thread(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                while (bytesCopied < totalBytes) {
                                    for (CustomProgressBar bar : barList) {
                                        bytesCopied = fos.getChannel().size();
                                        progressValue = (int) (100 * bytesCopied / totalBytes);
                                        bar.setValue(progressValue);
                                        if (bar.isStringPainted()) {
                                            bar.setString(progressValue + "%     " + bytesCopied / 1000 + "/"
                                                    + totalBytes / 1000 + "Kb     tape " + step + "/2");
                                        }
                                    }
                                }
                            } catch (IOException e) {
                                e.printStackTrace();
                            }
                        }

                    }).start();
                    fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
                    fos.close();
                    step = 2;
                    for (CustomProgressBar bar : barList) {
                        if (bar.isStringPainted()) {
                            bar.setString("tape " + step + "/2 : Extraction");
                        }
                    }

                    for (int timeout = 100; timeout > 0; timeout--) {
                        RandomAccessFile ran = null;

                        try {
                            ran = new RandomAccessFile(archive, "rw");
                            break;
                        } catch (Exception ex) {
                        } finally {
                            if (ran != null)
                                try {
                                    ran.close();
                                } catch (IOException ex) {
                                }

                            ran = null;
                        }

                        try {
                            Thread.sleep(100);
                        } catch (InterruptedException ex) {
                        }
                    }

                    ZipFile zipFile = new ZipFile(archive, Charset.forName("Cp437"));
                    Enumeration<? extends ZipEntry> entries = zipFile.entries();
                    while (entries.hasMoreElements()) {
                        ZipEntry entry = entries.nextElement();
                        File entryDestination = new File(outputFolder, entry.getName());
                        entryDestination.getParentFile().mkdirs();
                        if (entry.isDirectory())
                            entryDestination.mkdirs();
                        else {
                            InputStream in = zipFile.getInputStream(entry);
                            OutputStream out = new FileOutputStream(entryDestination);
                            IOUtils.copy(in, out);
                            IOUtils.closeQuietly(in);
                            IOUtils.closeQuietly(out);
                            in.close();
                            out.close();
                        }
                    }

                    for (CustomProgressBar bar : barList) {
                        bar.setString("");
                    }

                    zipFile.close();
                    archive.delete();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }

    }).start();
}
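
In the download example above, RandomAccessFile is opened in a retry loop only to check whether the freshly downloaded archive can be locked for read/write before extraction; the handle itself is never used and is closed in the finally block. A condensed, hedged sketch of that availability check, assuming Java 7+ (the retry count and sleep interval are arbitrary):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class WaitForFileSketch {
    // Returns true once the file can be opened in "rw" mode, false after ~10 seconds.
    static boolean waitUntilWritable(File file) throws InterruptedException {
        for (int attempt = 0; attempt < 100; attempt++) {
            try (RandomAccessFile probe = new RandomAccessFile(file, "rw")) {
                return true;           // opened successfully; close() runs automatically
            } catch (IOException stillLocked) {
                Thread.sleep(100);     // another process may still hold the file
            }
        }
        return false;
    }
}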