Example usage for java.io RandomAccessFile getChannel

Introduction

On this page you can find example usage for java.io RandomAccessFile getChannel.

Prototype

public final FileChannel getChannel() 

Document

Returns the unique java.nio.channels.FileChannel object associated with this file.
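
Before the real-world examples below, here is a minimal, self-contained sketch of the basic pattern. The file name example.dat is just a placeholder:

import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;

public class GetChannelExample {
    public static void main(String[] args) throws Exception {
        // "rw" mode creates the file if it does not already exist.
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "rw");
             FileChannel channel = raf.getChannel()) {
            // The channel's position is tied to the file pointer of the
            // RandomAccessFile that created it.
            System.out.println("Channel size: " + channel.size() + " bytes");
        }
    }
}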

Usage

From source file:org.pentaho.di.trans.steps.excelinput.StaxWorkBookTest.java
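
This JUnit test reads a spreadsheet, then reopens the file, obtains its FileChannel, and calls tryLock() to verify that the reader released its handle.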

@Test
public void testRead() throws Exception {
    FileLock lock = null;
    RandomAccessFile randomAccessFile = null;
    try {
        readData();
        File fileAfterRead = new File("testfiles/sample-file.xlsx");
        randomAccessFile = new RandomAccessFile(fileAfterRead, "rw");
        FileChannel fileChannel = randomAccessFile.getChannel();
        lock = fileChannel.tryLock();
        // check that we can lock the file
        assertTrue(lock.isValid());
    } finally {
        if (lock != null) {
            lock.release();
        }
        if (randomAccessFile != null) {
            randomAccessFile.close();
        }
    }
}

From source file:org.pentaho.di.trans.steps.excelinput.StaxWorkBookIT.java
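
An integration-test variant of the previous check: after reading, it locks the sample file through its channel to confirm that no file handle was leaked.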

@Test
public void testRead() throws Exception {
    FileLock lock = null;
    RandomAccessFile randomAccessFile = null;
    try {
        readData(sample);
        File fileAfterRead = new File(sample);
        randomAccessFile = new RandomAccessFile(fileAfterRead, "rw");
        FileChannel fileChannel = randomAccessFile.getChannel();
        lock = fileChannel.tryLock();
        // check that we can lock the file
        assertTrue(lock.isValid());
    } finally {
        if (lock != null) {
            lock.release();
        }
        if (randomAccessFile != null) {
            randomAccessFile.close();
        }
    }
}

From source file:MyFormApp.java
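
This snippet memory-maps a PDF read-only through the channel so the whole file can be handed to PDFFile as a ByteBuffer and one page rendered to a PNG thumbnail.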

void pdfToimage(File filename) throws FileNotFoundException, IOException { // convert a PDF page to a PNG image

    File pdfFile = new File(filename.toString());
    RandomAccessFile raf = new RandomAccessFile(pdfFile, "r");
    FileChannel channel = raf.getChannel();
    ByteBuffer buf = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
    PDFFile pdf = new PDFFile(buf);

    int i = 0;
    String fileNameWithOutExt = FilenameUtils.removeExtension(filename.getName());

    Rectangle rect = new Rectangle(0, 0, (int) pdf.getPage(i).getBBox().getWidth(),
            (int) pdf.getPage(i).getBBox().getHeight());
    BufferedImage bufferedImage = new BufferedImage(100, 100, BufferedImage.TYPE_INT_RGB);

    Image image = pdf.getPage(i).getImage(rect.width, rect.height, // width & height
            rect, // clip rect
            null, // null for the ImageObserver
            true, // fill background with white
            true // block until drawing is done
    );
    Graphics2D bufImageGraphics = bufferedImage.createGraphics();
    bufImageGraphics.drawImage(image.getScaledInstance(100, 100, Image.SCALE_AREA_AVERAGING), 0, 0, null);

    ImageIO.write(bufferedImage, "PNG", new File(PATH + fileNameWithOutExt + ".png")); // write the scaled page image to disk
}

From source file:org.geoserver.restupload.ResumableUploadResourceManager.java
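
A resumable-upload handler: the channel obtained from a RandomAccessFile opened in "rw" mode lets IOUtils.copyToFileChannel write the request body starting at a given offset.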

public Long handleUpload(String uploadId, Representation entity, Long startPosition) {
    ResumableUploadResource resource = getResource(uploadId);
    Long writtenBytes = 0L;
    try {
        final ReadableByteChannel source = entity.getChannel();
        RandomAccessFile raf = null;
        FileChannel outputChannel = null;
        try {
            raf = new RandomAccessFile(resource.getFile(), "rw");
            outputChannel = raf.getChannel();
            writtenBytes = IOUtils.copyToFileChannel(256 * 1024, source, outputChannel, startPosition);
        } finally {
            try {
                if (raf != null) {
                    raf.close();
                }
            } finally {
                IOUtils.closeQuietly(source);
                IOUtils.closeQuietly(outputChannel);
            }
        }
    } catch (IOException e) {
        LOGGER.log(Level.SEVERE, e.getMessage(), e);
    }

    return resource.getFile().length();
}

From source file:org.apache.hadoop.dfs.TestFsck.java
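
An HDFS test that deliberately corrupts block replicas: the channel is consulted only for the block file's size, after which the RandomAccessFile seeks to a random offset and overwrites a few bytes before fsck is run.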

public void testCorruptBlock() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("dfs.blockreport.intervalMsec", 1000);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;

    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testCorruptBlock");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, (short) 3);
    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();

    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains("HEALTHY"));

    // corrupt replicas 
    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
    for (int i = 0; i < 6; i++) {
        File blockFile = new File(baseDir, "data" + (i + 1) + "/current/" + block);
        if (blockFile.exists()) {
            RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
            FileChannel channel = raFile.getChannel();
            String badString = "BADBAD";
            int rand = random.nextInt((int) channel.size() / 2);
            raFile.seek(rand);
            raFile.write(badString.getBytes());
            raFile.close();
        }
    }
    // Read the file to trigger reportBadBlocks
    try {
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
    } catch (IOException ie) {
        // Ignore exception
    }

    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    blocks = dfsClient.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != 3) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
    }
    assertTrue(blocks.get(0).isCorrupt());

    // Check if fsck reports the same
    outStr = runFsck(conf, 1, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains("CORRUPT"));
    assertTrue(outStr.contains("testCorruptBlock"));

    cluster.shutdown();
}

From source file:org.apache.hadoop.hdfs.TestCrcCorruption.java
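
Here the channel is used both to truncate .meta files and to overwrite random byte ranges at a chosen position, simulating CRC corruption so the test can verify that DFS still serves a good replica.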

/**
 * Check if DFS can handle corrupted CRC blocks.
 */
private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    short replFactor = 2;
    Random random = new Random();

    try {
        cluster = new MiniDFSCluster.Builder(conf).numNameNodes(1).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        util.createFiles(fs, "/srcdat", replFactor);
        util.waitReplication(fs, "/srcdat", (short) 2);

        // Now deliberately remove/truncate meta blocks from the first
        // directory of the first datanode. The complete absence of a meta
        // file prevents this datanode from sending data to another datanode.
        // However, a client is still allowed to access this block.
        //
        File storageDir = MiniDFSCluster.getStorageDir(0, 1);
        String bpid = cluster.getNamesystem().getBlockPoolId();
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        File[] blocks = data_dir.listFiles();
        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
        int num = 0;
        for (int idx = 0; idx < blocks.length; idx++) {
            if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) {
                num++;
                if (num % 3 == 0) {
                    //
                    // remove .meta file
                    //
                    LOG.info("Deliberately removing file " + blocks[idx].getName());
                    assertTrue("Cannot remove file.", blocks[idx].delete());
                } else if (num % 3 == 1) {
                    //
                    // shorten .meta file
                    //
                    RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
                    FileChannel channel = file.getChannel();
                    int newsize = random.nextInt((int) channel.size() / 2);
                    LOG.info("Deliberately truncating file " + blocks[idx].getName() + " to size " + newsize
                            + " bytes.");
                    channel.truncate(newsize);
                    file.close();
                } else {
                    //
                    // corrupt a few bytes of the metafile
                    //
                    RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
                    FileChannel channel = file.getChannel();
                    long position = 0;
                    //
                    // The very first time, corrupt the meta header at offset 0
                    //
                    if (num != 2) {
                        position = (long) random.nextInt((int) channel.size());
                    }
                    int length = random.nextInt((int) (channel.size() - position + 1));
                    byte[] buffer = new byte[length];
                    random.nextBytes(buffer);
                    channel.write(ByteBuffer.wrap(buffer), position);
                    LOG.info("Deliberately corrupting file " + blocks[idx].getName() + " at offset " + position
                            + " length " + length);
                    file.close();
                }
            }
        }

        //
        // Now deliberately corrupt all meta blocks from the second
        // directory of the first datanode
        //
        storageDir = MiniDFSCluster.getStorageDir(0, 1);
        data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        blocks = data_dir.listFiles();
        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));

        int count = 0;
        File previous = null;
        for (int idx = 0; idx < blocks.length; idx++) {
            if (blocks[idx].getName().startsWith("blk_") && blocks[idx].getName().endsWith(".meta")) {
                //
                // Move the previous metafile into the current one.
                //
                count++;
                if (count % 2 == 0) {
                    LOG.info("Deliberately insertimg bad crc into files " + blocks[idx].getName() + " "
                            + previous.getName());
                    assertTrue("Cannot remove file.", blocks[idx].delete());
                    assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx]));
                    assertTrue("Cannot recreate empty meta file.", previous.createNewFile());
                    previous = null;
                } else {
                    previous = blocks[idx];
                }
            }
        }

        //
        // Only one replica is possibly corrupted. The other replica should still
        // be good. Verify.
        //
        assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
        LOG.info("All File still have a valid replica");

        //
        // Set replication factor back to 1. This causes only one replica of
        // each block to remain in HDFS. The check is to make sure that
        // the corrupted replica generated above is the one that gets deleted.
        // This test is currently disabled until HADOOP-1557 is solved.
        //
        util.setReplication(fs, "/srcdat", (short) 1);
        //util.waitReplication(fs, "/srcdat", (short)1);
        //LOG.info("All Files done with removing replicas");
        //assertTrue("Excess replicas deleted. Corrupted replicas found.",
        //           util.checkFiles(fs, "/srcdat"));
        LOG.info("The excess-corrupted-replica test is disabled " + " pending HADOOP-1557");

        util.cleanup(fs, "/srcdat");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:com.example.psumaps.MapView.java
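
An Android helper that turns an immutable Bitmap into a mutable one: the channel memory-maps a temporary file so the pixels can be staged on disk while the source bitmap is recycled and recreated.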

public static Bitmap convertToMutable(Bitmap imgIn) {
    try {
        // This is the file used temporarily to hold the bytes.
        // It is not an image file; it stores the raw pixel data.
        File file = new File(Environment.getExternalStorageDirectory() + File.separator + "temp.tmp");

        // Open a RandomAccessFile on it.
        // Make sure the manifest declares the uses-permission
        // android.permission.WRITE_EXTERNAL_STORAGE in
        // AndroidManifest.xml, or this will fail on external storage.
        RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");

        // get the width and height of the source bitmap.
        int width = imgIn.getWidth();
        int height = imgIn.getHeight();
        Bitmap.Config type = imgIn.getConfig();

        // Copy the pixel bytes to the file. This assumes the source bitmap
        // was loaded with options.inPreferredConfig = Config.ARGB_8888;
        FileChannel channel = randomAccessFile.getChannel();
        MappedByteBuffer map = channel.map(FileChannel.MapMode.READ_WRITE, 0, imgIn.getRowBytes() * height);
        imgIn.copyPixelsToBuffer(map);
        // Recycle the source bitmap; it is no longer needed.
        imgIn.recycle();
        System.gc(); // try to force the memory held by imgIn to be released

        // Create a new mutable bitmap with the same size and config;
        // the memory freed above should now be available.
        imgIn = Bitmap.createBitmap(width, height, type);
        map.position(0);
        // load it back from temporary
        imgIn.copyPixelsFromBuffer(map);
        // Close the channel and the file, then delete the temporary file.
        channel.close();
        randomAccessFile.close();

        // delete the temp file
        file.delete();

    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }

    return imgIn;
}

From source file:org.apache.tajo.storage.http.ExampleHttpServerHandler.java
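
A Netty HTTP handler that serves a file with zero-copy transfer by wrapping the RandomAccessFile's channel in a DefaultFileRegion.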

private void processGet(ChannelHandlerContext context, FullHttpRequest request) {
    try {
        File file = getRequestedFile(request.getUri());

        RandomAccessFile raf = new RandomAccessFile(file, "r");
        long fileLength = raf.length();

        HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
        HttpHeaders.setContentLength(response, fileLength);
        setContentTypeHeader(response, file);

        context.write(response);

        context.write(new DefaultFileRegion(raf.getChannel(), 0, fileLength));

        // Write the end marker.
        ChannelFuture future = context.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
        future.addListener(ChannelFutureListener.CLOSE);

    } catch (IOException | URISyntaxException e) {
        context.writeAndFlush(getBadRequest(e.getMessage()));
    }
}

From source file:eu.stratosphere.nephele.services.iomanager.IOManagerPerformanceBenchmark.java
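
A micro-benchmark that writes and re-reads integers through the channel using either a heap or a direct ByteBuffer, timing both passes.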

@SuppressWarnings("resource")
private final void speedTestNIO(int bufferSize, boolean direct) throws IOException {
    final Channel.ID tmpChannel = ioManager.createChannel();

    File tempFile = null;
    FileChannel fs = null;

    try {
        tempFile = new File(tmpChannel.getPath());

        RandomAccessFile raf = new RandomAccessFile(tempFile, "rw");
        fs = raf.getChannel();

        ByteBuffer buf = direct ? ByteBuffer.allocateDirect(bufferSize) : ByteBuffer.allocate(bufferSize);

        long writeStart = System.currentTimeMillis();

        int valsLeft = NUM_INTS_WRITTEN;
        while (valsLeft-- > 0) {
            if (buf.remaining() < 4) {
                buf.flip();
                fs.write(buf);
                buf.clear();
            }
            buf.putInt(valsLeft);
        }

        if (buf.position() > 0) {
            buf.flip();
            fs.write(buf);
        }

        fs.close();
        raf.close();
        fs = null;

        long writeElapsed = System.currentTimeMillis() - writeStart;

        // ----------------------------------------------------------------

        raf = new RandomAccessFile(tempFile, "r");
        fs = raf.getChannel();
        buf.clear();

        long readStart = System.currentTimeMillis();

        fs.read(buf);
        buf.flip();

        valsLeft = NUM_INTS_WRITTEN;
        while (valsLeft-- > 0) {
            if (buf.remaining() < 4) {
                buf.compact();
                fs.read(buf);
                buf.flip();
            }
            if (buf.getInt() != valsLeft) {
                throw new IOException();
            }
        }

        fs.close();
        raf.close();

        long readElapsed = System.currentTimeMillis() - readStart;

        LOG.info("NIO Channel with buffer " + bufferSize + ": write " + writeElapsed + " msecs, read "
                + readElapsed + " msecs.");
    } finally {
        // close if possible
        if (fs != null) {
            fs.close();
            fs = null;
        }
        // try to delete the file
        if (tempFile != null) {
            tempFile.delete();
        }
    }
}

From source file:org.apache.hadoop.mapred.TestFadvisedFileRegion.java
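
The channel of the output RandomAccessFile serves as the WritableByteChannel target of a custom shuffle transfer; the test then verifies the copied region byte-for-byte.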

@Test(timeout = 100000)
public void testCustomShuffleTransfer() throws IOException {
    File absLogDir = new File("target", TestFadvisedFileRegion.class.getSimpleName() + "LocDir")
            .getAbsoluteFile();

    String testDirPath = StringUtils.join(Path.SEPARATOR,
            new String[] { absLogDir.getAbsolutePath(), "testCustomShuffleTransfer" });
    File testDir = new File(testDirPath);
    testDir.mkdirs();

    System.out.println(testDir.getAbsolutePath());

    File inFile = new File(testDir, "fileIn.out");
    File outFile = new File(testDir, "fileOut.out");

    //Initialize input file
    byte[] initBuff = new byte[FILE_SIZE];
    Random rand = new Random();
    rand.nextBytes(initBuff);

    FileOutputStream out = new FileOutputStream(inFile);
    try {
        out.write(initBuff);
    } finally {
        IOUtils.cleanup(LOG, out);
    }

    //define position and count to read from a file region.
    int position = 2 * 1024 * 1024;
    int count = 4 * 1024 * 1024 - 1;

    RandomAccessFile inputFile = null;
    RandomAccessFile targetFile = null;
    WritableByteChannel target = null;
    FadvisedFileRegion fileRegion = null;

    try {
        inputFile = new RandomAccessFile(inFile.getAbsolutePath(), "r");
        targetFile = new RandomAccessFile(outFile.getAbsolutePath(), "rw");
        target = targetFile.getChannel();

        Assert.assertEquals(FILE_SIZE, inputFile.length());

        //create FadvisedFileRegion
        fileRegion = new FadvisedFileRegion(inputFile, position, count, false, 0, null, null, 1024, false);

        //test corner cases
        customShuffleTransferCornerCases(fileRegion, target, count);

        long pos = 0;
        long size;
        while ((size = fileRegion.customShuffleTransfer(target, pos)) > 0) {
            pos += size;
        }

        //assert size
        Assert.assertEquals(count, (int) pos);
        Assert.assertEquals(count, targetFile.length());
    } finally {
        if (fileRegion != null) {
            fileRegion.releaseExternalResources();
        }
        IOUtils.cleanup(LOG, target);
        IOUtils.cleanup(LOG, targetFile);
        IOUtils.cleanup(LOG, inputFile);
    }

    //Read the target file and verify that copy is done correctly
    byte[] buff = new byte[FILE_SIZE];
    FileInputStream in = new FileInputStream(outFile);
    try {
        int total = in.read(buff, 0, count);

        Assert.assertEquals(count, total);

        for (int i = 0; i < count; i++) {
            Assert.assertEquals(initBuff[position + i], buff[i]);
        }
    } finally {
        IOUtils.cleanup(LOG, in);
    }

    //delete files and folders
    inFile.delete();
    outFile.delete();
    testDir.delete();
    absLogDir.delete();
}