Example usage for java.io RandomAccessFile write

List of usage examples for java.io RandomAccessFile write

Introduction

On this page you can find example usage for java.io RandomAccessFile write.

Prototype

public void write(byte b[], int off, int len) throws IOException 

Document

Writes len bytes from the specified byte array starting at offset off to this file.
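
Before the real-world examples below, here is a minimal, self-contained sketch (the file name example.bin and the sample data are illustrative, not taken from any of the sources below): the method is typically called after seeking to the position where the slice of the byte array should be written.

import java.io.IOException;
import java.io.RandomAccessFile;

public class WriteSliceExample {
    public static void main(String[] args) throws IOException {
        byte[] data = "Hello, RandomAccessFile".getBytes("UTF-8");
        // try-with-resources closes the file even if the write fails
        try (RandomAccessFile raf = new RandomAccessFile("example.bin", "rw")) {
            raf.seek(raf.length());   // position the file pointer at the end of the file
            raf.write(data, 7, 16);   // write the 16 bytes starting at offset 7 ("RandomAccessFile")
        }
    }
}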

Usage

From source file:phex.util.FileUtils.java

/**
 * Appends fileToAppend to the destination file. The appended file
 * is removed afterwards.
 *
 * @throws IOException in case an IO operation fails
 */
public static void appendFile(File destination, File fileToAppend) throws IOException {
    long destFileLength = destination.length();
    long appendFileLength = fileToAppend.length();
    // open files
    FileInputStream inStream = new FileInputStream(fileToAppend);
    try {
        RandomAccessFile destFile = new RandomAccessFile(destination, "rwd");
        try {
            // extend the file length up front... this gives a noticeable performance
            // boost since the contents are streamed into already allocated space.
            destFile.setLength(destFileLength + appendFileLength);
            destFile.seek(destFileLength);
            byte[] buffer = new byte[(int) Math.min(BUFFER_LENGTH, appendFileLength)];
            int length;
            while (-1 != (length = inStream.read(buffer))) {
                long start2 = System.currentTimeMillis();
                destFile.write(buffer, 0, length);
                long end2 = System.currentTimeMillis();
                try {
                    // throttle disk I/O: pause for twice as long as the write took
                    Thread.sleep((end2 - start2) * 2);
                } catch (InterruptedException exp) {
                    // reset interrupted flag
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        } finally {
            IOUtil.closeQuietly(destFile);
        }
    } finally {
        IOUtil.closeQuietly(inStream);
    }

    FileUtils.deleteFileMultiFallback(fileToAppend);
}

From source file:org.jcodec.common.io.Buffer.java

public void writeTo(RandomAccessFile file) throws IOException {
    file.write(buffer, pos, limit - pos);
}

From source file:ape.CorruptFileCommand.java

/**
 * This method does the actual work of corrupting the data file.
 */
public boolean corrupt(String corruptAddress) throws IOException {
    FileInputStream fin;
    byte[] buf;
    int count;

    try {

        RandomAccessFile tmp = new RandomAccessFile(corruptAddress, "rw");
        tmp.seek(offset);

        if (size <= 0) {
            System.out.println("ERROR: The size parameter must be positive");
            Main.logger.info("ERROR: The size parameter must be positive");
            return false;
        }

        buf = new byte[size];

        count = 0;
        if ((count = tmp.read(buf, 0, size)) == -1) {
            System.out.println("The file chosen is smaller than the corruption size (" + size + " bytes)");
            Main.logger.info("The file chosen is smaller than the corruption size (" + size + " bytes)");
            return false;
        }

        for (int i = 0; i < count; i++) {
            buf[i] = 0x3;
        }

        tmp.seek(0);
        tmp.close();
    } catch (FileNotFoundException e1) {
        System.out.println("Cannot open the file on the path given");
        Main.logger.info("Cannot open the file on the path given");
        e1.printStackTrace();
        Main.logger.info(e1);
        return false;
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    }

    RandomAccessFile raf;
    try {
        raf = new RandomAccessFile(corruptAddress, "rw");
        try {
            raf.seek(offset);

            raf.write(buf, 0, count);
            raf.seek(0);
            raf.close();
        } catch (IOException e) {
            System.out.println("Corrupting file failed");
            Main.logger.info("Corrupting file failed");
            e.printStackTrace();
            Main.logger.info(e);
            return false;
        }

        return true;
    } catch (FileNotFoundException e1) {
        System.out.println("Cannot open the file on the path: " + corruptAddress);
        Main.logger.info("Cannot open the file on the path: " + corruptAddress);
        e1.printStackTrace();
        Main.logger.info(e1);
        return false;
    }
}

From source file:com.v2soft.misto.Providers.MapnikProvider.java

@Override
public synchronized boolean prepareTileImage(TileInfo info) {
    try {
        String local_name = String.format("%d_%d_%d.png", info.getZoom(), info.getLongitude(),
                info.getLatitude());
        if (!mLocalCache.isFileInCache(local_name)) {
            String query = String.format("http://%s/%d/%d/%d%s", BASE_HOST, info.getZoom(), info.getLongitude(),
                    info.getLatitude(), IMAGE_EXT);
            Log.d("Uploading tile ", query);

            HttpParams httpParameters = new BasicHttpParams();
            // Set the timeout in milliseconds until a connection is established.
            int timeoutConnection = 3000;
            HttpConnectionParams.setConnectionTimeout(httpParameters, timeoutConnection);
            // Set the default socket timeout (SO_TIMEOUT) 
            // in milliseconds which is the timeout for waiting for data.
            int timeoutSocket = 5000;
            HttpConnectionParams.setSoTimeout(httpParameters, timeoutSocket);

            DefaultHttpClient client = new DefaultHttpClient(httpParameters);

            HttpGet request = new HttpGet(query);

            HttpResponse response = client.execute(request);
            HttpEntity entity = response.getEntity();
            int code = response.getStatusLine().getStatusCode();
            if (code == 200) {
                Calendar cal = Calendar.getInstance();
                cal.add(Calendar.MONTH, 1);
                RandomAccessFile out = mLocalCache.addFile(local_name, cal.getTime());
                InputStream in = entity.getContent();
                byte[] buffer = new byte[4096];
                int bytesRead = 0;
                while ((bytesRead = in.read(buffer)) > 0) {
                    out.write(buffer, 0, bytesRead);
                }
                out.close();
                in.close();
            }
        }
        InputStream in = mLocalCache.getFileInputStream(local_name);
        final Bitmap bitmap = BitmapFactory.decodeStream(in);
        BitmapManager.registerBitmap(bitmap, info.toString());
        info.setBitmap(bitmap);
        in.close();
        return true;
    } catch (Exception e) {
        Log.d("MapnikProvider::prepareTileImage", e.toString());
    }
    return false;
}

From source file:ape.CorruptCommand.java

/**
 * This method is the implementation of the corrupt function.
 * Given an address, it corrupts the file at that address.
 */
public boolean corrupt(String corruptAddress) throws IOException {
    // Trying to get a random HDFS block file
    if (Main.VERBOSE) {
        System.out.println("Trying to get a random HDFS block file");
    }
    if (corruptAddress == null) {
        corruptAddress = getCorruptAddress();
    }

    // If the above statement failed to set corruptAddress then there was a failure
    if (corruptAddress == null) {
        System.out.println("Could not get a random HDFS block file");
        Main.logger.info("Could not get a random HDFS block file");
        return false;
    }

    byte[] buf;
    int count;

    try {
        RandomAccessFile tmp = new RandomAccessFile(corruptAddress, "rw");
        tmp.seek(offset);
        if (size <= 0) {
            System.out.println("ERROR: The size parameter must be positive");
            Main.logger.info("ERROR: The size parameter must be positive");
            return false;
        }

        buf = new byte[size];

        count = 0;
        if ((count = tmp.read(buf, 0, size)) == -1) {
            System.out.println("The file chosen is smaller than the corruption size (" + size + " bytes)");
            Main.logger.info("The file chosen is smaller than the corruption size (" + size + " bytes)");
            return false;
        }

        for (int i = 0; i < count; i++) {
            buf[i] = 0x3;
        }

        tmp.seek(0);
        tmp.close();
    } catch (FileNotFoundException e1) {
        System.out.println("Cannot open the file on the path given");
        Main.logger.info("Cannot open the file on the path given");
        e1.printStackTrace();
        Main.logger.info(e1);
        return false;
    } catch (IOException e) {
        System.out.println("Corrupting file failed");
        Main.logger.info("Corrupting file failed");
        e.printStackTrace();
        Main.logger.info(e);
        return false;
    }

    RandomAccessFile raf;
    try {
        raf = new RandomAccessFile(corruptAddress, "rw");
        raf.seek(offset);
        raf.write(buf, 0, count);
        raf.seek(0);
        raf.close();

        return true;
    } catch (FileNotFoundException e1) {
        System.out.println("Cannot open the file on the path: " + corruptAddress);
        Main.logger.info("Cannot open the file on the path: " + corruptAddress);
        e1.printStackTrace();
        Main.logger.info(e1);
        return false;
    } catch (IOException e) {
        System.out.println("Corrupting file failed");
        Main.logger.info("Corrupting file failed");
        e.printStackTrace();
        Main.logger.info(e);
        return false;
    }
}

From source file:org.kawanfw.file.servlet.util.FileTransferManager.java

/**
 * Copy the input stream into the raf
 * 
 * @param input
 * @param output
 * @param buffer
 * @return the length written into the raf
 * @throws IOException
 */
private long copy(InputStream input, RandomAccessFile output, byte[] buffer) throws IOException {
    long count = 0;
    int n = 0;
    while (EOF != (n = input.read(buffer))) {
        output.write(buffer, 0, n);
        count += n;
    }
    return count;
}

From source file:org.apache.hadoop.dfs.TestReplication.java

public void testPendingReplicationRetry() throws IOException {

    MiniDFSCluster cluster = null;
    int numDataNodes = 4;
    String testFile = "/replication-test-file";
    Path testPath = new Path(testFile);

    byte buffer[] = new byte[1024];
    for (int i = 0; i < buffer.length; i++) {
        buffer[i] = '1';
    }

    try {
        Configuration conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        //first time format
        cluster = new MiniDFSCluster(0, conf, numDataNodes, true, true, null, null);
        cluster.waitActive();
        DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
                conf);

        OutputStream out = cluster.getFileSystem().create(testPath);
        out.write(buffer);
        out.close();

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

        // get first block of the file.
        String block = dfsClient.namenode.getBlockLocations(testFile, 0, Long.MAX_VALUE).get(0).getBlock()
                .getBlockName();

        cluster.shutdown();
        cluster = null;

        //Now mess up some of the replicas.
        //Delete the first and corrupt the next two.
        File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
        for (int i = 0; i < 25; i++) {
            buffer[i] = '0';
        }

        int fileCount = 0;
        for (int i = 0; i < 6; i++) {
            File blockFile = new File(baseDir, "data" + (i + 1) + "/current/" + block);
            LOG.info("Checking for file " + blockFile);

            if (blockFile.exists()) {
                if (fileCount == 0) {
                    LOG.info("Deleting file " + blockFile);
                    assertTrue(blockFile.delete());
                } else {
                    // corrupt it.
                    LOG.info("Corrupting file " + blockFile);
                    long len = blockFile.length();
                    assertTrue(len > 50);
                    RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
                    try {
                        blockOut.seek(len / 3);
                        blockOut.write(buffer, 0, 25);
                    } finally {
                        blockOut.close();
                    }
                }
                fileCount++;
            }
        }
        assertEquals(3, fileCount);

        /* Start the MiniDFSCluster with more datanodes since once a writeBlock
         * to a datanode node fails, same block can not be written to it
         * immediately. In our case some replication attempts will fail.
         */

        LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
        conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
        conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
        conf.set("dfs.safemode.threshold.pct", "0.75f"); // only 3 copies exist

        cluster = new MiniDFSCluster(0, conf, numDataNodes * 2, false, true, null, null);
        cluster.waitActive();

        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:com.kkbox.toolkit.image.KKImageRequest.java

private void cryptToFile(String sourceFilePath, String targetFilePath) throws Exception {
    // FIXME: should have two functions: decryptToFile and encryptToFile
    RandomAccessFile sourceFile = new RandomAccessFile(sourceFilePath, "r");
    RandomAccessFile targetFile = new RandomAccessFile(targetFilePath, "rw");
    int readLength;
    do {
        readLength = sourceFile.read(buffer, 0, BUFFER_SIZE);
        if (readLength != -1) {
            if (cipher != null) {
                buffer = cipher.doFinal(buffer);
            }
            targetFile.write(buffer, 0, readLength);
        }
    } while (readLength != -1);
    sourceFile.close();
    targetFile.close();
}

From source file:org.apache.hadoop.hdfs.TestReplication.java

public void testPendingReplicationRetry() throws IOException {

    MiniDFSCluster cluster = null;
    int numDataNodes = 4;
    String testFile = "/replication-test-file";
    Path testPath = new Path(testFile);

    byte buffer[] = new byte[1024];
    for (int i = 0; i < buffer.length; i++) {
        buffer[i] = '1';
    }

    try {
        Configuration conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        //first time format
        cluster = new MiniDFSCluster(0, conf, numDataNodes, true, true, null, null);
        cluster.waitActive();
        DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
                conf);

        OutputStream out = cluster.getFileSystem().create(testPath);
        out.write(buffer);
        out.close();

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

        // get first block of the file.
        String block = dfsClient.namenode.getBlockLocations(testFile, 0, Long.MAX_VALUE).get(0).getBlock()
                .getBlockName();

        cluster.shutdown();
        cluster = null;

        //Now mess up some of the replicas.
        //Delete the first and corrupt the next two.
        File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
        for (int i = 0; i < 25; i++) {
            buffer[i] = '0';
        }

        int fileCount = 0;
        for (int i = 0; i < 6; i++) {
            File blockFile = new File(baseDir, "data" + (i + 1) + "/current/" + block);
            LOG.info("Checking for file " + blockFile);

            if (blockFile.exists()) {
                if (fileCount == 0) {
                    LOG.info("Deleting file " + blockFile);
                    assertTrue(blockFile.delete());
                } else {
                    // corrupt it.
                    LOG.info("Corrupting file " + blockFile);
                    long len = blockFile.length();
                    assertTrue(len > 50);
                    RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
                    try {
                        blockOut.seek(len / 3);
                        blockOut.write(buffer, 0, 25);
                    } finally {
                        blockOut.close();
                    }
                }
                fileCount++;
            }
        }
        assertEquals(3, fileCount);

        /* Start the MiniDFSCluster with more datanodes since once a writeBlock
         * to a datanode node fails, same block can not be written to it
         * immediately. In our case some replication attempts will fail.
         */

        LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
        conf = new Configuration();
        conf.set("dfs.replication", Integer.toString(numDataNodes));
        conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
        conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
        conf.set("dfs.safemode.threshold.pct", "0.75f"); // only 3 copies exist

        cluster = new MiniDFSCluster(0, conf, numDataNodes * 2, false, true, null, null);
        cluster.waitActive();

        dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);

        waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file:com.alibaba.otter.node.etl.common.pipe.impl.http.AbstractHttpPipe.java

protected EncryptedData encryptFile(File file) {
    // encrypt the file path to obtain the key/crc material
    EncryptedData encryptedData = null;
    try {
        encryptedData = EncryptUtils.encrypt(file.getPath().getBytes("UTF-8"));
    } catch (UnsupportedEncodingException e) {
        // ignore
    }

    // append the key and the crc to the end of the file
    RandomAccessFile raf = null;
    try {
        raf = new RandomAccessFile(file, "rw");
        long origLength = file.length();
        int keyLength = ByteUtils.stringToBytes(encryptedData.getKey()).length;
        int crcLength = ByteUtils.stringToBytes(encryptedData.getCrc()).length;
        long totalLength = origLength + crcLength + keyLength;
        raf.setLength(totalLength);
        raf.seek(origLength);
        raf.write(ByteUtils.stringToBytes(encryptedData.getKey()), 0, keyLength);
        raf.seek(origLength + keyLength);
        raf.write(ByteUtils.stringToBytes(encryptedData.getCrc()), 0, crcLength);
    } catch (Exception e) {
        throw new PipeException("write_encrypted_error", e);
    } finally {
        IOUtils.closeQuietly(raf);
    }

    return encryptedData;
}