Example usage for java.io RandomAccessFile length

List of usage examples for java.io RandomAccessFile length

Introduction

On this page you can find example usages of java.io RandomAccessFile length.

Prototype

public native long length() throws IOException;

Document

Returns the length of this file.
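
Before the project examples, here is a minimal, self-contained sketch of the call; the file name "data.bin" is illustrative and not taken from any of the examples below.

import java.io.IOException;
import java.io.RandomAccessFile;

public class LengthExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the file even if length() throws
        try (RandomAccessFile raf = new RandomAccessFile("data.bin", "r")) {
            long size = raf.length(); // current length of the file in bytes
            System.out.println("File size: " + size + " bytes");
        }
    }
}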

Usage

From source file:Interface.FramePrincipal.java

private void bt_uplActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_bt_uplActionPerformed

    File diretorio = new File(dir_arq.getText());
    File file = new File(dir_arq.getText() + "/" + nom_arq.getText());

    if (!diretorio.exists()) {
        JOptionPane.showMessageDialog(null, "Informe um diretrio vlido!");
    } else if (!file.exists() || "".equals(nom_arq.getText())) {
        JOptionPane.showMessageDialog(null, "Informe um arquivo vlido!");
    } else {/*from  w w  w.  ja  va 2s.  c om*/
        try {
            //////////////////////////////////////// Validar tamanho de arquivo/////////////////////////////////////////////////               
            RandomAccessFile arquivo = new RandomAccessFile(dir_arq.getText() + "/" + nom_arq.getText(), "r");
            long tamanho = arquivo.length();

            if (tamanho >= 104857600) {
                JOptionPane.showMessageDialog(null, "Arquivo excedeu o tamanho mximo de 100 MB!");
                arquivo.close();
                return;
            }

            //////////////////////////////////////////// Carrega arquivo para o bucket /////////////////////////////////////////
            HttpClient client = new HttpClient();
            client.getParams().setParameter("http.useragent", "Test Client");

            BufferedReader br = null;
            String apikey = "AIzaSyAuKiAdUluAz4IEaOUoXldA8XuwEbty5V8";

            File input = new File(dir_arq.getText() + "/" + nom_arq.getText());

            PostMethod method = new PostMethod("https://www.googleapis.com/upload/storage/v1/b/" + bac.getNome()
                    + "/o?uploadType=media&name=" + nom_arq.getText());
            method.addParameter("uploadType", "media");
            method.addParameter("name", nom_arq.getText());
            method.setRequestEntity(new InputStreamRequestEntity(new FileInputStream(input), input.length()));
            //       method.setRequestHeader("Content-type", "image/png; charset=ISO-8859-1");
            method.setRequestHeader("Content-type", "application/octet-stream");

            int returnCode = client.executeMethod(method);

            if (returnCode == HttpStatus.SC_NOT_IMPLEMENTED) {
                System.err.println("The Post method is not implemented by this URI");
                method.getResponseBodyAsString();
            } else {
                br = new BufferedReader(new InputStreamReader(method.getResponseBodyAsStream()));
                String readLine;
                while (((readLine = br.readLine()) != null)) {
                    System.err.println(readLine);
                }
                br.close();
            }

        } catch (Exception e) {
            System.err.println(e);
            return; // do not report success if the upload failed
        }
        JOptionPane.showMessageDialog(null, "Arquivo carregado com sucesso!");
    }
}

From source file:org.exist.util.VirtualTempFile.java

/**
  * The method <code>getChunk</code>
  *
  * @param offset a <code>long</code> value
  * @return a <code>byte[]</code> value
  * @exception IOException if an error occurs
  */
public byte[] getChunk(long offset) throws IOException {
    byte[] data = null;

    if (os != null) {
        close();
    }

    if (tempFile != null) {
        final RandomAccessFile raf = new RandomAccessFile(tempFile, "r");
        raf.seek(offset);
        long remaining = raf.length() - offset;
        if (remaining > maxChunkSize) {
            remaining = maxChunkSize;
        } else if (remaining < 0) {
            remaining = 0;
        }
        data = new byte[(int) remaining];
        raf.readFully(data);
        raf.close();
    } else if (tempBuffer != null) {
        long remaining = tempBuffer.length - offset;
        if (remaining > maxChunkSize) {
            remaining = maxChunkSize;
        } else if (remaining < 0) {
            remaining = 0;
        }
        data = new byte[(int) remaining];
        if (remaining > 0) {
            System.arraycopy(tempBuffer, (int) offset, data, 0, (int) remaining);
        }
    }

    return data;
}

From source file:org.apache.hadoop.mapred.TestFadvisedFileRegion.java

@Test(timeout = 100000)
public void testCustomShuffleTransfer() throws IOException {
    File absLogDir = new File("target", TestFadvisedFileRegion.class.getSimpleName() + "LocDir")
            .getAbsoluteFile();

    String testDirPath = StringUtils.join(Path.SEPARATOR,
            new String[] { absLogDir.getAbsolutePath(), "testCustomShuffleTransfer" });
    File testDir = new File(testDirPath);
    testDir.mkdirs();

    System.out.println(testDir.getAbsolutePath());

    File inFile = new File(testDir, "fileIn.out");
    File outFile = new File(testDir, "fileOut.out");

    //Initialize input file
    byte[] initBuff = new byte[FILE_SIZE];
    Random rand = new Random();
    rand.nextBytes(initBuff);

    FileOutputStream out = new FileOutputStream(inFile);
    try {
        out.write(initBuff);
    } finally {
        IOUtils.cleanup(LOG, out);
    }

    //define position and count to read from a file region.
    int position = 2 * 1024 * 1024;
    int count = 4 * 1024 * 1024 - 1;

    RandomAccessFile inputFile = null;
    RandomAccessFile targetFile = null;
    WritableByteChannel target = null;
    FadvisedFileRegion fileRegion = null;

    try {
        inputFile = new RandomAccessFile(inFile.getAbsolutePath(), "r");
        targetFile = new RandomAccessFile(outFile.getAbsolutePath(), "rw");
        target = targetFile.getChannel();

        Assert.assertEquals(FILE_SIZE, inputFile.length());

        //create FadvisedFileRegion
        fileRegion = new FadvisedFileRegion(inputFile, position, count, false, 0, null, null, 1024, false);

        //test corner cases
        customShuffleTransferCornerCases(fileRegion, target, count);

        long pos = 0;
        long size;
        while ((size = fileRegion.customShuffleTransfer(target, pos)) > 0) {
            pos += size;
        }

        //assert size
        Assert.assertEquals(count, (int) pos);
        Assert.assertEquals(count, targetFile.length());
    } finally {
        if (fileRegion != null) {
            fileRegion.releaseExternalResources();
        }
        IOUtils.cleanup(LOG, target);
        IOUtils.cleanup(LOG, targetFile);
        IOUtils.cleanup(LOG, inputFile);
    }

    //Read the target file and verify that copy is done correctly
    byte[] buff = new byte[FILE_SIZE];
    FileInputStream in = new FileInputStream(outFile);
    try {
        int total = in.read(buff, 0, count);

        Assert.assertEquals(count, total);

        for (int i = 0; i < count; i++) {
            Assert.assertEquals(initBuff[position + i], buff[i]);
        }
    } finally {
        IOUtils.cleanup(LOG, in);
    }

    //delete files and folders
    inFile.delete();
    outFile.delete();
    testDir.delete();
    absLogDir.delete();
}

From source file:com.koda.integ.hbase.storage.LRUStorageRecycler.java

public void run() {
    LOG.info(Thread.currentThread().getName() + " started.");

    while (needContinue()) {
        // Get oldest file         
        int minId = storage.getMinId().get();
        String fileName = storage.getFilePath(minId);
        RandomAccessFile raf = storage.getFile(minId);

        try {
            long size = raf.length();
            // STATISTICS
            totalFilesSize.addAndGet(size);
            LOG.info("Processing file: " + fileName + " size=" + size);
            // It's FIFO now
            processFile(raf);
            // STATISTICS
            totalScannedFiles.incrementAndGet();
            // Update current storage size
            storage.updateStorageSize(-size);
            adjustRecyclerParameters();

        } catch (Exception e) {
            LOG.error(fileName, e);
        }

    }
    LOG.info(Thread.currentThread().getName() + " stopped.");
}

From source file:org.apache.hadoop.hdfs.TestFileAppend3.java

/**
 * TC7: Corrupted replicas are present.
 * @throws IOException an exception might be thrown
 */
@Test
public void testTC7() throws Exception {
    final short repl = 2;
    final Path p = new Path("/TC7/foo");
    System.out.println("p=" + p);

    //a. Create file with replication factor of 2. Write half block of data. Close file.
    final int len1 = (int) (BLOCK_SIZE / 2);
    {
        FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
        AppendTestUtil.write(out, 0, len1);
        out.close();
    }
    DFSTestUtil.waitReplication(fs, p, repl);

    //b. Log into one datanode that has one replica of this block.
    //   Find the block file on this datanode and truncate it to zero size.
    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
    assertEquals(1, locatedblocks.locatedBlockCount());
    final LocatedBlock lb = locatedblocks.get(0);
    final ExtendedBlock blk = lb.getBlock();
    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final File f = DataNodeTestUtils.getBlockFile(dn, blk.getBlockPoolId(), blk.getLocalBlock());
    final RandomAccessFile raf = new RandomAccessFile(f, "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    assertEquals(len1, raf.length());
    raf.setLength(0);
    raf.close();

    //c. Open file in "append mode".  Append a new block worth of data. Close file.
    final int len2 = (int) BLOCK_SIZE;
    {
        FSDataOutputStream out = fs.append(p);
        AppendTestUtil.write(out, len1, len2);
        out.close();
    }

    //d. Reopen file and read two blocks worth of data.
    AppendTestUtil.check(fs, p, len1 + len2);
}

From source file:org.gcaldaemon.core.Configurator.java

public static final void copyFile(File from, File to) throws Exception {
    if (from == null || to == null || !from.exists()) {
        return;
    }
    RandomAccessFile fromFile = null;
    RandomAccessFile toFile = null;
    try {
        fromFile = new RandomAccessFile(from, "r");
        toFile = new RandomAccessFile(to, "rw");
        FileChannel fromChannel = fromFile.getChannel();
        FileChannel toChannel = toFile.getChannel();
        long length = fromFile.length();
        long start = 0;
        while (start < length) {
            start += fromChannel.transferTo(start, length - start, toChannel);
        }
        fromChannel.close();
        toChannel.close();
    } finally {
        if (fromFile != null) {
            fromFile.close();
        }
        if (toFile != null) {
            toFile.close();
        }
    }
}

From source file:ome.services.blitz.impl.ExporterI.java

/**
 * Read size bytes, and transition to "waiting" If any exception is thrown,
 * the offset for the current file will not be updated.
 */
private byte[] read(long pos, int size) throws ServerError {
    if (size > MAX_SIZE) {
        throw new ApiUsageException("Max read size is: " + MAX_SIZE);
    }

    byte[] buf = new byte[size];

    RandomAccessFile ra = null;
    try {
        ra = new RandomAccessFile(file, "r");

        long l = ra.length();
        if (pos + size > l) {
            size = (int) (l - pos);
        }

        ra.seek(pos);
        int read = ra.read(buf);

        // Handle end of file
        if (read < 0) {
            buf = new byte[0];
        } else if (read < size) {
            byte[] newBuf = new byte[read];
            System.arraycopy(buf, 0, newBuf, 0, read);
            buf = newBuf;
        }

    } catch (IOException io) {
        throw new RuntimeException(io);
    } finally {

        if (ra != null) {
            try {
                ra.close();
            } catch (IOException e) {
                log.warn("IOException on file close");
            }
        }

    }

    return buf;
}

From source file:edu.tsinghua.lumaqq.qq.Util.java

/**
 * Computes the MD5 digest of a file; at most the first 10002432 bytes
 * (QQ.QQ_MAX_FILE_MD5_LENGTH) are read.
 * @param file the RandomAccessFile to digest
 * @return the MD5 digest, or null if an IOException occurs
 */
public static byte[] getFileMD5(RandomAccessFile file) {
    try {
        file.seek(0);
        byte[] buf = (file.length() > QQ.QQ_MAX_FILE_MD5_LENGTH) ? new byte[QQ.QQ_MAX_FILE_MD5_LENGTH]
                : new byte[(int) file.length()];
        file.readFully(buf);
        return DigestUtils.md5(buf);
    } catch (IOException e) {
        return null;
    }
}

From source file:org.pentaho.platform.web.servlet.UploadFileUtils.java

/**
 * Gets the uncompressed file size of a .gz file by reading the last four bytes of the file.
 *
 * @param file the .gz file to inspect
 * @return the uncompressed original file size
 * @throws IOException if the file cannot be read
 * @author mbatchelor
 */
private long getUncompressedGZipFileSize(File file) throws IOException {
    long rtn = 0;
    RandomAccessFile gzipFile = new RandomAccessFile(file, "r");
    try {
        // go 4 bytes from end of file - the original uncompressed file size is there
        gzipFile.seek(gzipFile.length() - 4);
        byte[] intelSize = new byte[4];
        gzipFile.read(intelSize); // read the size ....
        // rfc1952; ISIZE is the input size modulo 2^32
        // 00F01E69 is really 691EF000
        // The &0xFF turns signed byte into unsigned.
        rtn = (((intelSize[3] & 0xFF) << 24)
                | ((intelSize[2] & 0xFF) << 16)
                | ((intelSize[1] & 0xFF) << 8)
                | (intelSize[0] & 0xFF))
                & 0xffffffffL;
    } finally {
        try {
            gzipFile.close();
        } catch (Exception ignored) {
            //ignored
        }
    }
    return rtn;
}
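
The ISIZE trick used above can be verified with a small standalone check. The following is a sketch under stated assumptions (the class name GzipIsizeCheck and the temp-file prefix are made up): it compresses a payload of known length with java.util.zip.GZIPOutputStream and then reads the trailing four bytes in the same little-endian, unsigned fashion.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.zip.GZIPOutputStream;

public class GzipIsizeCheck {
    public static void main(String[] args) throws IOException {
        byte[] payload = new byte[123456]; // known uncompressed size
        File gz = File.createTempFile("isize", ".gz");
        try {
            try (GZIPOutputStream out = new GZIPOutputStream(new FileOutputStream(gz))) {
                out.write(payload);
            }
            try (RandomAccessFile raf = new RandomAccessFile(gz, "r")) {
                raf.seek(raf.length() - 4); // ISIZE is the last four bytes (RFC 1952)
                byte[] b = new byte[4];
                raf.readFully(b);
                long isize = (((long) (b[3] & 0xFF) << 24)
                        | ((b[2] & 0xFF) << 16)
                        | ((b[1] & 0xFF) << 8)
                        | (b[0] & 0xFF)) & 0xffffffffL;
                System.out.println(isize == payload.length); // expected: true
            }
        } finally {
            gz.delete();
        }
    }
}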