Example usage for java.io FileInputStream getChannel

Introduction

This page collects example usages of java.io.FileInputStream.getChannel().

Prototype

public FileChannel getChannel() 

Document

Returns the unique java.nio.channels.FileChannel object associated with this file input stream.
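
A minimal standalone sketch of the call (the file name data.bin is a placeholder; with Java 7+ try-with-resources, closing the stream also closes its channel):

try (FileInputStream in = new FileInputStream("data.bin")) {
    FileChannel channel = in.getChannel();
    long size = channel.size();                      // the channel exposes the file size
    ByteBuffer buffer = ByteBuffer.allocate((int) Math.min(size, 8192));
    channel.read(buffer);                            // reads advance the position shared with the stream
}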

Usage

From source file:org.codehaus.preon.Codecs.java

public static <T> T decode(Codec<T> codec, Builder builder, File file)
        throws FileNotFoundException, IOException, DecodingException {
    FileInputStream in = null;
    FileChannel channel = null;
    try {
        in = new FileInputStream(file);
        channel = in.getChannel();
        int fileSize = (int) channel.size(); // the int cast assumes the file is smaller than 2 GiB
        ByteBuffer buffer = channel.map(FileChannel.MapMode.READ_ONLY, 0, fileSize);
        return decode(codec, buffer, builder);
    } finally {
        if (channel != null) {
            channel.close();
        }
    }
}
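
Note that closing the channel returned by getChannel() also closes the FileInputStream it came from, since both wrap the same open file, so the finally block above releases the stream as well.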

From source file:org.apache.hadoop.hdfs.BlockReaderLocalBase.java

/**
 * The only way this object can be instantiated.
 */
public static BlockReaderLocalBase newBlockReader(Configuration conf, String file, int namespaceid, Block blk,
        DatanodeInfo node, long startOffset, long length, DFSClientMetrics metrics, boolean verifyChecksum,
        boolean clearOsBuffer, boolean positionalReadMode) throws IOException {

    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node);

    BlockPathInfo pathinfo = localDatanodeInfo.getOrComputePathInfo(namespaceid, blk, node, conf);

    // An alternative would be for the datanode to pass the inline-checksum flag
    // and checksum metadata through BlockPathInfo, which is a cleaner approach.
    // However, that raises protocol-compatibility concerns, so we avoid it for
    // now. We can always switch to the other approach later.
    //
    boolean isInlineChecksum = Block.isInlineChecksumBlockFilename(new Path(pathinfo.getBlockPath()).getName());

    // check to see if the file exists. It may so happen that the
    // HDFS file has been deleted and this block-lookup is occurring
    // on behalf of a new HDFS file. This time, the block file could
    // be residing in a different portion of the fs.data.dir directory.
    // In this case, we remove this entry from the cache. The next
    // call to this method will repopulate the cache.
    try {

        // get a local file system
        FileChannel dataFileChannel;
        FileDescriptor dataFileDescriptor;
        File blkfile = new File(pathinfo.getBlockPath());
        FileInputStream fis = new FileInputStream(blkfile);
        dataFileChannel = fis.getChannel();
        dataFileDescriptor = fis.getFD();

        if (LOG.isDebugEnabled()) {
            LOG.debug("New BlockReaderLocal for file " + pathinfo.getBlockPath() + " of size "
                    + blkfile.length() + " startOffset " + startOffset + " length " + length);
        }

        DataChecksum checksum = null;
        if (isInlineChecksum) {
            GenStampAndChecksum gac = BlockInlineChecksumReader
                    .getGenStampAndChecksumFromInlineChecksumFile(new Path(pathinfo.getBlockPath()).getName());
            checksum = DataChecksum.newDataChecksum(gac.getChecksumType(), gac.getBytesPerChecksum());

            if (verifyChecksum) {

                return new BlockReaderLocalInlineChecksum(conf, file, blk, startOffset, length, pathinfo,
                        metrics, checksum, verifyChecksum, dataFileChannel, dataFileDescriptor, clearOsBuffer,
                        positionalReadMode);
            } else {
                return new BlockReaderLocalInlineChecksum(conf, file, blk, startOffset, length, pathinfo,
                        metrics, checksum, dataFileChannel, dataFileDescriptor, clearOsBuffer,
                        positionalReadMode);
            }
        } else if (verifyChecksum) {
            FileChannel checksumInChannel = null;
            // get the metadata file
            File metafile = new File(pathinfo.getMetaPath());
            FileInputStream checksumIn = new FileInputStream(metafile);
            checksumInChannel = checksumIn.getChannel();
            // read and handle the common header here. For now just a version
            BlockMetadataHeader header = BlockMetadataHeader.readHeader(new DataInputStream(checksumIn),
                    new NativeCrc32());
            short version = header.getVersion();

            if (version != FSDataset.FORMAT_VERSION_NON_INLINECHECKSUM) {
                LOG.warn("Wrong version (" + version + ") for metadata file for " + blk + " ignoring ...");
            }
            checksum = header.getChecksum();

            return new BlockReaderLocalWithChecksum(conf, file, blk, startOffset, length, pathinfo, metrics,
                    checksum, verifyChecksum, dataFileChannel, dataFileDescriptor, checksumInChannel,
                    clearOsBuffer, positionalReadMode);
        } else {
            return new BlockReaderLocalWithChecksum(conf, file, blk, startOffset, length, pathinfo, metrics,
                    dataFileChannel, dataFileDescriptor, clearOsBuffer, positionalReadMode);
        }

    } catch (FileNotFoundException e) {
        localDatanodeInfo.removeBlockLocalPathInfo(namespaceid, blk);
        DFSClient.LOG.warn("BlockReaderLocal: Removing " + blk + " from cache because local file "
                + pathinfo.getBlockPath() + " could not be opened.");
        throw e;
    }
}
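
Here both the data channel and the file descriptor handed to the BlockReaderLocal variants come from the same FileInputStream, so whichever reader is constructed ends up responsible for that underlying stream.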

From source file:edu.cmu.cs.lti.util.general.BasicConvenience.java

/**
 * Read a file by mapping it completely into memory.
 *
 * @param file the file to read
 * @return the bytes contained in the file
 * @throws IOException
 */
public static byte[] fastReadFile(File file) throws IOException {
    FileInputStream stream = new FileInputStream(file);
    try {
        MappedByteBuffer buffer = stream.getChannel().map(MapMode.READ_ONLY, 0, file.length());
        byte[] bytes = new byte[(int) file.length()];
        buffer.get(bytes);
        return bytes;
    } finally {
        stream.close();
    }
}
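
As with the Codecs example above, the cast of file.length() to int limits this helper to files under 2 GiB; larger files would need to be mapped as several long-addressed regions.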

From source file:org.pepstock.jem.ant.tasks.utilities.SortTask.java

/**
 * Divides the file into small blocks. If the blocks
 * are too small, we shall create too many temporary files.
 * If they are too big, we shall be using too much memory.
 *
 * @param filetobesorted the file to be sorted
 * @param maxtmpfiles the maximum number of temporary files to allow
 * @return the recommended block size in bytes
 * @throws IOException 
 */
public static long estimateBestSizeOfBlocks(FileInputStream filetobesorted, int maxtmpfiles)
        throws IOException {
    long sizeoffile = filetobesorted.getChannel().size() * 2;
    /**
     * We multiply by two because later on someone insisted on counting the
     * memory usage as 2 bytes per character. By this model, loading a file
     * with 1 character will use 2 bytes.
     */
    // we don't want to open up much more than maxtmpfiles temporary files,
    // better run
    // out of memory first.
    long blocksize = sizeoffile / maxtmpfiles + (sizeoffile % maxtmpfiles == 0 ? 0 : 1);

    // on the other hand, we don't want to create many temporary files
    // for naught. If blocksize is smaller than half the free memory, grow
    // it.
    long freemem = Runtime.getRuntime().freeMemory();
    if (blocksize < freemem / 2) {
        blocksize = freemem / 2;
    }
    return blocksize;
}
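
As an illustration, a 1 GiB file is counted as 2 GiB under the 2-bytes-per-character model; with maxtmpfiles = 100 the ceiling division yields a block size of roughly 21.5 MB, which is then raised to half the free heap if that happens to be larger.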

From source file:com.rvl.android.getnzb.LocalNZB.java

public static String readFile(File file) throws IOException {
    Log.d(Tags.LOG, "readfile(): Converting file to string. (" + file.getAbsolutePath() + ")");
    FileInputStream stream = new FileInputStream(file);
    try {
        FileChannel fc = stream.getChannel();
        MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size());
        // Decodes with the platform default charset; pass an explicit Charset here to control the encoding.
        return Charset.defaultCharset().decode(bb).toString();
    } finally {
        stream.close();
    }
}

From source file:ValidateLicenseHeaders.java

/**
 * Add the default JBoss LGPL header.
 */
static void addDefaultHeader(File javaFile) throws IOException {
    if (addDefaultHeader) {
        FileInputStream fis = new FileInputStream(javaFile);
        FileChannel fc = fis.getChannel();
        int size = (int) fc.size();
        ByteBuffer contents = ByteBuffer.allocate(size);
        fc.read(contents); // note: a single read is not guaranteed to fill the buffer
        fis.close();

        ByteBuffer hdr = ByteBuffer.wrap(DEFAULT_HEADER.getBytes());
        FileOutputStream fos = new FileOutputStream(javaFile);
        fos.write(hdr.array());
        fos.write(contents.array());
        fos.close();
    }

    noheaders.add(javaFile);
}

From source file:org.wso2.carbon.esb.vfs.transport.test.ESBJAVA4679VFSPasswordSecurityTestCase.java

/**
 * Copy the given source file to the given destination
 *
 * @param sourceFile source file
 * @param destFile   destination file
 * @throws java.io.IOException
 */
public static void copyFile(File sourceFile, File destFile) throws IOException {
    if (!destFile.exists()) {
        destFile.createNewFile();
    }
    FileInputStream fileInputStream = null;
    FileOutputStream fileOutputStream = null;
    try {
        fileInputStream = new FileInputStream(sourceFile);
        fileOutputStream = new FileOutputStream(destFile);

        FileChannel source = fileInputStream.getChannel();
        FileChannel destination = fileOutputStream.getChannel();
        destination.transferFrom(source, 0, source.size());
    } finally {
        IOUtils.closeQuietly(fileInputStream);
        IOUtils.closeQuietly(fileOutputStream);
    }
}
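
A single transferFrom call is not guaranteed to move the requested number of bytes, so a defensive variant of the copy loops until the whole source has been transferred (a sketch using the same channel variables as above):

long position = 0;
long count = source.size();
while (position < count) {
    // transferFrom returns how many bytes were actually moved by this call
    position += destination.transferFrom(source, position, count - position);
}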

From source file:com.turn.ttorrent.common.TorrentCreator.java

/**
 * Return the concatenation of the SHA-1 hashes of the given files' pieces.
 *
 * <p>
 * Hashes the given file piece by piece using the default Torrent piece
 * length (see {@link #PIECE_LENGTH}) and returns the concatenation of
 * these hashes, as a string.
 * </p>
 *
 * <p>
 * This is used for creating Torrent meta-info structures from a file.
 * </p>
 *
 * @param files The files to hash.
 */
public /* for testing */ static byte[] hashFiles(Executor executor, List<File> files, long nbytes,
        int pieceLength) throws InterruptedException, IOException {
    int npieces = (int) Math.ceil((double) nbytes / pieceLength);
    byte[] out = new byte[Torrent.PIECE_HASH_SIZE * npieces];
    CountDownLatch latch = new CountDownLatch(npieces);

    ByteBuffer buffer = ByteBuffer.allocate(pieceLength);

    long start = System.nanoTime();
    int piece = 0;
    for (File file : files) {
        logger.info("Hashing data from {} ({} pieces)...",
                new Object[] { file.getName(), (int) Math.ceil((double) file.length() / pieceLength) });

        FileInputStream fis = FileUtils.openInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.flip();
                    executor.execute(new ChunkHasher(out, piece, latch, buffer));
                    buffer = ByteBuffer.allocate(pieceLength);
                    piece++;
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.flip();
        executor.execute(new ChunkHasher(out, piece, latch, buffer));
        piece++;
    }

    // Wait for hashing tasks to complete.
    latch.await();
    long elapsed = System.nanoTime() - start;

    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.",
            new Object[] { files.size(), nbytes, piece, npieces, String.format("%.1f", elapsed / 1e6) });

    return out;
}

From source file:org.grouplens.lenskit.data.dao.packed.BinaryRatingDAO.java

/**
 * Open a binary rating DAO.
 * @param file The file to open.
 * @return A DAO backed by {@code file}.
 * @throws IOException If there is an error reading the file.
 */
public static BinaryRatingDAO open(File file) throws IOException {
    FileInputStream input = new FileInputStream(file);
    try {
        FileChannel channel = input.getChannel();
        BinaryHeader header = BinaryHeader.read(channel);
        logger.info("Loading DAO with {} ratings of {} items from {} users", header.getRatingCount(),
                header.getItemCount(), header.getUserCount());

        ByteBuffer data = channel.map(FileChannel.MapMode.READ_ONLY, channel.position(),
                header.getRatingDataSize());
        channel.position(channel.position() + header.getRatingDataSize());

        ByteBuffer tableBuffer = channel.map(FileChannel.MapMode.READ_ONLY, channel.position(),
                channel.size() - channel.position());
        BinaryIndexTable utbl = BinaryIndexTable.fromBuffer(header.getUserCount(), tableBuffer);
        BinaryIndexTable itbl = BinaryIndexTable.fromBuffer(header.getItemCount(), tableBuffer);

        return new BinaryRatingDAO(file, header, data, utbl, itbl);
    } finally {
        input.close();
    }
}
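
Note that map() takes an explicit offset rather than consuming the channel's position, so the code advances channel.position() by hand after each region to mark where the next mapping should start.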

From source file:ja.centre.util.io.Files.java

public static void copy(String source, String destination) throws IOException {
    FileInputStream fis = null;
    FileOutputStream fos = null;
    try {
        fis = new FileInputStream(source);
        fos = new FileOutputStream(destination);

        FileChannel fic = null;
        FileChannel foc = null;
        try {
            fic = fis.getChannel();
            foc = fos.getChannel();

            ByteBuffer buffer = ByteBuffer.allocate(COPY_BUFFER_SIZE);
            while (fic.read(buffer) != -1) {
                buffer.flip();
                // a single write may be partial, so drain the buffer completely
                while (buffer.hasRemaining()) {
                    foc.write(buffer);
                }
                buffer.clear();
            }
        } finally {
            closeQuietly(fic);
            closeQuietly(foc);
        }
    } finally {
        closeQuietly(fis);
        closeQuietly(fos);
    }
}
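
The same copy could also be written with FileChannel.transferFrom, as in the copyFile example above, which lets the JVM use OS-level copy mechanisms instead of shuttling bytes through a heap buffer.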