Example usage for java.nio.channels FileChannel read

List of usage examples for java.nio.channels FileChannel read

Introduction

On this page you can find usage examples for java.nio.channels.FileChannel#read.

Prototype

public abstract int read(ByteBuffer dst) throws IOException

Document

Reads a sequence of bytes from this channel into the given buffer, starting at this channel's current file position.
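
As a minimal sketch of the pattern the examples below follow (the path "data.bin" is a placeholder for illustration, not taken from this page): read(ByteBuffer) is called in a loop until it returns -1, and the buffer is cleared between reads.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class FileChannelReadSketch {
    public static void main(String[] args) throws IOException {
        // "data.bin" is a hypothetical input file used only for this sketch
        try (FileChannel channel = FileChannel.open(Paths.get("data.bin"), StandardOpenOption.READ)) {
            ByteBuffer buffer = ByteBuffer.allocate(4096);
            long total = 0;
            int read;
            // read(ByteBuffer) returns the number of bytes read, or -1 at end of file
            while ((read = channel.read(buffer)) != -1) {
                total += read;
                buffer.clear(); // make the whole buffer available for the next read
            }
            System.out.println("Read " + total + " bytes");
        }
    }
}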

Usage

From source file:com.owncloud.android.oc_framework.network.webdav.FileRequestEntity.java

@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    //                    globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getName());
                }
            }
        }

    } catch (IOException io) {
        Log.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);

    } finally {
        channel.close();
        raf.close();
    }
}

From source file:eu.alefzero.webdav.FileRequestEntity.java

@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    //                    globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getName());
                }
            }
        }

    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);

    } finally {
        channel.close();
        raf.close();
    }
}

From source file:com.owncloud.android.lib.common.network.FileRequestEntity.java

@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    //                    globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getAbsolutePath());
                }
            }
        }

    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);

    } finally {
        channel.close();
        raf.close();
    }
}

From source file:com.cerema.cloud2.lib.common.network.FileRequestEntity.java

@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    //                    globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getAbsolutePath());
                }
            }
        }

    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);

    } finally {
        channel.close();
        raf.close();
    }
}

From source file:org.alfresco.patch.PatchServiceImpl.java

@Override
public PatchDocument getPatch(String nodeId, long nodeVersion) throws IOException {
    if (contentStore.exists(nodeId, nodeVersion - 1, true)) {
        // previous version
        NodeChecksums nodeChecksums = checksumService.getChecksums(nodeId, nodeVersion - 1);
        if (nodeChecksums != null) {
            // parameters version
            NodeInfo nodeInfo1 = contentDAO.getByNodeId(nodeId, nodeVersion, true);
            String contentPath1 = nodeInfo1.getContentPath();
            FileChannel inChannel = contentStore.getChannel(contentPath1);
            ByteBuffer buffer = ByteBuffer.allocate(1024 * 100);
            inChannel.read(buffer);
            buffer.flip();

            PatchDocument patchDocument = checksumService.createPatchDocument(nodeChecksums, buffer);
            return patchDocument;
        } else {
            throw new RuntimeException("No patches available, no checksums for node " + nodeId
                    + ", nodeVersion " + (nodeVersion - 1));
        }
    } else {
        throw new RuntimeException("No patches available, only a single version of the node");
    }
}

From source file:voldemort.store.cachestore.voldeimpl.StoreIterator.java

private boolean checkSignature(FileChannel channel) throws IOException {
    ByteBuffer intBytes = ByteBuffer.allocate(OFFSET);
    if (channel.size() == 0) {
        throw new StoreException("File size is 0");
    } else {
        channel.read(intBytes);
        intBytes.rewind();
        if (intBytes.getInt() != MAGIC)
            throw new StoreException("Header mismatch expect " + MAGIC + " read " + intBytes.getInt());
    }
    return true;
}

From source file:net.librec.data.convertor.appender.SocialDataAppender.java

/**
 * Read data from the data file. Note that we didn't take care of the
 * duplicated lines.
 *
 * @param inputDataPath
 *            the path of the data file
 * @throws IOException if I/O error occurs during reading
 */
private void readData(String inputDataPath) throws IOException {
    // Table {row-id, col-id, rate}
    Table<Integer, Integer, Double> dataTable = HashBasedTable.create();
    // Map {col-id, multiple row-id}: used to fast build a rating matrix
    Multimap<Integer, Integer> colMap = HashMultimap.create();
    // BiMap {raw id, inner id} userIds, itemIds
    final List<File> files = new ArrayList<File>();
    final ArrayList<Long> fileSizeList = new ArrayList<Long>();
    SimpleFileVisitor<Path> finder = new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fileSizeList.add(file.toFile().length());
            files.add(file.toFile());
            return super.visitFile(file, attrs);
        }
    };
    Files.walkFileTree(Paths.get(inputDataPath), finder);
    long allFileSize = 0;
    for (Long everyFileSize : fileSizeList) {
        allFileSize = allFileSize + everyFileSize.longValue();
    }
    // loop every dataFile collecting from walkFileTree
    for (File dataFile : files) {
        FileInputStream fis = new FileInputStream(dataFile);
        FileChannel fileRead = fis.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(BSIZE);
        int len;
        String bufferLine = new String();
        byte[] bytes = new byte[BSIZE];
        while ((len = fileRead.read(buffer)) != -1) {
            buffer.flip();
            buffer.get(bytes, 0, len);
            bufferLine = bufferLine.concat(new String(bytes, 0, len)).replaceAll("\r", "\n");
            String[] bufferData = bufferLine.split("(\n)+");
            boolean isComplete = bufferLine.endsWith("\n");
            int loopLength = isComplete ? bufferData.length : bufferData.length - 1;
            for (int i = 0; i < loopLength; i++) {
                String line = new String(bufferData[i]);
                String[] data = line.trim().split("[ \t,]+");
                String userA = data[0];
                String userB = data[1];
                Double rate = (data.length >= 3) ? Double.valueOf(data[2]) : 1.0;
                if (userIds.containsKey(userA) && userIds.containsKey(userB)) {
                    int row = userIds.get(userA);
                    int col = userIds.get(userB);
                    dataTable.put(row, col, rate);
                    colMap.put(col, row);
                }
            }
            if (!isComplete) {
                bufferLine = bufferData[bufferData.length - 1];
            }
            buffer.clear();
        }
        fileRead.close();
        fis.close();
    }
    int numRows = userIds.size(), numCols = userIds.size();
    // build rating matrix
    userSocialMatrix = new SparseMatrix(numRows, numCols, dataTable, colMap);
    // release memory of data table
    dataTable = null;
}

From source file:com.sm.store.utils.FileStore.java

private boolean checkSignature(FileChannel channel) throws IOException {
    ByteBuffer intBytes = ByteBuffer.allocate(OFFSET);
    if (channel.size() == 0) {
        intBytes.putInt(MAGIC);
        intBytes.flip();
        channel.write(intBytes);
        return true;
    } else {
        channel.read(intBytes);
        intBytes.rewind();
        if (intBytes.getInt() != MAGIC)
            throw new StoreException("Header mismatch expect " + MAGIC + " read " + intBytes.getInt());
    }
    return true;
}

From source file:org.zuinnote.hadoop.office.format.common.parser.msexcel.internal.EncryptedCachedDiskStringsTable.java

/**
 * Gets a String from cache or underlying encrypted/compressed file
 * 
 * @param index
 * @return
 * @throws IOException
 */

private String getString(int index) throws IOException {
    // check if it is in cache?
    if (this.cache.containsKey(index)) {
        return this.cache.get(index);
    }
    // if not we have to read it from the file
    long itemPosition = this.stringPositionInFileList.get(index);
    String result = null;
    if (this.tempRAF == null) {
        this.accessTempFile(itemPosition);
        byte[] readSize = new byte[4];
        this.in.read(readSize);
        int sizeOfString = ByteBuffer.wrap(readSize).getInt();
        byte[] strbytes = new byte[sizeOfString];
        this.in.read(strbytes);
        this.currentPos += readSize.length + strbytes.length;
        result = new String(strbytes, EncryptedCachedDiskStringsTable.encoding);
    } else {
        FileChannel fc = this.tempRAF.getChannel().position(itemPosition);
        ByteBuffer bb = ByteBuffer.allocate(4);
        // read size of String
        fc.read(bb);
        bb.flip();
        int sizeOfStr = bb.getInt();
        // read string
        bb = ByteBuffer.allocate(sizeOfStr);
        fc.read(bb);
        bb.flip();
        result = new String(bb.array(), EncryptedCachedDiskStringsTable.encoding);
    }
    if (this.cacheSize != 0) {
        this.cache.put(index, result);
    }
    return result;
}

From source file:org.wso2.msf4j.internal.entitywriter.FileEntityWriter.java

/**
 * Write the entity to the carbon message.
 */
@Override
public void writeData(CarbonMessage carbonMessage, File file, String mediaType, int chunkSize,
        CarbonCallback cb) {
    if (mediaType == null || mediaType.equals(MediaType.WILDCARD)) {
        try {
            mediaType = MimeMapper.getMimeType(FilenameUtils.getExtension(file.getName()));
        } catch (MimeMappingException e) {
            mediaType = MediaType.WILDCARD;
        }
    }
    try {
        FileChannel fileChannel = new FileInputStream(file).getChannel();
        if (chunkSize == Response.NO_CHUNK || chunkSize == Response.DEFAULT_CHUNK_SIZE) {
            chunkSize = DEFAULT_CHUNK_SIZE;
        }
        carbonMessage.setHeader(Constants.HTTP_TRANSFER_ENCODING, CHUNKED);
        carbonMessage.setHeader(Constants.HTTP_CONTENT_TYPE, mediaType);
        carbonMessage.setBufferContent(false);
        cb.done(carbonMessage);

        ByteBuffer buffer = ByteBuffer.allocate(chunkSize);
        while (fileChannel.read(buffer) != -1) {
            buffer.flip();
            carbonMessage.addMessageBody(buffer);
        }
        fileChannel.close();
        carbonMessage.setEndOfMsgAdded(true);
    } catch (IOException e) {
        throw new RuntimeException("Error occurred while reading from file", e);
    }
}