Example usage for java.nio.channels FileChannel position

List of usage examples for java.nio.channels FileChannel position

Introduction

This page collects example usages of java.nio.channels FileChannel position, taken from open-source projects.

Prototype

public abstract FileChannel position(long newPosition) throws IOException;

Documentation

Sets this channel's file position.
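
The following is a minimal, self-contained sketch (the file name example.dat and the written content are illustrative, not taken from the usage examples below) showing how position(long) moves the channel's file pointer between a write and a read:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class PositionExample {
    public static void main(String[] args) throws IOException {
        // "example.dat" is an illustrative file name; "rw" creates it if it does not exist.
        try (FileChannel channel = new RandomAccessFile("example.dat", "rw").getChannel()) {
            channel.write(ByteBuffer.wrap("Hello, world".getBytes()));
            channel.position(0);                  // rewind to the start of the file
            ByteBuffer buffer = ByteBuffer.allocate(5);
            channel.read(buffer);                 // reads the first 5 bytes ("Hello")
            System.out.println(new String(buffer.array(), 0, buffer.position()));
        }
    }
}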

Usage

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java
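
A test helper that pre-allocates a journal file by writing zeros at a 4 MB offset, then calls position(0) to rewind the channel before filling the start of the file with junk data.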

private void writeJunkJournal(File journalDir) throws Exception {
    long logId = System.currentTimeMillis();
    File fn = new File(journalDir, Long.toHexString(logId) + ".txn");

    FileChannel fc = new RandomAccessFile(fn, "rw").getChannel();

    ByteBuffer zeros = ByteBuffer.allocate(512);
    fc.write(zeros, 4 * 1024 * 1024);
    fc.position(0);

    for (int i = 1; i <= 10; i++) {
        fc.write(ByteBuffer.wrap("JunkJunkJunk".getBytes()));
    }
}

From source file:org.multibit.file.FileHandler.java
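
A file-copy utility that transfers bytes from the source channel into the destination channel and uses position(long) to advance the destination after each transferFrom call.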

public static void copyFile(File sourceFile, File destinationFile) throws IOException {
    if (!destinationFile.exists()) {
        destinationFile.createNewFile();
    }
    FileInputStream fileInputStream = null;
    FileOutputStream fileOutputStream = null;
    FileChannel source = null;
    FileChannel destination = null;
    try {
        fileInputStream = new FileInputStream(sourceFile);
        source = fileInputStream.getChannel();
        fileOutputStream = new FileOutputStream(destinationFile);
        destination = fileOutputStream.getChannel();
        long transfered = 0;
        long bytes = source.size();
        while (transfered < bytes) {
            transfered += destination.transferFrom(source, 0, source.size());
            destination.position(transfered);
        }
    } finally {
        if (source != null) {
            source.close();
            source = null;
        } else if (fileInputStream != null) {
            fileInputStream.close();
            fileInputStream = null;
        }
        if (destination != null) {
            destination.close();
            destination = null;
        } else if (fileOutputStream != null) {
            fileOutputStream.flush();
            fileOutputStream.close();
        }
    }
}

From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java
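
Similar to the junk-journal helper above: the file is extended by writing zeros at a 4 MB offset, the channel is rewound with position(0), and length-prefixed journal entries are then written from the beginning.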

private void writePreV2Journal(File journalDir, int numEntries) throws Exception {
    long logId = System.currentTimeMillis();
    File fn = new File(journalDir, Long.toHexString(logId) + ".txn");

    FileChannel fc = new RandomAccessFile(fn, "rw").getChannel();

    ByteBuffer zeros = ByteBuffer.allocate(512);
    fc.write(zeros, 4 * 1024 * 1024);
    fc.position(0);

    byte[] data = "JournalTestData".getBytes();
    long lastConfirmed = LedgerHandle.INVALID_ENTRY_ID;
    for (int i = 1; i <= numEntries; i++) {
        ByteBuffer packet = ClientUtil.generatePacket(1, i, lastConfirmed, i * data.length, data)
                .toByteBuffer();
        lastConfirmed = i;
        ByteBuffer lenBuff = ByteBuffer.allocate(4);
        lenBuff.putInt(packet.remaining());
        lenBuff.flip();

        fc.write(lenBuff);
        fc.write(packet);
    }
}

From source file:org.neo4j.io.pagecache.impl.SingleFilePageSwapper.java
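
A vectored write that synchronizes on a per-channel lock, sets the channel position to the page offset, and loops until all buffers have been written; on ClosedChannelException it reopens the channel and retries recursively.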

private long lockPositionWriteVector(long filePageId, FileChannel channel, long fileOffset, ByteBuffer[] srcs)
        throws IOException {
    try {
        long toWrite = filePageSize * (long) srcs.length;
        long bytesWritten = 0;
        synchronized (positionLock(channel)) {
            channel.position(fileOffset);
            do {
                bytesWritten += channel.write(srcs);
            } while (bytesWritten < toWrite);
            return bytesWritten;
        }
    } catch (ClosedChannelException e) {
        // AsynchronousCloseException is a subclass of
        // ClosedChannelException, and ClosedByInterruptException is in
        // turn a subclass of AsynchronousCloseException.
        tryReopen(filePageId, e);
        boolean interrupted = Thread.interrupted();
        // Recurse because this is hopefully a very rare occurrence.
        channel = unwrappedChannel(filePageId);
        long bytesWritten = lockPositionWriteVector(filePageId, channel, fileOffset, srcs);
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        return bytesWritten;
    }
}

From source file:org.neo4j.io.pagecache.impl.SingleFilePageSwapper.java
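
The read-side counterpart of the previous method: it positions the channel at the page offset and performs vectored reads until the expected number of bytes has been read or end-of-file is reached.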

private long lockPositionReadVector(long filePageId, FileChannel channel, long fileOffset, ByteBuffer[] srcs)
        throws IOException {
    try {
        long toRead = filePageSize * (long) srcs.length;
        long read, readTotal = 0;
        synchronized (positionLock(channel)) {
            channel.position(fileOffset);
            do {
                read = channel.read(srcs);
            } while (read != -1 && (readTotal += read) < toRead);
            return readTotal;
        }
    } catch (ClosedChannelException e) {
        // AsynchronousCloseException is a subclass of
        // ClosedChannelException, and ClosedByInterruptException is in
        // turn a subclass of AsynchronousCloseException.
        tryReopen(filePageId, e);
        boolean interrupted = Thread.interrupted();
        // Recurse because this is hopefully a very rare occurrence.
        channel = unwrappedChannel(filePageId);
        long bytesWritten = lockPositionReadVector(filePageId, channel, fileOffset, srcs);
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        return bytesWritten;
    }
}

From source file:eu.stratosphere.nephele.taskmanager.runtime.EnvelopeConsumptionLog.java
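
Here position(long) resumes reading a log file at the offset of the next unread entry, computed from the number of integer entries already consumed.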

private void loadNextOutstandingEnvelopes() {

    final int pos = this.outstandingEnvelopesAsIntBuffer.position();

    if (pos > 0) {

        final int rem = this.outstandingEnvelopesAsIntBuffer.remaining();

        for (int i = 0; i < rem; ++i) {
            this.outstandingEnvelopesAsIntBuffer.put(i, this.outstandingEnvelopesAsIntBuffer.get(i + pos));
        }

        this.outstandingEnvelopesAsIntBuffer.position(0);
        this.outstandingEnvelopesAsIntBuffer.limit(rem);
    }

    if (this.numberOfEntriesReadFromLog == this.numberOfInitialLogEntries) {
        return;
    }

    FileChannel fc = null;

    try {

        this.outstandingEnvelopesAsByteBuffer
                .position(this.outstandingEnvelopesAsIntBuffer.limit() * SIZE_OF_INTEGER);
        this.outstandingEnvelopesAsByteBuffer.limit(this.outstandingEnvelopesAsByteBuffer.capacity());

        fc = new FileInputStream(this.logFile).getChannel();
        fc.position(this.numberOfEntriesReadFromLog * SIZE_OF_INTEGER);

        int totalBytesRead = 0;

        while (this.outstandingEnvelopesAsByteBuffer.hasRemaining()) {

            final int bytesRead = fc.read(this.outstandingEnvelopesAsByteBuffer);
            if (bytesRead == -1) {
                break;
            }

            totalBytesRead += bytesRead;
        }

        if (totalBytesRead % SIZE_OF_INTEGER != 0) {
            LOG.error("Read " + totalBytesRead + " from " + this.logFile.getAbsolutePath()
                    + ", file may be corrupt");
        }

        final int numberOfNewEntries = totalBytesRead / SIZE_OF_INTEGER;

        this.outstandingEnvelopesAsIntBuffer
                .limit(this.outstandingEnvelopesAsIntBuffer.limit() + numberOfNewEntries);

        this.numberOfEntriesReadFromLog += numberOfNewEntries;

        fc.close();

    } catch (IOException ioe) {
        LOG.error(StringUtils.stringifyException(ioe));
    } finally {

        if (fc != null) {
            try {
                fc.close();
            } catch (IOException ioe) {
            }
        }
    }
}

From source file:org.cloudata.core.commitlog.CommitLogServer.java
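
This method locates the most recently modified commit-log file, explicitly resets the channel position to zero, and hands the channel off for file transfer.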

public int readLastLog(String dirName) {
    //LOG.debug("readLastLog is called with dirName : " + dirName);

    if (commitLogStoreFile.exists() == false) {
        LOG.warn("A dir directory does not exist");
        return -1;
    }

    File[] fileList = commitLogStoreFile.listFiles();
    if (fileList == null || fileList.length == 0) {
        LOG.warn("file does not exist");
        return -1;
    }

    File lastFile = null;
    long lastModified = 0l;

    for (int i = 0; i < fileList.length; i++) {
        if (fileList[i].getName().startsWith(dirName) && fileList[i].lastModified() > lastModified) {
            lastFile = fileList[i];
            lastModified = lastFile.lastModified();
        }
    }

    if (lastFile.length() == 0) {
        LOG.warn("the length of the file is zero");
        return -1;
    }

    try {
        FileChannel ch = new FileInputStream(lastFile).getChannel();
        ch.position(0);
        return startFileTransferChannel(dirName, ch);
    } catch (FileNotFoundException e) {
        LOG.warn("File : " + lastFile.getName() + " does not exist", e);
    } catch (IOException e) {
        LOG.warn("Fail to set position to zero", e);
    }
    return -1;
}

From source file:dk.statsbiblioteket.util.LineReaderTest.java
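
A unit test that uses position(long) both to rewind a read channel and to seek a write channel to a specific offset, in order to append to and overwrite parts of a temporary file.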

public void testNIO() throws Exception {
    byte[] INITIAL = new byte[] { 1, 2, 3, 4 };
    byte[] EXTRA = new byte[] { 5, 6, 7, 8 };
    byte[] FULL = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
    byte[] FIFTH = new byte[] { 87 };
    byte[] FULL_WITH_FIFTH = new byte[] { 1, 2, 3, 4, 87, 6, 7, 8 };

    // Create temp-file with content
    File temp = createTempFile();
    FileOutputStream fileOut = new FileOutputStream(temp, true);
    fileOut.write(INITIAL);
    fileOut.close();

    checkContent("The plain test-file should be correct", temp, INITIAL);
    {
        // Read the 4 bytes
        RandomAccessFile input = new RandomAccessFile(temp, "r");
        FileChannel channelIn = input.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        channelIn.position(0);
        assertEquals("Buffer read should read full length", INITIAL.length, channelIn.read(buffer));
        buffer.position(0);

        checkContent("Using buffer should produce the right bytes", INITIAL, buffer);
        channelIn.close();
        input.close();
    }
    {
        // Fill new buffer
        ByteBuffer outBuffer = ByteBuffer.allocate(4096);
        outBuffer.put(EXTRA);
        outBuffer.flip();
        assertEquals("The limit of the outBuffer should be correct", EXTRA.length, outBuffer.limit());

        // Append new buffer to end
        RandomAccessFile output = new RandomAccessFile(temp, "rw");
        FileChannel channelOut = output.getChannel();
        channelOut.position(INITIAL.length);
        assertEquals("All bytes should be written", EXTRA.length, channelOut.write(outBuffer));
        channelOut.close();
        output.close();
        checkContent("The resulting file should have the full output", temp, FULL);
    }

    {
        // Fill single byte buffer
        ByteBuffer outBuffer2 = ByteBuffer.allocate(4096);
        outBuffer2.put(FIFTH);
        outBuffer2.flip();
        assertEquals("The limit of the second outBuffer should be correct", FIFTH.length, outBuffer2.limit());

        // Insert byte in the middle
        RandomAccessFile output2 = new RandomAccessFile(temp, "rw");
        FileChannel channelOut2 = output2.getChannel();
        channelOut2.position(4);
        assertEquals("The FIFTH should be written", FIFTH.length, channelOut2.write(outBuffer2));
        channelOut2.close();
        output2.close();
        checkContent("The resulting file with fifth should be complete", temp, FULL_WITH_FIFTH);
    }
}

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java
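
Here the channel position is reset to 0 before the header of the most recently committed bucket table file is read and validated, after which the bucket buffers and log counters are loaded sequentially.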

private void initializeBucketTableFromLastCommittedBucketFile() throws BucketTableManagerException {
    FileInputStream tableStream = null;
    FileChannel fileChannel = null;
    try {
        File latestCommittedFile = getLatestCommitedFile();
        if (latestCommittedFile != null) {
            tableStream = new FileInputStream(latestCommittedFile);
            fileChannel = tableStream.getChannel();
            ByteBuffer buffer = ByteBuffer.allocate(HEADERSIZE);
            fileChannel.position(0L);
            int read = fileChannel.read(buffer);
            if (read < HEADERSIZE) {
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Wrong bucket table header size: " + read + "/" + HEADERSIZE);
            }
            // Check content of header. Start with Big Endian (default for Java)
            buffer.rewind();
            byteOrder = ByteOrder.BIG_ENDIAN;
            buffer.order(byteOrder);
            int magic = buffer.getInt();
            if (magic == MAGICSTART_BADENDIAN) {
                byteOrder = ByteOrder.LITTLE_ENDIAN;
                buffer.order(byteOrder);
            } else if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Read number of buckets
            long headerMapSize = buffer.getLong();
            // Read checkPoint
            NeedlePointer includedCheckpoint = new NeedlePointer();
            includedCheckpoint.getNeedlePointerFromBuffer(buffer);
            // Read second magic number
            magic = buffer.getInt();
            if (magic != MAGICEND) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Check number of buckets against requested map size
            if (headerMapSize != mapSize) {
                // Map size does not match
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Requested map size " + mapSize + " does not match header map size " + headerMapSize);
            }
            // Sets initial checkpoint
            bucketTable.setInitialCheckPoint(includedCheckpoint);
            // Now reads all entries
            logger.info("Hot start: loading buckets...");
            for (int i = 0; i < nbBuffers; i++) {
                bucketTable.prepareBufferForReading(i);
                read = fileChannel.read(bucketTable.getBuffer(i));
                if (read < bucketTable.getBuffer(i).limit())
                    throw new BucketTableManagerException("Incomplete bucket table file "
                            + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                //else
                //   logger.info("Hot start: loaded "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            }
            // Checks second magic marker
            buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
            buffer.rewind();
            buffer.limit(INTSIZE);
            if (fileChannel.read(buffer) < INTSIZE)
                throw new BucketTableManagerException(
                        "Incomplete bucket table file, missing secong magic number "
                                + latestCommittedFile.getName());
            buffer.rewind();
            magic = buffer.getInt();
            if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Now reads clean counters
            while (true) {
                buffer.rewind();
                buffer.limit(NeedleLogInfo.INFOSIZE);
                read = fileChannel.read(buffer);
                if (read > 0 && read < NeedleLogInfo.INFOSIZE)
                    throw new BucketTableManagerException("Incomplete bucket table file, log info too short "
                            + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                if (read <= 0)
                    break;
                else {
                    NeedleLogInfo nli = new NeedleLogInfo(useAverage);
                    buffer.rewind();
                    nli.getNeedleLogInfo(buffer);
                    logInfoPerLogNumber.put(new Integer(nli.getNeedleFileNumber()), nli);
                }
            }
            logger.info("Hot start: loaded " + (nbBuffers * entriesPerBuffer) + " buckets");

        } else {
            // Empty file
            bucketTable.setInitialCheckPoint(new NeedlePointer());
            bucketTable.format();
        }
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed initializing bucket table", ie);
    } catch (BufferUnderflowException bue) {
        throw new BucketTableManagerException("Bucket table too short", bue);
    } finally {
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (IOException ex) {
                throw new BucketTableManagerException("Error while closing file channel", ex);
            }
        }
    }
}

From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java
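
The companion write path: position(0L) places the channel at the start of a new bucket table file so the header can be written first, followed by the bucket buffers, the log infos, and a final checkpoint written at a fixed offset.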

private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint 
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // NeedlePointer lastCheckPoint = bucketTable.getLastCheckPoint() ; // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // truncate buffer
        fileChannel.write(headerBuffer);
        // Now writes buffers
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + mapSize + HEADERSIZE);
            // else
            // logger.info("Bucket table commit: written "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            try {
                Thread.sleep(10);
            } catch (Throwable th) {

            }
        }
        // Writes second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write Needle Log Info
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Writes checkpoint
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        // System.out.println("Writing checkpoint in index "+bucketTable.getRequestedCheckPoint()) ;
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true); // Flip buffer after write
        headerBuffer.rewind();
        // fileChannel.force(false) ;
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());

        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writting bucket table", ie);
    } finally {
        headerBuffer = null; //May ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table" + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table" + currentFile.getName(), th);
    }
}