List of usage examples for java.nio.channels.FileChannel.close()
public final void close() throws IOException
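close() closes the channel; invoking it on a channel that is already closed has no effect. Because FileChannel implements AutoCloseable, the simplest way to guarantee the channel is closed, even when an operation throws, is a try-with-resources statement. A minimal sketch of that idiom follows (the file name is hypothetical and not taken from any of the projects listed on this page):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ReadFirstBlock {
    public static void main(String[] args) throws IOException {
        // try-with-resources invokes channel.close() automatically, even if read() throws
        try (FileChannel channel = FileChannel.open(Paths.get("example.dat"), StandardOpenOption.READ)) {
            ByteBuffer buffer = ByteBuffer.allocate(4096);
            int read = channel.read(buffer);
            System.out.println("Read " + read + " bytes");
        }
    }
}

The project examples collected below close their channels explicitly instead, typically in finally blocks.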
From source file:org.geowebcache.storage.blobstore.file.FileBlobStore.java
private void writeFile(File target, TileObject stObj, boolean existed) throws StorageException {
    // first write to temp file
    tmp.mkdirs();
    File temp = new File(tmp, UUID.randomUUID().toString());

    try {
        // Open the output stream and read the blob into the tile
        FileOutputStream fos = null;
        FileChannel channel = null;
        try {
            fos = new FileOutputStream(temp);
            channel = fos.getChannel();
            try {
                stObj.getBlob().transferTo(channel);
            } catch (IOException ioe) {
                throw new StorageException(ioe.getMessage() + " for " + target.getAbsolutePath());
            } finally {
                try {
                    if (channel != null) {
                        channel.close();
                    }
                } catch (IOException ioe) {
                    throw new StorageException(ioe.getMessage() + " for " + target.getAbsolutePath());
                }
            }
        } catch (FileNotFoundException ioe) {
            throw new StorageException(ioe.getMessage() + " for " + target.getAbsolutePath());
        } finally {
            IOUtils.closeQuietly(fos);
        }

        // rename to final position. This will fail if another GWC also wrote this
        // file, in such case we'll just eliminate this one
        if (FileUtils.renameFile(temp, target)) {
            temp = null;
        } else if (existed) {
            // if we are trying to overwrite an old tile, on Windows that might fail... delete
            // and rename instead
            if (target.delete() && FileUtils.renameFile(temp, target)) {
                temp = null;
            }
        }
    } finally {
        if (temp != null) {
            log.warn("Tile " + target.getPath() + " was already written by another thread/process");
            temp.delete();
        }
    }
}
From source file:com.aipo.social.opensocial.spi.AipoStorageService.java
@Override
public boolean copyFile(String srcRootPath, String srcDir, String srcFileName, String destRootPath,
        String destDir, String destFileName, SecurityToken paramSecurityToken) throws ProtocolException {
    File srcPath = new File(
            getAbsolutePath(srcRootPath) + separator() + Database.getDomainName() + separator() + srcDir);
    if (!srcPath.exists()) {
        try {
            srcPath.mkdirs();
        } catch (Exception e) {
            return false;
        }
    }
    File destPath = new File(
            getAbsolutePath(destRootPath) + separator() + Database.getDomainName() + separator() + destDir);
    if (!destPath.exists()) {
        try {
            destPath.mkdirs();
        } catch (Exception e) {
            return false;
        }
    }
    File from = new File(srcPath + separator() + srcFileName);
    File to = new File(destPath + separator() + destFileName);

    boolean res = true;
    FileChannel srcChannel = null;
    FileChannel destChannel = null;
    try {
        srcChannel = new FileInputStream(from).getChannel();
        destChannel = new FileOutputStream(to).getChannel();
        destChannel.transferFrom(srcChannel, 0, srcChannel.size());
    } catch (Exception ex) {
        res = false;
    } finally {
        if (destChannel != null) {
            try {
                destChannel.close();
            } catch (IOException ex) {
                res = false;
            }
        }
        if (srcChannel != null) {
            try {
                srcChannel.close();
            } catch (IOException ex) {
                res = false;
            }
        }
    }
    return res;
}
From source file:com.yobidrive.diskmap.buckets.BucketTableManager.java
private void initializeBucketTableFromLastCommittedBucketFile() throws BucketTableManagerException {
    FileInputStream tableStream = null;
    FileChannel fileChannel = null;
    try {
        File latestCommittedFile = getLatestCommitedFile();
        if (latestCommittedFile != null) {
            tableStream = new FileInputStream(latestCommittedFile);
            fileChannel = tableStream.getChannel();
            ByteBuffer buffer = ByteBuffer.allocate(HEADERSIZE);
            fileChannel.position(0L);
            int read = fileChannel.read(buffer);
            if (read < HEADERSIZE) {
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Wrong bucket table header size: " + read + "/" + HEADERSIZE);
            }
            // Check content of header. Start with Big Endian (default for Java)
            buffer.rewind();
            byteOrder = ByteOrder.BIG_ENDIAN;
            buffer.order(byteOrder);
            int magic = buffer.getInt();
            if (magic == MAGICSTART_BADENDIAN) {
                byteOrder = ByteOrder.LITTLE_ENDIAN;
                buffer.order(byteOrder);
            } else if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Read number of buckets
            long headerMapSize = buffer.getLong();
            // Read checkPoint
            NeedlePointer includedCheckpoint = new NeedlePointer();
            includedCheckpoint.getNeedlePointerFromBuffer(buffer);
            // Read second magic number
            magic = buffer.getInt();
            if (magic != MAGICEND) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Check number of buckets against requested map size
            if (headerMapSize != mapSize) {
                // Map size does not match
                fileChannel.close();
                throw new BucketTableManagerException(
                        "Requested map size " + mapSize + " does not match header map size " + headerMapSize);
            }
            // Sets initial checkpoint
            bucketTable.setInitialCheckPoint(includedCheckpoint);
            // Now reads all entries
            logger.info("Hot start: loading buckets...");
            for (int i = 0; i < nbBuffers; i++) {
                bucketTable.prepareBufferForReading(i);
                read = fileChannel.read(bucketTable.getBuffer(i));
                if (read < bucketTable.getBuffer(i).limit())
                    throw new BucketTableManagerException("Incomplete bucket table file "
                            + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                // else
                //     logger.info("Hot start: loaded " + (i + 1) * entriesPerBuffer + " buckets"
                //             + ((i < (nbBuffers - 1)) ? "..." : ""));
            }
            // Checks second magic marker
            buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
            buffer.rewind();
            buffer.limit(INTSIZE);
            if (fileChannel.read(buffer) < INTSIZE)
                throw new BucketTableManagerException(
                        "Incomplete bucket table file, missing second magic number "
                                + latestCommittedFile.getName());
            buffer.rewind();
            magic = buffer.getInt();
            if (magic != MAGICSTART) {
                fileChannel.close();
                throw new BucketTableManagerException("Bad header in bucket table file");
            }
            // Now reads clean counters
            while (true) {
                buffer.rewind();
                buffer.limit(NeedleLogInfo.INFOSIZE);
                read = fileChannel.read(buffer);
                if (read > 0 && read < NeedleLogInfo.INFOSIZE)
                    throw new BucketTableManagerException("Incomplete bucket table file, log info too short "
                            + latestCommittedFile.getName() + ", expected " + mapSize + HEADERSIZE);
                if (read <= 0)
                    break;
                else {
                    NeedleLogInfo nli = new NeedleLogInfo(useAverage);
                    buffer.rewind();
                    nli.getNeedleLogInfo(buffer);
                    logInfoPerLogNumber.put(new Integer(nli.getNeedleFileNumber()), nli);
                }
            }
            logger.info("Hot start: loaded " + (nbBuffers * entriesPerBuffer) + " buckets");
        } else {
            // Empty file
            bucketTable.setInitialCheckPoint(new NeedlePointer());
            bucketTable.format();
        }
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed initializing bucket table", ie);
    } catch (BufferUnderflowException bue) {
        throw new BucketTableManagerException("Bucket table too short", bue);
    } finally {
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (IOException ex) {
                throw new BucketTableManagerException("Error while closing file channel", ex);
            }
        }
    }
}
From source file:eu.stratosphere.nephele.services.iomanager.IOManagerPerformanceBenchmark.java
@SuppressWarnings("resource")
private final void speedTestNIO(int bufferSize, boolean direct) throws IOException {
    final Channel.ID tmpChannel = ioManager.createChannel();

    File tempFile = null;
    FileChannel fs = null;

    try {
        tempFile = new File(tmpChannel.getPath());

        RandomAccessFile raf = new RandomAccessFile(tempFile, "rw");
        fs = raf.getChannel();

        ByteBuffer buf = direct ? ByteBuffer.allocateDirect(bufferSize) : ByteBuffer.allocate(bufferSize);

        long writeStart = System.currentTimeMillis();

        int valsLeft = NUM_INTS_WRITTEN;
        while (valsLeft-- > 0) {
            if (buf.remaining() < 4) {
                buf.flip();
                fs.write(buf);
                buf.clear();
            }
            buf.putInt(valsLeft);
        }

        if (buf.position() > 0) {
            buf.flip();
            fs.write(buf);
        }

        fs.close();
        raf.close();
        fs = null;

        long writeElapsed = System.currentTimeMillis() - writeStart;

        // ----------------------------------------------------------------

        raf = new RandomAccessFile(tempFile, "r");
        fs = raf.getChannel();
        buf.clear();

        long readStart = System.currentTimeMillis();

        fs.read(buf);
        buf.flip();

        valsLeft = NUM_INTS_WRITTEN;
        while (valsLeft-- > 0) {
            if (buf.remaining() < 4) {
                buf.compact();
                fs.read(buf);
                buf.flip();
            }
            if (buf.getInt() != valsLeft) {
                throw new IOException();
            }
        }

        fs.close();
        raf.close();

        long readElapsed = System.currentTimeMillis() - readStart;

        LOG.info("NIO Channel with buffer " + bufferSize + ": write " + writeElapsed + " msecs, read "
                + readElapsed + " msecs.");
    } finally {
        // close if possible
        if (fs != null) {
            fs.close();
            fs = null;
        }
        // try to delete the file
        if (tempFile != null) {
            tempFile.delete();
        }
    }
}
From source file:eu.stratosphere.nephele.taskmanager.runtime.EnvelopeConsumptionLog.java
private void writeAnnouncedEnvelopesBufferToDisk() {
    FileChannel fc = null;

    try {
        this.announcedEnvelopesAsIntBuffer.flip();
        this.announcedEnvelopesAsByteBuffer
                .position(this.announcedEnvelopesAsIntBuffer.position() * SIZE_OF_INTEGER);
        this.announcedEnvelopesAsByteBuffer
                .limit(this.announcedEnvelopesAsIntBuffer.limit() * SIZE_OF_INTEGER);

        fc = new FileOutputStream(this.logFile, true).getChannel();

        while (this.announcedEnvelopesAsByteBuffer.hasRemaining()) {
            fc.write(this.announcedEnvelopesAsByteBuffer);
        }
    } catch (IOException ioe) {
        LOG.error(StringUtils.stringifyException(ioe));
    } finally {
        if (fc != null) {
            try {
                fc.close();
            } catch (IOException ioe) {
            }
        }
        this.announcedEnvelopesAsIntBuffer.clear();
        this.announcedEnvelopesAsByteBuffer.clear();
    }
}
From source file:edu.cmu.graphchi.shards.QueryShard.java
private void loadInEdgeStartBuffer() throws IOException {
    File inEdgeStartBufferFile = new File(
            ChiFilenames.getFilenameShardsAdjStartIndices(adjFile.getAbsolutePath()));
    FileChannel inEdgeStartChannel = new java.io.RandomAccessFile(inEdgeStartBufferFile, "r").getChannel();
    inEdgeStartBuffer = inEdgeStartChannel
            .map(FileChannel.MapMode.READ_ONLY, 0, inEdgeStartBufferFile.length()).asIntBuffer();
    inEdgeStartChannel.close();
}
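The example above maps the file and closes the channel immediately. This is safe because a mapping created by FileChannel.map() remains valid after the channel that created it is closed; the mapped memory is released only when the buffer itself is garbage-collected. A minimal sketch of the same map-then-close pattern (the file name is hypothetical):

import java.io.RandomAccessFile;
import java.nio.IntBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;

public class MapThenClose {
    public static void main(String[] args) throws Exception {
        RandomAccessFile raf = new RandomAccessFile("index.dat", "r"); // hypothetical file
        FileChannel channel = raf.getChannel();
        MappedByteBuffer mapped = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
        channel.close(); // closing the channel also closes the RandomAccessFile

        // The mapping stays usable after close(); it is released only when
        // the buffer becomes unreachable and is garbage-collected.
        IntBuffer ints = mapped.asIntBuffer();
        if (ints.hasRemaining()) {
            System.out.println("First int: " + ints.get(0));
        }
    }
}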
From source file:dk.statsbiblioteket.util.LineReaderTest.java
public void testNIO() throws Exception {
    byte[] INITIAL = new byte[] { 1, 2, 3, 4 };
    byte[] EXTRA = new byte[] { 5, 6, 7, 8 };
    byte[] FULL = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 };
    byte[] FIFTH = new byte[] { 87 };
    byte[] FULL_WITH_FIFTH = new byte[] { 1, 2, 3, 4, 87, 6, 7, 8 };

    // Create temp-file with content
    File temp = createTempFile();
    FileOutputStream fileOut = new FileOutputStream(temp, true);
    fileOut.write(INITIAL);
    fileOut.close();

    checkContent("The plain test-file should be correct", temp, INITIAL);

    { // Read the 4 bytes
        RandomAccessFile input = new RandomAccessFile(temp, "r");
        FileChannel channelIn = input.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        channelIn.position(0);
        assertEquals("Buffer read should read full length", INITIAL.length, channelIn.read(buffer));
        buffer.position(0);
        checkContent("Using buffer should produce the right bytes", INITIAL, buffer);
        channelIn.close();
        input.close();
    }

    { // Fill new buffer
        ByteBuffer outBuffer = ByteBuffer.allocate(4096);
        outBuffer.put(EXTRA);
        outBuffer.flip();
        assertEquals("The limit of the outBuffer should be correct", EXTRA.length, outBuffer.limit());

        // Append new buffer to end
        RandomAccessFile output = new RandomAccessFile(temp, "rw");
        FileChannel channelOut = output.getChannel();
        channelOut.position(INITIAL.length);
        assertEquals("All bytes should be written", EXTRA.length, channelOut.write(outBuffer));
        channelOut.close();
        output.close();
        checkContent("The resulting file should have the full output", temp, FULL);
    }

    { // Fill single byte buffer
        ByteBuffer outBuffer2 = ByteBuffer.allocate(4096);
        outBuffer2.put(FIFTH);
        outBuffer2.flip();
        assertEquals("The limit of the second outBuffer should be correct", FIFTH.length, outBuffer2.limit());

        // Insert byte in the middle
        RandomAccessFile output2 = new RandomAccessFile(temp, "rw");
        FileChannel channelOut2 = output2.getChannel();
        channelOut2.position(4);
        assertEquals("The FIFTH should be written", FIFTH.length, channelOut2.write(outBuffer2));
        channelOut2.close();
        output2.close();
        checkContent("The resulting file with fifth should be complete", temp, FULL_WITH_FIFTH);
    }
}
From source file:org.apache.cordova.core.FileUtils.java
/**
 * Moved this code into its own method so moveTo could use it when the move is across file systems
 */
private void copyAction(File srcFile, File destFile) throws FileNotFoundException, IOException {
    FileInputStream istream = new FileInputStream(srcFile);
    FileOutputStream ostream = new FileOutputStream(destFile);
    FileChannel input = istream.getChannel();
    FileChannel output = ostream.getChannel();

    try {
        input.transferTo(0, input.size(), output);
    } finally {
        istream.close();
        ostream.close();
        input.close();
        output.close();
    }
}
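A channel obtained from a FileInputStream or FileOutputStream is tied to that stream: closing either one closes both, so the finally block above ends up closing each underlying file twice, which is harmless because close() on an already-closed resource has no effect. For comparison, a minimal sketch of the same copy written with try-with-resources so that every resource is released even if the transfer fails partway; the class name is hypothetical, and the loop accounts for transferTo() possibly moving fewer bytes than requested:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

public final class ChannelCopy {
    // Copies srcFile to destFile; try-with-resources closes the channels
    // (and, through them, the underlying streams) in reverse order,
    // even when transferTo() throws.
    public static void copy(File srcFile, File destFile) throws IOException {
        try (FileChannel input = new FileInputStream(srcFile).getChannel();
             FileChannel output = new FileOutputStream(destFile).getChannel()) {
            long size = input.size();
            long transferred = 0;
            while (transferred < size) {
                // transferTo may transfer fewer bytes than requested, so loop until done
                transferred += input.transferTo(transferred, size - transferred, output);
            }
        }
    }
}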
From source file:edu.cmu.graphchi.shards.QueryShard.java
void loadPointers() throws IOException {
    File pointerFile = new File(ChiFilenames.getFilenameShardsAdjPointers(adjFile.getAbsolutePath()));
    if (!pinIndexToMemory) {
        FileChannel ptrFileChannel = new java.io.RandomAccessFile(pointerFile, "r").getChannel();
        pointerIdxBuffer = ptrFileChannel.map(FileChannel.MapMode.READ_ONLY, 0, pointerFile.length())
                .asLongBuffer();
        ptrFileChannel.close();
    } else {
        byte[] data = new byte[(int) pointerFile.length()];
        if (data.length == 0)
            return;
        totalOrigSize += data.length;
        FileInputStream fis = new FileInputStream(pointerFile);
        int i = 0;
        while (i < data.length) {
            i += fis.read(data, i, data.length - i);
        }
        fis.close();

        pointerIdxBuffer = ByteBuffer.wrap(data).asLongBuffer();

        long[] vertices = new long[pointerIdxBuffer.capacity() - 1];
        long[] offs = new long[vertices.length];

        for (int j = 0; j < vertices.length; j++) {
            long x = pointerIdxBuffer.get(j);
            vertices[j] = VertexIdTranslate.getVertexId(x);
            offs[j] = VertexIdTranslate.getAux(x);
        }

        boolean extraZero = (offs.length > 1 && offs[1] == offs[0]);
        if (extraZero) {
            vertices = Arrays.copyOfRange(vertices, 1, vertices.length);
            offs = Arrays.copyOfRange(offs, 1, offs.length);
        }

        gammaSeqVertices = new IncreasingEliasGammaSeq(vertices);
        gammaSeqOffs = new IncreasingEliasGammaSeq(offs);

        totalPinnedSize += gammaSeqVertices.sizeInBytes();
        totalPinnedSize += gammaSeqOffs.sizeInBytes();

        pointerIdxBuffer = null;
    }
}
From source file:com.yobidrive.diskmap.needles.NeedleManager.java
public void close() {
    Enumeration<FileChannel> channels = channelMap.elements();
    while (channels.hasMoreElements()) {
        FileChannel fc = channels.nextElement();
        try {
            fc.force(true);
            fc.close();
        } catch (Throwable th) {
            logger.error("Error closing needle channel", th);
        }
    }
}
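The example above calls force(true) before close(), and that ordering matters: close() releases the file handle but does not by itself guarantee that buffered writes have reached the storage device, whereas force(true) flushes both file content and metadata. A minimal sketch of the same force-then-close pattern using try-with-resources (the file name is hypothetical):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class DurableWrite {
    public static void main(String[] args) throws IOException {
        try (FileChannel fc = FileChannel.open(Paths.get("journal.log"), // hypothetical file
                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.APPEND)) {
            fc.write(ByteBuffer.wrap("entry\n".getBytes(StandardCharsets.UTF_8)));
            fc.force(true); // flush data and metadata to the device before close()
        }
    }
}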