List of usage examples for java.io RandomAccessFile getChannel
public final FileChannel getChannel()
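Before the project examples below, here is a minimal, self-contained sketch of the call itself. The file name "example.dat" is only a placeholder and does not come from any of the listed projects; closing the channel also closes the underlying RandomAccessFile.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class GetChannelExample {
    public static void main(String[] args) throws IOException {
        // Open the file read-only and obtain its FileChannel.
        try (RandomAccessFile raf = new RandomAccessFile("example.dat", "r");
             FileChannel channel = raf.getChannel()) {
            ByteBuffer buffer = ByteBuffer.allocate(4096);
            int read = channel.read(buffer); // reads at the channel's current position
            System.out.println("Read " + read + " bytes of " + channel.size());
        }
    }
}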
From source file:com.owncloud.android.lib.common.network.FileRequestEntity.java
@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    // globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getAbsolutePath());
                }
            }
        }
    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);
    } finally {
        channel.close();
        raf.close();
    }
}
From source file:com.cerema.cloud2.lib.common.network.FileRequestEntity.java
@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    // globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getAbsolutePath());
                }
            }
        }
    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);
    } finally {
        channel.close();
        raf.close();
    }
}
From source file:com.turn.ttorrent.client.TorrentByteStorage.java
/**
 * Move the partial file to its final location.
 *
 * <p>
 * This method needs to make sure reads can still happen seamlessly during
 * the operation. The partial is first flushed to the storage device before
 * being copied to its target location. The {@link FileChannel} is then
 * switched to this new file before the partial is removed.
 * </p>
 */
public synchronized void complete() throws IOException {
    this.channel.force(true);

    // Nothing more to do if we're already on the target file.
    if (this.current.equals(this.target)) {
        return;
    }

    FileUtils.deleteQuietly(this.target);
    FileUtils.copyFile(this.current, this.target);

    logger.debug("Re-opening torrent byte storage at " + this.target.getAbsolutePath() + ".");

    RandomAccessFile raf = new RandomAccessFile(this.target, "rw");
    raf.setLength(this.size);

    this.channel = raf.getChannel();
    this.raf.close();
    this.raf = raf;
    this.current = this.target;

    FileUtils.deleteQuietly(this.partial);
}
From source file:it.doqui.index.ecmengine.business.personalization.multirepository.FileContentWriterDynamic.java
@Override
protected WritableByteChannel getDirectWritableChannel() throws ContentIOException {
    try {
        // we may not write to an existing file - EVER!!
        if (file.exists() && file.length() > 0) {
            throw new IOException("File exists - overwriting not allowed");
        }
        // create the channel
        WritableByteChannel channel = null;
        if (allowRandomAccess) {
            RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw"); // will create it
            channel = randomAccessFile.getChannel();
        } else {
            OutputStream os = new FileOutputStream(file);
            channel = Channels.newChannel(os);
        }
        // done
        if (logger.isDebugEnabled()) {
            logger.debug("Opened write channel to file: \n" +
                    "   file: " + file + "\n" +
                    "   random-access: " + allowRandomAccess);
        }
        return channel;
    } catch (Throwable e) {
        throw new ContentIOException("Failed to open file channel: " + this, e);
    }
}
From source file:com.owncloud.android.operations.ChunkedUploadFileOperation.java
@Override
protected int uploadFile(WebdavClient client) throws HttpException, IOException {
    int status = -1;
    FileChannel channel = null;
    RandomAccessFile raf = null;
    try {
        File file = new File(getStoragePath());
        raf = new RandomAccessFile(file, "r");
        channel = raf.getChannel();
        mEntity = new ChunkFromFileChannelRequestEntity(channel, getMimeType(), CHUNK_SIZE, file);
        ((ProgressiveDataTransferer) mEntity).addDatatransferProgressListeners(getDataTransferListeners());
        long offset = 0;
        String uriPrefix = client.getBaseUri() + WebdavUtils.encodePath(getRemotePath()) + "-chunking-"
                + Math.abs((new Random()).nextInt(9000) + 1000) + "-";
        long chunkCount = (long) Math.ceil((double) file.length() / CHUNK_SIZE);
        for (int chunkIndex = 0; chunkIndex < chunkCount; chunkIndex++, offset += CHUNK_SIZE) {
            if (mPutMethod != null) {
                mPutMethod.releaseConnection(); // let the connection available for other methods
            }
            mPutMethod = new PutMethod(uriPrefix + chunkCount + "-" + chunkIndex);
            mPutMethod.addRequestHeader(OC_CHUNKED_HEADER, OC_CHUNKED_HEADER);
            ((ChunkFromFileChannelRequestEntity) mEntity).setOffset(offset);
            mPutMethod.setRequestEntity(mEntity);
            status = client.executeMethod(mPutMethod);
            client.exhaustResponse(mPutMethod.getResponseBodyAsStream());
            Log_OC.d(TAG, "Upload of " + getStoragePath() + " to " + getRemotePath() + ", chunk index "
                    + chunkIndex + ", count " + chunkCount + ", HTTP result status " + status);
            if (!isSuccess(status))
                break;
        }
    } finally {
        if (channel != null)
            channel.close();
        if (raf != null)
            raf.close();
        if (mPutMethod != null)
            mPutMethod.releaseConnection(); // let the connection available for other methods
    }
    return status;
}
From source file:com.owncloud.android.lib.resources.files.ChunkedUploadRemoteFileOperation.java
@Override
protected int uploadFile(OwnCloudClient client) throws HttpException, IOException {
    int status = -1;
    FileChannel channel = null;
    RandomAccessFile raf = null;
    try {
        File file = new File(mLocalPath);
        raf = new RandomAccessFile(file, "r");
        channel = raf.getChannel();
        mEntity = new ChunkFromFileChannelRequestEntity(channel, mMimeType, CHUNK_SIZE, file);
        //((ProgressiveDataTransferer)mEntity).addDatatransferProgressListeners(getDataTransferListeners());
        synchronized (mDataTransferListeners) {
            ((ProgressiveDataTransferer) mEntity).addDatatransferProgressListeners(mDataTransferListeners);
        }
        long offset = 0;
        String uriPrefix = client.getWebdavUri() + WebdavUtils.encodePath(mRemotePath) + "-chunking-"
                + Math.abs((new Random()).nextInt(9000) + 1000) + "-";
        long chunkCount = (long) Math.ceil((double) file.length() / CHUNK_SIZE);
        for (int chunkIndex = 0; chunkIndex < chunkCount; chunkIndex++, offset += CHUNK_SIZE) {
            if (mPutMethod != null) {
                mPutMethod.releaseConnection(); // let the connection available for other methods
            }
            mPutMethod = new PutMethod(uriPrefix + chunkCount + "-" + chunkIndex);
            mPutMethod.addRequestHeader(OC_CHUNKED_HEADER, OC_CHUNKED_HEADER);
            ((ChunkFromFileChannelRequestEntity) mEntity).setOffset(offset);
            mPutMethod.setRequestEntity(mEntity);
            status = client.executeMethod(mPutMethod);
            client.exhaustResponse(mPutMethod.getResponseBodyAsStream());
            Log_OC.d(TAG, "Upload of " + mLocalPath + " to " + mRemotePath + ", chunk index "
                    + chunkIndex + ", count " + chunkCount + ", HTTP result status " + status);
            if (!isSuccess(status))
                break;
        }
    } finally {
        if (channel != null)
            channel.close();
        if (raf != null)
            raf.close();
        if (mPutMethod != null)
            mPutMethod.releaseConnection(); // let the connection available for other methods
    }
    return status;
}
From source file:com.linkedin.pinot.common.utils.MmapUtils.java
/**
 * Memory maps a file, tracking usage information.
 *
 * @param randomAccessFile The random access file to mmap
 * @param mode The mmap mode
 * @param position The byte position to mmap
 * @param size The number of bytes to mmap
 * @param file The file that is mmap'ed
 * @param details Additional details about the allocation
 */
public static MappedByteBuffer mmapFile(RandomAccessFile randomAccessFile, FileChannel.MapMode mode,
        long position, long size, File file, String details) throws IOException {
    final String context;
    final AllocationContext allocationContext;

    if (file != null) {
        context = file.getAbsolutePath() + " (" + details + ")";
        allocationContext = new AllocationContext(file, details, AllocationType.MMAP);
    } else {
        context = "no file (" + details + ")";
        allocationContext = new AllocationContext(details, AllocationType.MMAP);
    }

    MMAP_BUFFER_USAGE.addAndGet(size);
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Memory mapping file, mmap size {} with context {}, allocation after operation {}", size,
                context, getTrackedAllocationStatus());
    }

    MappedByteBuffer byteBuffer = null;
    try {
        byteBuffer = randomAccessFile.getChannel().map(mode, position, size);
        MMAP_BUFFER_COUNT.incrementAndGet();
    } catch (Exception e) {
        LOGGER.error("Failed to mmap file (size {}, context {})", size, context, e);
        LOGGER.error("Allocation status {}", getTrackedAllocationStatus());
        Utils.rethrowException(e);
    }

    BUFFER_TO_CONTEXT_MAP.put(byteBuffer, allocationContext);
    return byteBuffer;
}
From source file:org.apache.hadoop.mapreduce.task.reduce.InMemoryLinkMapOutput.java
@Override
public void shuffle(MapHost host, InputStream input, long compressedLength, long decompressedLength,
        ShuffleClientMetrics metrics, Reporter reporter) throws IOException {
    String mapHostName = host.getHostName().split(":")[0];
    String app_path = conf.get(MRConfig.LOCAL_DIR);
    LOG.debug("original app_path " + app_path);
    String[] app_path_parts = app_path.split("/");
    app_path_parts[app_path_parts.length - 5] = mapHostName;
    StringBuilder builder = new StringBuilder();
    for (String s : app_path_parts) {
        builder.append(s);
        builder.append("/");
    }
    app_path = builder.toString();
    String src = app_path + "output/" + getMapId() + "/file.out";

    File f = new File(src);
    if (f.exists()) {
        LOG.debug("shuffleToLink: the src " + src + " EXIST!");
    }
    //LOG.debug("src file size: "+f.length());

    //input = new FileInputStream(src);
    //input.skip(offset);
    RandomAccessFile raf = new RandomAccessFile(f, "r");
    input = Channels.newInputStream(raf.getChannel().position(offset));

    IFileInputStream checksumIn = new IFileInputStream(input, compressedLength, conf);
    input = checksumIn;

    // Are map-outputs compressed?
    if (codec != null) {
        decompressor.reset();
        input = codec.createInputStream(input, decompressor);
    }

    try {
        LOG.debug("offset: " + offset);
        LOG.debug("memory.length: " + memory.length);
        LOG.debug("compressedLength: " + compressedLength);
        LOG.debug("decompressedLength: " + decompressedLength);

        // TO-DO: would offset and length be OK to be int?
        IOUtils.readFully(input, memory, 0, memory.length);
        metrics.inputBytes((int) memory.length);
        reporter.progress();
        LOG.info("Read " + memory.length + " bytes from map-output for " + getMapId());

        /**
         * We've gotten the amount of data we were expecting. Verify the
         * decompressor has nothing more to offer. This action also forces
         * the decompressor to read any trailing bytes that weren't critical
         * for decompression, which is necessary to keep the stream in sync.
         */
        //if (input.read() >= 0) {
        //    throw new IOException(
        //            "Unexpected extra bytes from input stream for "
        //            + getMapId());
        //}
        input.close();
        raf.close();
    } catch (IOException ioe) {
        // Close the streams
        IOUtils.cleanup(LOG, input);
        // Re-throw
        throw ioe;
    } finally {
        CodecPool.returnDecompressor(decompressor);
    }
}
From source file:it.doqui.index.ecmengine.business.personalization.multirepository.FileContentReaderDynamic.java
@Override
protected ReadableByteChannel getDirectReadableChannel() throws ContentIOException {
    try {
        // the file must exist
        if (!file.exists()) {
            throw new IOException("File does not exist: " + file);
        }
        // create the channel
        ReadableByteChannel channel = null;
        if (allowRandomAccess) {
            RandomAccessFile randomAccessFile = new RandomAccessFile(file, "r"); // won't create it
            channel = randomAccessFile.getChannel();
        } else {
            InputStream is = new FileInputStream(file);
            channel = Channels.newChannel(is);
        }
        // done
        if (logger.isDebugEnabled()) {
            logger.debug("Opened read channel to file: \n" +
                    "   file: " + file + "\n" +
                    "   random-access: " + allowRandomAccess);
        }
        return channel;
    } catch (Throwable e) {
        throw new ContentIOException("Failed to open file channel: " + this, e);
    }
}
From source file:com.norconex.commons.lang.io.CachedOutputStream.java
@SuppressWarnings("resource") private void cacheToFile() throws IOException { fileCache = File.createTempFile("CachedOutputStream-", "-temp", cacheDirectory); fileCache.deleteOnExit();/*from ww w .j a va 2s. co m*/ LOG.debug("Reached max cache size. Swapping to file: " + fileCache); RandomAccessFile f = new RandomAccessFile(fileCache, "rw"); FileChannel channel = f.getChannel(); fileOutputStream = Channels.newOutputStream(channel); IOUtils.write(memOutputStream.toByteArray(), fileOutputStream); memOutputStream = null; }