Usage examples for java.nio.channels.FileChannel.write(ByteBuffer, long)
public abstract int write(ByteBuffer src, long position) throws IOException;
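Before the excerpts from real projects below, here is a minimal self-contained sketch (file name and payload are illustrative) of the core contract of the positional write: bytes land at the given absolute offset, the channel's own position is left untouched, and the return value is the number of bytes actually written, which may be less than the buffer's remaining count.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class PositionalWriteDemo {
    public static void main(String[] args) throws IOException {
        try (FileChannel fc = FileChannel.open(Paths.get("demo.bin"), // illustrative file name
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            ByteBuffer src = ByteBuffer.wrap("hello".getBytes());
            int written = fc.write(src, 100); // write at absolute offset 100
            // The channel's position is still 0; positional writes do not move it.
            System.out.println("wrote " + written + " bytes, channel position = " + fc.position());
        }
    }
}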
From source file:com.buaa.cfs.utils.IOUtils.java
/**
 * Write a ByteBuffer to a FileChannel at a given offset, handling short writes.
 *
 * @param fc     The FileChannel to write to
 * @param buf    The input buffer
 * @param offset The offset in the file to start writing at
 * @throws IOException On I/O error
 */
public static void writeFully(FileChannel fc, ByteBuffer buf, long offset) throws IOException {
    do {
        offset += fc.write(buf, offset);
    } while (buf.remaining() > 0);
}
From source file:org.neo4j.io.fs.FileUtils.java
public static void writeAll(FileChannel channel, ByteBuffer src, long position) throws IOException {
    long filePosition = position;
    long expectedEndPosition = filePosition + src.limit() - src.position();
    int bytesWritten;
    while ((filePosition += (bytesWritten = channel.write(src, filePosition))) < expectedEndPosition) {
        if (bytesWritten <= 0) {
            throw new IOException("Unable to write to disk, reported bytes written was " + bytesWritten);
        }
    }
}
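The two helpers above differ in one detail worth noting: the first loops until the buffer is drained but would spin forever if the channel ever reported zero bytes written, while the second fails fast in that case. A combined sketch (a hypothetical helper, not taken from either source) keeps the simpler remaining-bytes loop and adds the zero-write guard:

public static void writeFully(FileChannel fc, ByteBuffer src, long offset) throws IOException {
    while (src.hasRemaining()) {
        int n = fc.write(src, offset); // may write fewer bytes than remain in src
        if (n <= 0) {
            throw new IOException("FileChannel.write returned " + n + ", aborting to avoid a busy loop");
        }
        offset += n;
    }
}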
From source file:org.linagora.linshare.webservice.uploadrequest.impl.FlowUploaderRestServiceImpl.java
@Path("/") @POST//from w w w . j a va 2 s.co m @Consumes("multipart/form-data") @Override public Response uploadChunk(@Multipart(CHUNK_NUMBER) long chunkNumber, @Multipart(TOTAL_CHUNKS) long totalChunks, @Multipart(CHUNK_SIZE) long chunkSize, @Multipart(TOTAL_SIZE) long totalSize, @Multipart(IDENTIFIER) String identifier, @Multipart(FILENAME) String filename, @Multipart(RELATIVE_PATH) String relativePath, @Multipart(FILE) InputStream file, MultipartBody body, @Multipart(REQUEST_URL_UUID) String uploadRequestUrlUuid, @Multipart(PASSWORD) String password) throws BusinessException { logger.debug("upload chunk number : " + chunkNumber); identifier = cleanIdentifier(identifier); Validate.isTrue(isValid(chunkNumber, chunkSize, totalSize, identifier, filename)); try { logger.debug("writing chunk number : " + chunkNumber); java.nio.file.Path tempFile = getTempFile(identifier); FileChannel fc = FileChannel.open(tempFile, StandardOpenOption.CREATE, StandardOpenOption.APPEND); byte[] byteArray = IOUtils.toByteArray(file); fc.write(ByteBuffer.wrap(byteArray), (chunkNumber - 1) * chunkSize); fc.close(); chunkedFiles.get(identifier).addChunk(chunkNumber); if (isUploadFinished(identifier, chunkSize, totalSize)) { logger.debug("upload finished "); InputStream inputStream = Files.newInputStream(tempFile, StandardOpenOption.READ); File tempFile2 = getTempFile(inputStream, "rest-flowuploader", filename); try { uploadRequestUrlFacade.addUploadRequestEntry(uploadRequestUrlUuid, password, tempFile2, filename); } finally { deleteTempFile(tempFile2); } ChunkedFile remove = chunkedFiles.remove(identifier); Files.deleteIfExists(remove.getPath()); return Response.ok("upload success").build(); } else { logger.debug("upload pending "); } } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } return Response.ok("upload success").build(); }
From source file:org.linagora.linshare.webservice.userv2.impl.FlowDocumentUploaderRestServiceImpl.java
@Path("/") @POST/* w w w .j a va 2s . c o m*/ @Consumes("multipart/form-data") @Override public FlowDto uploadChunk(@Multipart(CHUNK_NUMBER) long chunkNumber, @Multipart(TOTAL_CHUNKS) long totalChunks, @Multipart(CHUNK_SIZE) long chunkSize, @Multipart(CURRENT_CHUNK_SIZE) long currentChunkSize, @Multipart(TOTAL_SIZE) long totalSize, @Multipart(IDENTIFIER) String identifier, @Multipart(FILENAME) String filename, @Multipart(RELATIVE_PATH) String relativePath, @Multipart(FILE) InputStream file, MultipartBody body, @Multipart(value = WORK_GROUP_UUID, required = false) String workGroupUuid, @Multipart(value = WORK_GROUP_FOLDER_UUID, required = false) String workGroupFolderUuid, @Multipart(value = ASYNC_TASK, required = false) boolean async) throws BusinessException { logger.debug("upload chunk number : " + chunkNumber); identifier = cleanIdentifier(identifier); boolean isValid = FlowUploaderUtils.isValid(chunkNumber, chunkSize, totalSize, identifier, filename); Validate.isTrue(isValid); checkIfMaintenanceIsEnabled(); FlowDto flow = new FlowDto(chunkNumber); try { logger.debug("writing chunk number : " + chunkNumber); java.nio.file.Path tempFile = FlowUploaderUtils.getTempFile(identifier, chunkedFiles); ChunkedFile currentChunkedFile = chunkedFiles.get(identifier); if (!currentChunkedFile.hasChunk(chunkNumber)) { FileChannel fc = FileChannel.open(tempFile, StandardOpenOption.CREATE, StandardOpenOption.APPEND); ByteArrayOutputStream output = new ByteArrayOutputStream(); IOUtils.copy(file, output); fc.write(ByteBuffer.wrap(output.toByteArray()), (chunkNumber - 1) * chunkSize); fc.close(); if (sizeValidation) { if (output.size() != currentChunkSize) { String msg = String.format("File size does not match, found : %1$d, announced : %2$d", output.size(), currentChunkSize); logger.error(msg); flow.setChunkUploadSuccess(false); flow.setErrorMessage(msg); return flow; } } currentChunkedFile.addChunk(chunkNumber); } else { logger.error("currentChunkedFile.hasChunk(chunkNumber) !!! 
" + currentChunkedFile); logger.error("chunkedNumber skipped : " + chunkNumber); } logger.debug("nb uploading files : " + chunkedFiles.size()); logger.debug("current chuckedfile uuid : " + identifier); logger.debug("current chuckedfiles" + chunkedFiles.toString()); if (FlowUploaderUtils.isUploadFinished(identifier, chunkSize, totalSize, chunkedFiles)) { flow.setLastChunk(true); logger.debug("upload finished : " + chunkNumber + " : " + identifier); InputStream inputStream = Files.newInputStream(tempFile, StandardOpenOption.READ); File tempFile2 = getTempFile(inputStream, "rest-flowuploader", filename); if (sizeValidation) { long currSize = tempFile2.length(); if (currSize != totalSize) { String msg = String.format("File size does not match, found : %1$d, announced : %2$d", currSize, totalSize); logger.error(msg); flow.setChunkUploadSuccess(false); flow.setErrorMessage(msg); return flow; } } EntryDto uploadedDocument = new EntryDto(); flow.setIsAsync(async); boolean isWorkGroup = !Strings.isNullOrEmpty(workGroupUuid); if (async) { logger.debug("Async mode is used"); // Asynchronous mode AccountDto actorDto = documentFacade.getAuthenticatedAccountDto(); AsyncTaskDto asyncTask = null; try { if (isWorkGroup) { ThreadEntryTaskContext threadEntryTaskContext = new ThreadEntryTaskContext(actorDto, actorDto.getUuid(), workGroupUuid, tempFile2, filename, workGroupFolderUuid); asyncTask = asyncTaskFacade.create(totalSize, getTransfertDuration(identifier), filename, null, AsyncTaskType.THREAD_ENTRY_UPLOAD); ThreadEntryUploadAsyncTask task = new ThreadEntryUploadAsyncTask(threadEntryAsyncFacade, threadEntryTaskContext, asyncTask); taskExecutor.execute(task); flow.completeAsyncTransfert(asyncTask); } else { DocumentTaskContext documentTaskContext = new DocumentTaskContext(actorDto, actorDto.getUuid(), tempFile2, filename, null, null); asyncTask = asyncTaskFacade.create(totalSize, getTransfertDuration(identifier), filename, null, AsyncTaskType.DOCUMENT_UPLOAD); DocumentUploadAsyncTask task = new DocumentUploadAsyncTask(documentAsyncFacade, documentTaskContext, asyncTask); taskExecutor.execute(task); flow.completeAsyncTransfert(asyncTask); } } catch (Exception e) { logAsyncFailure(asyncTask, e); deleteTempFile(tempFile2); ChunkedFile remove = chunkedFiles.remove(identifier); Files.deleteIfExists(remove.getPath()); throw e; } } else { try { if (isWorkGroup) { uploadedDocument = threadEntryFacade.create(null, workGroupUuid, workGroupFolderUuid, tempFile2, filename); } else { uploadedDocument = documentFacade.create(tempFile2, filename, "", null); } flow.completeTransfert(uploadedDocument); } finally { deleteTempFile(tempFile2); ChunkedFile remove = chunkedFiles.remove(identifier); if (remove != null) { Files.deleteIfExists(remove.getPath()); } else { logger.error("Should not happen !!!"); logger.error("chunk number: " + chunkNumber); logger.error("chunk identifier: " + identifier); logger.error("chunk filename: " + filename); logger.error("chunks : " + chunkedFiles.toString()); } } } return flow; } else { logger.debug("upload pending "); flow.setChunkUploadSuccess(true); } } catch (BusinessException e) { logger.error(e.getMessage()); logger.debug("Exception : ", e); flow.setChunkUploadSuccess(false); flow.setErrorMessage(e.getMessage()); flow.setErrCode(e.getErrorCode().getCode()); } catch (Exception e) { logger.error(e.getMessage()); logger.debug("Exception : ", e); flow.setChunkUploadSuccess(false); flow.setErrorMessage(e.getMessage()); } return flow; }
From source file:org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.java
/** Check if nn.getCorruptFiles() returns a file that has corrupted blocks. */
@Test(timeout = 300000)
public void testListCorruptFilesCorruptedBlock() throws Exception {
    MiniDFSCluster cluster = null;
    Random random = new Random();
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
        conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
        // Set short retry timeouts so this test runs faster
        conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
        cluster = new MiniDFSCluster.Builder(conf).build();
        FileSystem fs = cluster.getFileSystem();
        // create two files with one block each
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testCorruptFilesCorruptedBlock").setNumFiles(2)
                .setMaxLevels(1).setMaxSize(512).build();
        util.createFiles(fs, "/srcdat10");
        // fetch bad file list from namenode. There should be none.
        final NameNode namenode = cluster.getNameNode();
        Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.getNamesystem()
                .listCorruptFileBlocks("/", null);
        assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0);
        // Now deliberately corrupt one block
        String bpid = cluster.getNamesystem().getBlockPoolId();
        File storageDir = cluster.getInstanceStorageDir(0, 1);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("data directory does not exist", data_dir.exists());
        List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        assertTrue("Data directory does not contain any blocks or there was an " + "IO error",
                metaFiles != null && !metaFiles.isEmpty());
        File metaFile = metaFiles.get(0);
        RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
        FileChannel channel = file.getChannel();
        long position = channel.size() - 2;
        int length = 2;
        byte[] buffer = new byte[length];
        random.nextBytes(buffer);
        channel.write(ByteBuffer.wrap(buffer), position);
        file.close();
        LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length "
                + length);
        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }
        // fetch bad file list from namenode. There should be one file.
        badFiles = namenode.getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);
        util.cleanup(fs, "/srcdat10");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
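The actual use of the positional write is easy to miss inside the test harness: it overwrites the last two bytes of an existing block metadata file in place, a handy generic pattern for fault-injection tests. Stripped of the HDFS scaffolding, the idiom looks like this (the file name is illustrative):

// Overwrite the last `length` bytes of an existing file with random junk, in place.
try (RandomAccessFile file = new RandomAccessFile("block.meta", "rw")) {
    FileChannel channel = file.getChannel();
    int length = 2;
    long position = channel.size() - length;
    byte[] junk = new byte[length];
    new java.util.Random().nextBytes(junk);
    channel.write(ByteBuffer.wrap(junk), position);
}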
From source file:com.sm.connector.server.ServerStore.java
private void writeIndexBlock(CacheBlock<byte[]> block, long keyOffset2Len, FileChannel channel)
        throws IOException {
    long pos = OFFSET + (long) block.getRecordNo() * RECORD_SIZE;
    checkFileSize(pos, RECORD_SIZE);
    ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
    buf.put(block.getStatus());
    buf.putLong(keyOffset2Len);
    buf.putLong(block.getDataOffset2Len());
    buf.putLong(block.getBlock2Version());
    buf.putShort(block.getNode());
    buf.flip();
    channel.write(buf, pos);
}
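This is the classic fixed-size-record pattern: every record occupies exactly RECORD_SIZE bytes at a deterministic offset, so one positional write updates a record in place. A hypothetical read-back counterpart (the constants are inferred from the puts above: one status byte, three longs, and a short, i.e. 27 bytes; OFFSET is an assumed header size) could look like:

static ByteBuffer readIndexBlock(FileChannel channel, int recordNo) throws IOException {
    final long OFFSET = 0;                     // assumed header size
    final int RECORD_SIZE = 1 + 8 + 8 + 8 + 2; // status + 3 longs + short = 27 bytes
    long pos = OFFSET + (long) recordNo * RECORD_SIZE;
    ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
    while (buf.hasRemaining()) {
        // Positional read mirrors the positional write; loop to handle short reads.
        if (channel.read(buf, pos + buf.position()) < 0) {
            throw new IOException("EOF while reading record " + recordNo);
        }
    }
    buf.flip();
    return buf;
}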
From source file:org.apache.hadoop.hdfs.server.namenode.TestListCorruptFileBlocks.java
/**
 * Check that listCorruptFileBlocks works while the namenode is still in safemode.
 */
@Test(timeout = 300000)
public void testListCorruptFileBlocksInSafeMode() throws Exception {
    MiniDFSCluster cluster = null;
    Random random = new Random();
    try {
        Configuration conf = new HdfsConfiguration();
        // datanode scans directories
        conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
        // datanode sends block reports
        conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
        // never leave safemode automatically
        conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1.5f);
        // start populating repl queues immediately
        conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 0f);
        // Set short retry timeouts so this test runs faster
        conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
        cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
        cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
        FileSystem fs = cluster.getFileSystem();
        // create two files with one block each
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testListCorruptFileBlocksInSafeMode")
                .setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
        util.createFiles(fs, "/srcdat10");
        // fetch bad file list from namenode. There should be none.
        Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = cluster.getNameNode().getNamesystem()
                .listCorruptFileBlocks("/", null);
        assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.", badFiles.size() == 0);
        // Now deliberately corrupt one block
        File storageDir = cluster.getInstanceStorageDir(0, 0);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, cluster.getNamesystem().getBlockPoolId());
        assertTrue("data directory does not exist", data_dir.exists());
        List<File> metaFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
        assertTrue("Data directory does not contain any blocks or there was an " + "IO error",
                metaFiles != null && !metaFiles.isEmpty());
        File metaFile = metaFiles.get(0);
        RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
        FileChannel channel = file.getChannel();
        long position = channel.size() - 2;
        int length = 2;
        byte[] buffer = new byte[length];
        random.nextBytes(buffer);
        channel.write(ByteBuffer.wrap(buffer), position);
        file.close();
        LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset " + position + " length "
                + length);
        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }
        // fetch bad file list from namenode. There should be one file.
        badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);
        // restart namenode
        cluster.restartNameNode(0);
        fs = cluster.getFileSystem();
        // wait until replication queues have been initialized
        while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
            try {
                LOG.info("waiting for replication queues");
                Thread.sleep(1000);
            } catch (InterruptedException ignore) {
            }
        }
        // read all files to trigger detection of corrupted replica
        try {
            util.checkFiles(fs, "/srcdat10");
        } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
        } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException "
                    + " but received IOException " + e, false);
        }
        // fetch bad file list from namenode. There should be one file.
        badFiles = cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.", badFiles.size() == 1);
        // check that we are still in safe mode
        assertTrue("Namenode is not in safe mode", cluster.getNameNode().isInSafeMode());
        // now leave safe mode so that we can clean up
        cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
        util.cleanup(fs, "/srcdat10");
    } catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw e;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java
private void writeJunkJournal(File journalDir) throws Exception {
    long logId = System.currentTimeMillis();
    File fn = new File(journalDir, Long.toHexString(logId) + ".txn");
    FileChannel fc = new RandomAccessFile(fn, "rw").getChannel();
    ByteBuffer zeros = ByteBuffer.allocate(512);
    fc.write(zeros, 4 * 1024 * 1024);
    fc.position(0);
    for (int i = 1; i <= 10; i++) {
        fc.write(ByteBuffer.wrap("JunkJunkJunk".getBytes()));
    }
}
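Note the idiom at the start: writing a 512-byte buffer of zeros at offset 4 * 1024 * 1024 grows the file to roughly 4 MB in a single call, because a positional write past the current end of file extends the file (the FileChannel javadoc leaves the content of the gap unspecified, though on common file systems it reads back as zeros). The positional write also leaves the channel's position untouched, so the subsequent fc.position(0) is effectively a no-op safeguard before the relative writes lay down the junk records from the start of the file.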
From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java
private void writePreV2Journal(File journalDir, int numEntries) throws Exception {
    long logId = System.currentTimeMillis();
    File fn = new File(journalDir, Long.toHexString(logId) + ".txn");
    FileChannel fc = new RandomAccessFile(fn, "rw").getChannel();
    ByteBuffer zeros = ByteBuffer.allocate(512);
    fc.write(zeros, 4 * 1024 * 1024);
    fc.position(0);
    byte[] data = "JournalTestData".getBytes();
    long lastConfirmed = LedgerHandle.INVALID_ENTRY_ID;
    for (int i = 1; i <= numEntries; i++) {
        ByteBuffer packet = ClientUtil.generatePacket(1, i, lastConfirmed, i * data.length, data)
                .toByteBuffer();
        lastConfirmed = i;
        ByteBuffer lenBuff = ByteBuffer.allocate(4);
        lenBuff.putInt(packet.remaining());
        lenBuff.flip();
        fc.write(lenBuff);
        fc.write(packet);
    }
}
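Beyond the same preallocation trick as above, this helper shows the two write flavors side by side: the positional write(zeros, 4 * 1024 * 1024) extends the file without moving the channel's position, while the relative write(lenBuff) and write(packet) calls append length-prefixed frames sequentially from position 0, advancing the channel's position as they go.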
From source file:voldemort.store.cachestore.impl.ChannelStore.java
public void writeIndexBlock(CacheBlock<byte[]> block, long keyOffset2Len, FileChannel channel)
        throws IOException {
    long pos = OFFSET + (long) block.getRecordNo() * RECORD_SIZE;
    checkFileSize(pos, RECORD_SIZE);
    ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
    buf.put(block.getStatus());
    buf.putLong(keyOffset2Len);
    buf.putLong(block.getDataOffset2Len());
    buf.putLong(block.getBlock2Version());
    buf.putShort(block.getNode());
    buf.flip();
    channel.write(buf, pos);
}