List of usage examples for java.nio.file Files newByteChannel
public static SeekableByteChannel newByteChannel(Path path, OpenOption... options) throws IOException
From source file:Test.java
License:asdf
public static void main(String[] args) throws IOException { Path path = Paths.get("/users.txt"); final String newLine = System.getProperty("line.separator"); try (SeekableByteChannel sbc = Files.newByteChannel(path, StandardOpenOption.APPEND)) { String output = newLine + "asdf" + newLine; ByteBuffer buffer = ByteBuffer.wrap(output.getBytes()); sbc.write(buffer);// w w w . j a v a 2s.com } }
From source file:Test.java
License:asdf
public static void main(String[] args) throws IOException { Path path = Paths.get("/users.txt"); final String newLine = System.getProperty("line.separator"); try (SeekableByteChannel sbc = Files.newByteChannel(path, StandardOpenOption.WRITE)) { ByteBuffer buffer;// w ww . j a v a 2 s . co m long position = sbc.size(); sbc.position(position); System.out.println("Position: " + sbc.position()); buffer = ByteBuffer.wrap((newLine + "asdf").getBytes()); sbc.write(buffer); System.out.println("Position: " + sbc.position()); buffer = ByteBuffer.wrap((newLine + "asdf").getBytes()); sbc.write(buffer); System.out.println("Position: " + sbc.position()); } }
From source file:io.horizondb.io.files.DirectFileDataInput.java
/** * Creates a new <code>DirectFileDataInput</code> to read data from the specified file. * /*from w w w . j ava 2 s . com*/ * @param path the file path. * @param bufferSize the size of the buffer being used. */ public DirectFileDataInput(Path path, int bufferSize) throws IOException { notNull(path, "path parameter must not be null"); isTrue(bufferSize > 0, "the buffer size must be greater than zero"); this.channel = (FileChannel) Files.newByteChannel(path, StandardOpenOption.READ); this.buffer = ByteBuffer.allocateDirect(bufferSize); this.slice = Buffers.wrap(this.buffer); fillBuffer(); }
From source file:de.bluepair.sci.client.SHAUtils.java
/**
 * Computes a SHA-512 digest of the whole file plus SHA-512 digests of
 * consecutive blocks of roughly {@code blockSizePref} bytes, so an aborted
 * transfer can later be resumed from the last block whose sum matches.
 *
 * Returns a map with key {@code "sha512"} for the full-file hash and keys of
 * the form {@code "sha512_<start>_<length>"} for each block hash. Returns
 * {@code null} if the file does not exist, the guard predicate fails (before
 * or during hashing), or an I/O error occurs.
 *
 * @param gard guard predicate checked repeatedly; hashing is abandoned when it fails
 * @param testValue the value passed to the guard predicate
 * @param blockSizePref preferred block size in bytes
 * @param forceBlockSize if false, the block size is enlarged so that at most
 *        ~10 block sums are produced for the file
 */
public static <T> Map<String, String> sha512(Path path, Predicate<T> gard, T testValue, long blockSizePref,
        boolean forceBlockSize) {
    if (Files.notExists(path)) {
        return null;
    }
    MessageDigest md = getDigest();   // digest over the whole file
    MessageDigest md1 = getDigest();  // digest over the current block only
    if (!gard.test(testValue)) {
        return null;
    }
    long blockSize = blockSizePref;
    long size = -1;
    try {
        size = Files.size(path);
        if (!forceBlockSize) {
            // At most ~10 block sums per file, otherwise too many
            // entries would end up stored in the file.
            while (size / blockSize > 10) {
                blockSize += blockSizePref;
            }
        }
    } catch (IOException e) {
        blockSize = blockSizePref;
        return null;
    }
    Map<String, String> map = new HashMap<>();
    long lastStart = 0;       // byte offset where the current block started
    long stepDown = blockSize; // bytes remaining in the current block
    try (final SeekableByteChannel fileChannel = Files.newByteChannel(path, StandardOpenOption.READ);) {
        final ByteBuffer buffer = ByteBuffer.allocateDirect(8192);
        int last;
        do {
            // Re-check the guard and file existence on every chunk so a
            // long-running hash can be abandoned mid-way.
            if (!gard.test(testValue) || Files.notExists(path)) {
                return null;
            }
            buffer.clear();
            last = fileChannel.read(buffer);
            buffer.flip();
            md.update(buffer);
            // md.update() consumed the buffer (position == limit); flipping
            // again resets position to 0 with the same limit, so md1 folds in
            // exactly the same bytes.
            buffer.flip();
            md1.update(buffer);
            if (last > 0) {
                stepDown -= last;
            }
            // Rationale: on e.g. a 100 Mbit network (~5 MB/s transfer) an
            // aborted transfer can be resumed when the partial sums are
            // known. Blocks are computed cumulatively/overlapping, i.e.
            // 0-5 c1
            // 0-10 c2
            // 5-10 c3 ...
            if (stepDown <= 0 || (last <= 0)) {
                long len = (blockSize + Math.abs(stepDown));
                if (stepDown > 0) {
                    // correction when last < 0 (EOF before the block filled up)
                    len = blockSize - stepDown;
                }
                stepDown = blockSize;
                map.put("sha512_" + lastStart + "_" + len, Hex.encodeHexString(md1.digest()));
                lastStart += len;
                md1.reset();
            }
        } while (last > 0);
    } catch (IOException ex) {
        Logger.getLogger(FileAnalysis.class.getName()).log(Level.SEVERE, null, ex);
        return null;
    }
    final byte[] sha1hash = md.digest();
    map.put("sha512", Hex.encodeHexString(sha1hash));
    return map;
}
From source file:com.cate.javatransmitter.FileHandler.java
public void setFile(Path inputPath) { //TODO add file selection this.inputPath = inputPath; try {/*from w w w . j a va2s .c om*/ this.sbc = Files.newByteChannel(inputPath, StandardOpenOption.READ); this.nofChunks = (int) Math.ceil((double) (sbc.size()) / packetSize); System.out.println("File Size = " + sbc.size() + " Bytes"); System.out.println("File Size = " + sbc.size() + " Bytes"); } catch (IOException ex) { Logger.getLogger(FileHandler.class.getName()).log(Level.SEVERE, null, ex); System.exit(0); } this.chunkCounter = 0; buf = ByteBuffer.allocate(packetSize); //catch (IOException x) { // System.out.println("caught exception: " + x); // return null; // } // }
From source file:com.arpnetworking.tsdcore.tailer.StatefulTailer.java
/**
 * Main tailer loop: repeatedly opens the tailed file, resumes from a
 * checkpointed position when one is available, and hands the channel to
 * {@code readLoop} until the tailer stops running.
 *
 * @throws IOException on channel open/position failures not handled below
 * @throws InterruptedException if waiting on the trigger is interrupted
 */
private void fileLoop() throws IOException, InterruptedException {
    SeekableByteChannel reader = null;
    InitialPosition nextInitialPosition = _initialPosition;
    try {
        while (isRunning()) {
            // Attempt to open the file; if it does not exist yet, notify the
            // listener and block on the trigger before retrying.
            try {
                reader = Files.newByteChannel(_file.toPath(), StandardOpenOption.READ);
                LOGGER.trace(String.format("Opened file; file=%s", _file));
            } catch (final NoSuchFileException e) {
                _listener.fileNotFound();
                _trigger.waitOnTrigger();
            }
            if (reader != null) {
                // Attempt to resume from checkpoint: start from the configured
                // initial position, then override it with the stored position
                // keyed by the file's content hash, when one is present.
                long position = nextInitialPosition.get(reader);
                // Any subsequent file opens we should start at the beginning
                nextInitialPosition = InitialPosition.START;
                _hash = computeHash(reader, REQUIRED_BYTES_FOR_HASH);
                if (_hash.isPresent()) {
                    position = _positionStore.getPosition(_hash.get()).or(position).longValue();
                }
                LOGGER.trace(
                        String.format("Starting tail; file=%s, position=%d", _file, Long.valueOf(position)));
                reader.position(position);
                // Read the file until readLoop returns (e.g. rotation/stop).
                readLoop(reader);
                // Reset per file state before the next open attempt.
                IOUtils.closeQuietly(reader);
                reader = null;
                _hash = Optional.absent();
            }
        }
    } finally {
        // Ensure the channel is released and state cleared on any exit path.
        IOUtils.closeQuietly(reader);
        reader = null;
        _hash = Optional.absent();
    }
}
From source file:com.github.horrorho.inflatabledonkey.pcs.xfile.FileAssembler.java
static void truncate(Path file, long to) throws UncheckedIOException { // TODO should really limit our written data stream. try {//from ww w. ja va 2 s . c o m if (to == 0) { return; } long size = Files.size(file); if (size > to) { Files.newByteChannel(file, WRITE).truncate(to).close(); logger.debug("-- truncate() - truncated: {}, {} > {}", file, size, to); } else if (size < to) { logger.warn("-- truncate() - cannot truncate: {}, {} > {}", file, size, to); } } catch (IOException ex) { throw new UncheckedIOException(ex); } }
From source file:burstcoin.jminer.core.reader.task.ReaderLoadDriveTask.java
/**
 * Reads the current scoop from the given plot file chunk by chunk and part
 * by part, publishing a {@code ReaderLoadedPartEvent} for each part read.
 *
 * @param plotFile the plot file to read
 * @return {@code true} if loading was aborted because a new block arrived
 *         (the static {@code Reader.blockNumber} moved on); {@code false}
 *         when the file was fully read or an I/O problem was logged
 */
private boolean load(PlotFile plotFile) {
    try (SeekableByteChannel sbc = Files.newByteChannel(plotFile.getFilePath(),
            EnumSet.of(StandardOpenOption.READ))) {
        // Byte offset of this scoop within each chunk's stagger region.
        long currentScoopPosition = scoopNumber * plotFile.getStaggeramt() * MiningPlot.SCOOP_SIZE;
        long partSize = plotFile.getStaggeramt() / plotFile.getNumberOfParts();
        ByteBuffer partBuffer = ByteBuffer.allocate((int) (partSize * MiningPlot.SCOOP_SIZE));
        // optimized plotFiles only have one chunk!
        for (int chunkNumber = 0; chunkNumber < plotFile.getNumberOfChunks(); chunkNumber++) {
            long currentChunkPosition = chunkNumber * plotFile.getStaggeramt() * MiningPlot.PLOT_SIZE;
            // Seek to this scoop's data inside the current chunk.
            sbc.position(currentScoopPosition + currentChunkPosition);
            for (int partNumber = 0; partNumber < plotFile.getNumberOfParts(); partNumber++) {
                sbc.read(partBuffer);
                // A new block arrived while reading: abandon this file.
                if (Reader.blockNumber != blockNumber) {
                    LOG.trace("loadDriveThread stopped!");
                    partBuffer.clear();
                    // NOTE(review): explicit close is redundant here — the
                    // try-with-resources closes sbc, and closing a channel
                    // twice is a harmless no-op.
                    sbc.close();
                    return true;
                } else {
                    // First nonce covered by this chunk/part combination.
                    long chunkPartStartNonce = plotFile.getStartnonce() + (chunkNumber * plotFile.getStaggeramt())
                            + (partNumber * partSize);
                    final byte[] scoops = partBuffer.array();
                    publisher.publishEvent(new ReaderLoadedPartEvent(blockNumber, scoops, chunkPartStartNonce));
                }
                partBuffer.clear();
            }
        }
        // Redundant for the same reason as above; kept as in the original.
        sbc.close();
    } catch (NoSuchFileException exception) {
        LOG.error("File not found ... please restart to rescan plot-files, maybe set rescan to 'true': "
                + exception.getMessage());
    } catch (ClosedByInterruptException e) {
        // we reach this, if we do not wait for task on shutdown - ByteChannel closed by thread interruption
        LOG.trace("reader stopped cause of new block ...");
    } catch (IOException e) {
        LOG.error("IOException: " + e.getMessage());
    }
    return false;
}
From source file:com.arpnetworking.metrics.common.tailer.StatefulTailer.java
/**
 * Main tailer loop: repeatedly opens the tailed file, positions the channel
 * via {@code resume}, and hands it to {@code readLoop} until the tailer
 * stops running or an unrecoverable throwable escapes.
 */
private void fileLoop() {
    SeekableByteChannel reader = null;
    InitialPosition nextInitialPosition = _initialPosition;
    try {
        while (isRunning()) {
            // Attempt to open the file; when it does not exist yet, notify
            // the listener and block on the trigger before retrying.
            try {
                reader = Files.newByteChannel(_file, StandardOpenOption.READ);
                LOGGER.trace().setMessage("Opened file").addData("file", _file).log();
            } catch (final NoSuchFileException e) {
                _listener.fileNotFound();
                _trigger.waitOnTrigger();
            }
            if (reader != null) {
                // Position the reader at the resume point, then notify.
                resume(reader, nextInitialPosition);
                _listener.fileOpened();
                // Any subsequent file opens we should start at the beginning
                nextInitialPosition = InitialPosition.START;
                // Read the file until readLoop returns (rotation/stop).
                readLoop(reader);
                // Reset per file state before the next open attempt.
                IOUtils.closeQuietly(reader);
                reader = null;
                _hash = Optional.empty();
            }
        }
        // Clients may elect to kill the stateful tailer on an exception by calling stop, or they
        // may log the exception and continue. In the latter case it is strongly recommended that
        // clients pause before continuing; otherwise, if the error persists the stateful tailer
        // may create non-trivial load on the io subsystem.
        // NOTE: Any non-exception throwable will kill the stateful tailer.
    } catch (final InterruptedException e) {
        // Restore the interrupt flag before delegating to the handler.
        Thread.currentThread().interrupt();
        handleThrowable(e);
        // CHECKSTYLE.OFF: IllegalCatch - Allow clients to decide how to handle exceptions
    } catch (final Exception e) {
        // CHECKSTYLE.ON: IllegalCatch
        handleThrowable(e);
    } finally {
        // Ensure the channel is released and state cleared on any exit path.
        IOUtils.closeQuietly(reader);
        reader = null;
        _hash = Optional.empty();
    }
}
From source file:com.sastix.cms.server.services.content.impl.HashedDirectoryServiceImpl.java
/** * Writes file to disk and copy the contents of the input byte array. * * @param file a Path with file path.//from w ww . j a va 2 s . com * @param url a remote/local url to be saved as file * @throws IOException */ private void writeFile(final Path file, final URL url) throws IOException { if (url.toString().startsWith("jar:file:///")) { try { writeZipFile(file, url); } catch (Exception e) { throw new IOException(e); } } else { //Create the file SeekableByteChannel sbc = null; try { //Creates a new Readable Byte channel from URL final ReadableByteChannel rbc = Channels.newChannel(url.openStream()); //Create the file sbc = Files.newByteChannel(file, FILE_OPEN_OPTIONS); //Clears the buffer buffer.clear(); //Read input Channel while (rbc.read(buffer) != -1) { // prepare the buffer to be drained buffer.flip(); // write to the channel, may block sbc.write(buffer); // If partial transfer, shift remainder down // If buffer is empty, same as doing clear() buffer.compact(); } // EOF will leave buffer in fill state buffer.flip(); // make sure the buffer is fully drained. while (buffer.hasRemaining()) { sbc.write(buffer); } } finally { if (sbc != null) { sbc.close(); } } } }