Example usage for java.nio.file StandardOpenOption READ

Introduction

This page collects example usages of java.nio.file.StandardOpenOption.READ drawn from open-source projects.

Prototype

StandardOpenOption READ

Document

Open for read access.
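
Before the project examples below, here is a minimal, self-contained sketch of the option in use (the file name "example.txt" is illustrative; READ is also the implied default when Files.newInputStream is called without options):

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ReadOptionExample {
    public static void main(String[] args) throws IOException {
        Path path = Paths.get("example.txt"); // illustrative file name
        // Open the file for read access only; the resulting stream cannot be written to.
        try (InputStream is = Files.newInputStream(path, StandardOpenOption.READ)) {
            int b;
            while ((b = is.read()) != -1) {
                System.out.print((char) b);
            }
        }
    }
}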

Usage

From source file: cross.datastructures.pipeline.ResultAwareCommandPipeline.java
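
This example computes a SHA-1 digest over a collection of files, streaming each file through an InputStream opened with StandardOpenOption.READ and returning the zero-padded hexadecimal result.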

/**
 * Calculates a byte-level digest of the given files.
 *
 * @param files the files to calculate the digest for.
 * @return the hexadecimal, zero-padded digest, or null if any exceptions
 *         occurred
 */
public String digest(Collection<File> files) {
    try {
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        for (File file : files) {
            try (InputStream is = Files.newInputStream(file.toPath(), StandardOpenOption.READ)) {
                byte[] buffer = new byte[8192];
                int read = 0;
                while ((read = is.read(buffer)) > 0) {
                    digest.update(buffer, 0, read);
                }
            } catch (IOException ioex) {
                Logger.getLogger(ResultAwareCommandPipeline.class.getName()).log(Level.SEVERE, null, ioex);
                return null;
            }
        }
        byte[] sha1 = digest.digest();
        BigInteger bigInt = new BigInteger(1, sha1);
        return StringUtils.leftPad(bigInt.toString(16), 40, "0");
    } catch (NoSuchAlgorithmException ex) {
        Logger.getLogger(ResultAwareCommandPipeline.class.getName()).log(Level.SEVERE, null, ex);
    }
    return null;
}

From source file: ubicrypt.core.Utils.java
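
This example exposes a file as a plain InputStream: it opens an AsynchronousFileChannel with StandardOpenOption.READ and pumps 64 KiB blocks into a PipedOutputStream from a recursive CompletionHandler.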

public static InputStream readIs(final Path path) {
    final PipedInputStream pis = new PipedInputStream();
    final AtomicLong pos = new AtomicLong(0);
    try {
        final PipedOutputStream ostream = new PipedOutputStream(pis);
        final AsynchronousFileChannel channel = AsynchronousFileChannel.open(path, StandardOpenOption.READ);
        final ByteBuffer buffer = ByteBuffer.allocate(1 << 16);
        channel.read(buffer, pos.get(), buffer, new CompletionHandler<Integer, ByteBuffer>() {
            @Override
            public void completed(final Integer result, final ByteBuffer buf) {
                try {
                    if (result == -1) {
                        ostream.close();
                        return;
                    }
                    final byte[] bytes = new byte[result];
                    System.arraycopy(buf.array(), 0, bytes, 0, result);
                    ostream.write(bytes);
                    ostream.flush();
                    if (result < 1 << 16) {
                        ostream.close();
                        return;
                    }
                    pos.addAndGet(result);
                    final ByteBuffer buffer = ByteBuffer.allocate(1 << 16);
                    channel.read(buffer, pos.get(), buffer, this);
                } catch (final IOException e) {
                    Throwables.propagate(e);
                }
            }

            @Override
            public void failed(final Throwable exc, final ByteBuffer attachment) {
                log.error(exc.getMessage(), exc);
            }
        });
    } catch (final IOException e) {
        if (e instanceof NoSuchFileException) {
            throw new NotFoundException(path);
        }
        Throwables.propagate(e);
    }
    return pis;
}

From source file: de.elomagic.carafile.client.CaraFileClient.java
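
This example registers a file's metadata with a registry, then reads the file chunk by chunk through an InputStream opened with StandardOpenOption.READ and posts each chunk to a selected peer.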

/**
 * Uploads a file (multi-chunk upload).
 * <p/>
 * Multi-chunk upload means the file is divided into one or more chunks, and each chunk may be sent to a different peer.
 *
 * @param path Must be a file and not a directory. File will not be deleted
 * @param filename Name of the file. If null then name of the parameter path will be used
 * @return Returns the {@link MetaData} of the uploaded stream
 * @throws IOException Thrown when unable to call REST services
 * @throws java.security.GeneralSecurityException Thrown when unable to determine SHA-1 of the file
 * @see CaraFileClient#uploadFile(java.net.URI, java.io.InputStream, java.lang.String, long)
 */
public MetaData uploadFile(final Path path, final String filename)
        throws IOException, GeneralSecurityException {
    if (registryURI == null) {
        throw new IllegalArgumentException("Parameter 'registryURI' must not be null!");
    }

    if (path == null) {
        throw new IllegalArgumentException("Parameter 'path' must not be null!");
    }

    if (Files.notExists(path)) {
        throw new FileNotFoundException("File \"" + path + "\" doesn't exist!");
    }

    if (Files.isDirectory(path)) {
        throw new IOException("Parameter 'path' is not a file!");
    }

    String fn = filename == null ? path.getFileName().toString() : filename;

    MetaData md = CaraFileUtils.createMetaData(path, fn);
    md.setRegistryURI(registryURI);

    String json = JsonUtil.write(md);

    LOG.debug("Register " + md.getId() + " file at " + registryURI.toString());
    URI uri = CaraFileUtils.buildURI(registryURI, "registry", "register");
    HttpResponse response = executeRequest(Request.Post(uri).bodyString(json, ContentType.APPLICATION_JSON))
            .returnResponse();

    if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
        throw new IOException("Unable to register file. " + response.getStatusLine().getReasonPhrase());
    }

    Set<PeerData> peerDataSet = downloadPeerSet();

    byte[] buffer = new byte[md.getChunkSize()];
    try (InputStream in = Files.newInputStream(path, StandardOpenOption.READ);
            BufferedInputStream bis = new BufferedInputStream(in, md.getChunkSize())) {
        int bytesRead;
        int chunkIndex = 0;
        while ((bytesRead = bis.read(buffer)) > 0) {
            String chunkId = md.getChunk(chunkIndex).getId();

            URI peerURI = peerSelector.getURI(peerDataSet, chunkIndex);
            URI seedChunkUri = CaraFileUtils.buildURI(peerURI, "peer", "seedChunk", chunkId);

            LOG.debug("Uploading chunk " + chunkId + " to peer " + seedChunkUri.toString() + ";Index="
                    + chunkIndex + ";Length=" + bytesRead);
            response = executeRequest(Request.Post(seedChunkUri).bodyStream(
                    new ByteArrayInputStream(buffer, 0, bytesRead), ContentType.APPLICATION_OCTET_STREAM))
                            .returnResponse();

            if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
                throw new IOException("Unable to upload file. " + response.getStatusLine().getStatusCode() + " "
                        + response.getStatusLine().getReasonPhrase());
            }

            chunkIndex++;
        }
    }

    return md;
}

From source file: org.linagora.linshare.webservice.userv2.impl.FlowDocumentUploaderRestServiceImpl.java
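
This example accepts chunked uploads, appending each chunk to a temporary file at its offset; once the upload is complete, the assembled file is reopened with Files.newInputStream(tempFile, StandardOpenOption.READ) for synchronous or asynchronous processing.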

@Path("/")
@POST
@Consumes("multipart/form-data")
@Override
public FlowDto uploadChunk(@Multipart(CHUNK_NUMBER) long chunkNumber, @Multipart(TOTAL_CHUNKS) long totalChunks,
        @Multipart(CHUNK_SIZE) long chunkSize, @Multipart(CURRENT_CHUNK_SIZE) long currentChunkSize,
        @Multipart(TOTAL_SIZE) long totalSize, @Multipart(IDENTIFIER) String identifier,
        @Multipart(FILENAME) String filename, @Multipart(RELATIVE_PATH) String relativePath,
        @Multipart(FILE) InputStream file, MultipartBody body,
        @Multipart(value = WORK_GROUP_UUID, required = false) String workGroupUuid,
        @Multipart(value = WORK_GROUP_FOLDER_UUID, required = false) String workGroupFolderUuid,
        @Multipart(value = ASYNC_TASK, required = false) boolean async) throws BusinessException {
    logger.debug("upload chunk number : " + chunkNumber);
    identifier = cleanIdentifier(identifier);
    boolean isValid = FlowUploaderUtils.isValid(chunkNumber, chunkSize, totalSize, identifier, filename);
    Validate.isTrue(isValid);
    checkIfMaintenanceIsEnabled();
    FlowDto flow = new FlowDto(chunkNumber);
    try {
        logger.debug("writing chunk number : " + chunkNumber);
        java.nio.file.Path tempFile = FlowUploaderUtils.getTempFile(identifier, chunkedFiles);
        ChunkedFile currentChunkedFile = chunkedFiles.get(identifier);
        if (!currentChunkedFile.hasChunk(chunkNumber)) {
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(file, output);
            // use try-with-resources so the channel is closed even if the write fails
            try (FileChannel fc = FileChannel.open(tempFile, StandardOpenOption.CREATE,
                    StandardOpenOption.APPEND)) {
                fc.write(ByteBuffer.wrap(output.toByteArray()), (chunkNumber - 1) * chunkSize);
            }
            if (sizeValidation) {
                if (output.size() != currentChunkSize) {
                    String msg = String.format("File size does not match, found : %1$d, announced : %2$d",
                            output.size(), currentChunkSize);
                    logger.error(msg);
                    flow.setChunkUploadSuccess(false);
                    flow.setErrorMessage(msg);
                    return flow;
                }
            }
            currentChunkedFile.addChunk(chunkNumber);
        } else {
            logger.error("currentChunkedFile.hasChunk(chunkNumber) !!! " + currentChunkedFile);
            logger.error("chunkedNumber skipped : " + chunkNumber);
        }

        logger.debug("nb uploading files : " + chunkedFiles.size());
        logger.debug("current chunked file uuid : " + identifier);
        logger.debug("current chunked files : " + chunkedFiles);
        if (FlowUploaderUtils.isUploadFinished(identifier, chunkSize, totalSize, chunkedFiles)) {
            flow.setLastChunk(true);
            logger.debug("upload finished : " + chunkNumber + " : " + identifier);
            InputStream inputStream = Files.newInputStream(tempFile, StandardOpenOption.READ);
            File tempFile2 = getTempFile(inputStream, "rest-flowuploader", filename);
            if (sizeValidation) {
                long currSize = tempFile2.length();
                if (currSize != totalSize) {
                    String msg = String.format("File size does not match, found : %1$d, announced : %2$d",
                            currSize, totalSize);
                    logger.error(msg);
                    flow.setChunkUploadSuccess(false);
                    flow.setErrorMessage(msg);
                    return flow;
                }
            }
            EntryDto uploadedDocument = new EntryDto();
            flow.setIsAsync(async);
            boolean isWorkGroup = !Strings.isNullOrEmpty(workGroupUuid);
            if (async) {
                logger.debug("Async mode is used");
                // Asynchronous mode
                AccountDto actorDto = documentFacade.getAuthenticatedAccountDto();
                AsyncTaskDto asyncTask = null;
                try {
                    if (isWorkGroup) {
                        ThreadEntryTaskContext threadEntryTaskContext = new ThreadEntryTaskContext(actorDto,
                                actorDto.getUuid(), workGroupUuid, tempFile2, filename, workGroupFolderUuid);
                        asyncTask = asyncTaskFacade.create(totalSize, getTransfertDuration(identifier),
                                filename, null, AsyncTaskType.THREAD_ENTRY_UPLOAD);
                        ThreadEntryUploadAsyncTask task = new ThreadEntryUploadAsyncTask(threadEntryAsyncFacade,
                                threadEntryTaskContext, asyncTask);
                        taskExecutor.execute(task);
                        flow.completeAsyncTransfert(asyncTask);
                    } else {
                        DocumentTaskContext documentTaskContext = new DocumentTaskContext(actorDto,
                                actorDto.getUuid(), tempFile2, filename, null, null);
                        asyncTask = asyncTaskFacade.create(totalSize, getTransfertDuration(identifier),
                                filename, null, AsyncTaskType.DOCUMENT_UPLOAD);
                        DocumentUploadAsyncTask task = new DocumentUploadAsyncTask(documentAsyncFacade,
                                documentTaskContext, asyncTask);
                        taskExecutor.execute(task);
                        flow.completeAsyncTransfert(asyncTask);
                    }
                } catch (Exception e) {
                    logAsyncFailure(asyncTask, e);
                    deleteTempFile(tempFile2);
                    ChunkedFile remove = chunkedFiles.remove(identifier);
                    Files.deleteIfExists(remove.getPath());
                    throw e;
                }
            } else {
                try {
                    if (isWorkGroup) {
                        uploadedDocument = threadEntryFacade.create(null, workGroupUuid, workGroupFolderUuid,
                                tempFile2, filename);
                    } else {
                        uploadedDocument = documentFacade.create(tempFile2, filename, "", null);
                    }
                    flow.completeTransfert(uploadedDocument);
                } finally {
                    deleteTempFile(tempFile2);
                    ChunkedFile remove = chunkedFiles.remove(identifier);
                    if (remove != null) {
                        Files.deleteIfExists(remove.getPath());
                    } else {
                        logger.error("Should not happen !!!");
                        logger.error("chunk number: " + chunkNumber);
                        logger.error("chunk identifier: " + identifier);
                        logger.error("chunk filename: " + filename);
                        logger.error("chunks : " + chunkedFiles.toString());
                    }
                }
            }
            return flow;
        } else {
            logger.debug("upload pending ");
            flow.setChunkUploadSuccess(true);
        }
    } catch (BusinessException e) {
        logger.error(e.getMessage());
        logger.debug("Exception : ", e);
        flow.setChunkUploadSuccess(false);
        flow.setErrorMessage(e.getMessage());
        flow.setErrCode(e.getErrorCode().getCode());
    } catch (Exception e) {
        logger.error(e.getMessage());
        logger.debug("Exception : ", e);
        flow.setChunkUploadSuccess(false);
        flow.setErrorMessage(e.getMessage());
    }
    return flow;
}

From source file: com.splicemachine.derby.stream.control.ControlDataSetProcessor.java
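
This example builds a read stream for a path: for a directory it opens each contained file with StandardOpenOption.READ and chains the streams with SequenceInputStream; for a regular file it opens a single stream.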

private InputStream getFileStream(String s) throws IOException {
    DistributedFileSystem dfs = SIDriver.driver().fileSystem();
    InputStream value;
    if (dfs.getInfo(s).isDirectory()) {
        //we need to open a Stream against each file in the directory
        InputStream inputStream = null;
        boolean sequenced = false;
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dfs.getPath(s))) {
            for (Path p : stream) {
                if (inputStream == null) {
                    inputStream = newInputStream(dfs, p, StandardOpenOption.READ);
                } else {
                    inputStream = new SequenceInputStream(inputStream,
                            newInputStream(dfs, p, StandardOpenOption.READ));
                }
            }
        }
        value = inputStream;
    } else {
        value = newInputStream(dfs, dfs.getPath(s), StandardOpenOption.READ);
    }
    return value;
}

From source file: spdxedit.SpdxLogic.java
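
This example opens a file with StandardOpenOption.READ and computes its SHA-1 checksum using Commons Codec's DigestUtils.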

public static String getChecksumForFile(Path path) throws IOException {
    try (InputStream is = Files.newInputStream(path, StandardOpenOption.READ)) {
        return DigestUtils.shaHex(is);
    }
}

From source file: com.amazonaws.services.kinesis.producer.Daemon.java
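
This example connects to a child process over named pipes, opening the inbound pipe with StandardOpenOption.READ and the outbound pipe with StandardOpenOption.WRITE, retrying for up to two seconds until both pipes are ready.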

private void connectToChild() throws IOException {
    long start = System.nanoTime();
    while (true) {
        try {
            inChannel = FileChannel.open(Paths.get(inPipe.getAbsolutePath()), StandardOpenOption.READ);
            outChannel = FileChannel.open(Paths.get(outPipe.getAbsolutePath()), StandardOpenOption.WRITE);
            outStream = Channels.newOutputStream(outChannel);
            break;
        } catch (IOException e) {
            if (inChannel != null && inChannel.isOpen()) {
                inChannel.close();
            }
            if (outChannel != null && outChannel.isOpen()) {
                outChannel.close();
            }
            try {
                Thread.sleep(100);
            } catch (InterruptedException e1) {
                // restore the interrupt status instead of swallowing it
                Thread.currentThread().interrupt();
            }
            if (System.nanoTime() - start > 2e9) {
                throw e;
            }
        }
    }
}

From source file: org.apache.kylin.cube.inmemcubing.ConcurrentDiskStore.java
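
This example lazily opens a FileChannel on a disk file with StandardOpenOption.READ and caches it for later reads.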

private void openReadChannel() throws IOException {
    if (readChannel == null) {
        readChannel = FileChannel.open(diskFile.toPath(), StandardOpenOption.READ);
    }
}

From source file: org.lockss.repository.TestRepositoryNodeImpl.java
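
This test probes the maximum directory path length: at each depth it writes one byte through a WRITE channel, reads it back through a channel opened with StandardOpenOption.READ, and checks that the values match.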

void findMaxDirPathNio(File root) {
    int maxName = findMaxDirname(root) - 10;
    String one = mkstr("onedir", maxName) + "/";
    for (int rpt = 1; rpt < 1000; rpt++) {
        String path = StringUtils.repeat(one, rpt);
        File dir = new File(root, path);
        String dirstr = dir.getPath();
        boolean res = dir.mkdirs();
        if (!res) {
            log.info("mkdirs failed at " + dirstr.length() + " chars");
            break;
        }
        log.info("mkdirs ok: " + dirstr.length());
        File f = new File(dir, "foobbb");
        try {
            Path npath = Paths.get(f.getPath());
            Files.createFile(npath);
            FileChannel ochan = FileChannel.open(npath, StandardOpenOption.WRITE);
            OutputStream os = Channels.newOutputStream(ochan);
            os.write((byte) 44);
            os.close();

            FileChannel ichan = FileChannel.open(npath, StandardOpenOption.READ);
            InputStream is = Channels.newInputStream(ichan);
            int bb = is.read();
            is.close();
            assertEquals(44, bb);
            log.info("file ok at " + npath.toString().length() + " chars");
        } catch (FileNotFoundException fnfe) {
            log.error("FNF: " + f.getPath().length(), fnfe);
        } catch (IOException ioe) {
            log.error("IOE: " + f.getPath().length() + ", " + ioe.getMessage());
        }
    }
}

From source file: de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
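
This example implements copy for a Hadoop filesystem provider: the source is opened as a SeekableByteChannel with EnumSet.of(StandardOpenOption.READ) and its contents are shuttled buffer by buffer into a write channel on the target, optionally preserving POSIX and HDFS attributes.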

@Override
public void copy(Path source, Path target, CopyOption... options) throws IOException {
    List<CopyOption> optionList = Arrays.asList(options);
    if (!optionList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        if (Files.exists(target))
            throw new java.nio.file.FileAlreadyExistsException(source.toString(), target.toString(),
                    "could not copy file to destination");
    } else {
        Files.deleteIfExists(target);
    }

    FileSystem sourceFS = source.getFileSystem();
    FileSystem targetFS = target.getFileSystem();

    if (optionList.contains(HadoopCopyOption.REMOTE_COPY) && sourceFS.equals(targetFS)) {

        remoteCopy(source, target, options);
        return;

    }
    try (SeekableByteChannel sourceChannel = sourceFS.provider().newByteChannel(source,
            EnumSet.of(StandardOpenOption.READ))) {

        Set<StandardOpenOption> openOptions = EnumSet.of(StandardOpenOption.WRITE);

        if (optionList.contains(StandardCopyOption.REPLACE_EXISTING))
            openOptions.add(StandardOpenOption.CREATE);
        else
            openOptions.add(StandardOpenOption.CREATE_NEW);
        List<FileAttribute<?>> fileAttributes = new ArrayList<>();
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {

            Set<String> sourceAttrViews = sourceFS.supportedFileAttributeViews();
            Set<String> targetAttrViews = targetFS.supportedFileAttributeViews();
            if (sourceAttrViews.contains(PosixFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(PosixFileAttributeViewImpl.NAME)) {
                PosixFileAttributes posixAttributes = sourceFS.provider().readAttributes(source,
                        PosixFileAttributes.class);
                fileAttributes.add(PosixFilePermissions.asFileAttribute(posixAttributes.permissions()));
            }

            if (sourceAttrViews.contains(HadoopFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(HadoopFileAttributeViewImpl.NAME)) {
                final HadoopFileAttributes hdfsAttributes = sourceFS.provider().readAttributes(source,
                        HadoopFileAttributes.class);
                fileAttributes.add(new FileAttribute<Long>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":blockSize";
                    }

                    @Override
                    public Long value() {
                        return hdfsAttributes.getBlockSize();
                    }
                });
                fileAttributes.add(new FileAttribute<Short>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":replication";
                    }

                    @Override
                    public Short value() {
                        return hdfsAttributes.getReplication();
                    }
                });

            }
        }

        FileAttribute<?>[] attributes = fileAttributes.toArray(new FileAttribute<?>[fileAttributes.size()]);

        try (SeekableByteChannel targetChannel = targetFS.provider().newByteChannel(target, openOptions,
                attributes)) {
            int buffSize = getConfiguration().getInt(DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_KEY,
                    DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_DEFAULT);
            ByteBuffer buffer = ByteBuffer.allocate(buffSize);
            buffer.clear();
            while (sourceChannel.read(buffer) > 0) {
                buffer.flip();
                targetChannel.write(buffer);
                buffer.clear();
            }

        }
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            BasicFileAttributes attrs = sourceFS.provider().readAttributes(source, BasicFileAttributes.class);
            BasicFileAttributeView view = targetFS.provider().getFileAttributeView(target,
                    BasicFileAttributeView.class);
            view.setTimes(attrs.lastModifiedTime(), attrs.lastAccessTime(), attrs.creationTime());

        }

    }

}