Example usage for java.nio.file Files newInputStream

Introduction

This page collects example usages of java.nio.file.Files.newInputStream.

Prototype

public static InputStream newInputStream(Path path, OpenOption... options) throws IOException 

Document

Opens a file, returning an input stream to read from the file.
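
A minimal, self-contained sketch of the call (the file name below is a placeholder): the stream is opened in a try-with-resources block so it is closed even if reading fails.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class NewInputStreamExample {
    public static void main(String[] args) throws IOException {
        // Placeholder path; point this at a file that exists on your system.
        Path path = Paths.get("example.txt");
        try (InputStream in = Files.newInputStream(path, StandardOpenOption.READ)) {
            byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) != -1) {
                // Process the bytes just read; here we only report the count.
                System.out.println("Read " + read + " bytes");
            }
        }
    }
}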

Usage

From source file:cross.datastructures.pipeline.ResultAwareCommandPipeline.java

/**
 * Calculates a byte-level digest of the given files.
 *
 * @param files the files to calculate the digest for.
 * @return the hexadecimal, zero-padded digest, or null if any exceptions
 *         occurred.
 */
public String digest(Collection<File> files) {
    try {
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        for (File file : files) {
            try (InputStream is = Files.newInputStream(file.toPath(), StandardOpenOption.READ)) {
                byte[] buffer = new byte[8192];
                int read = 0;
                while ((read = is.read(buffer)) > 0) {
                    digest.update(buffer, 0, read);
                }
            } catch (IOException ioex) {
                Logger.getLogger(ResultAwareCommandPipeline.class.getName()).log(Level.SEVERE, null, ioex);
                return null;
            }
        }
        byte[] sha1 = digest.digest();
        BigInteger bigInt = new BigInteger(1, sha1);
        return StringUtils.leftPad(bigInt.toString(16), 40, "0");
    } catch (NoSuchAlgorithmException ex) {
        Logger.getLogger(ResultAwareCommandPipeline.class.getName()).log(Level.SEVERE, null, ex);
    }
    return null;
}
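
The same hashing pattern, reduced to a self-contained sketch without the pipeline and logging dependencies (the file names are placeholders, not part of the project above):

import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.List;

public class FileDigestSketch {
    public static void main(String[] args) throws IOException, NoSuchAlgorithmException {
        // Placeholder inputs; replace with real files.
        List<Path> files = Arrays.asList(Paths.get("a.txt"), Paths.get("b.txt"));
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        byte[] buffer = new byte[8192];
        for (Path file : files) {
            try (InputStream is = Files.newInputStream(file, StandardOpenOption.READ)) {
                int read;
                while ((read = is.read(buffer)) != -1) {
                    digest.update(buffer, 0, read);
                }
            }
        }
        // Zero-pad to 40 hex characters, mirroring the StringUtils.leftPad call above.
        String hex = String.format("%040x", new BigInteger(1, digest.digest()));
        System.out.println(hex);
    }
}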

From source file:de.elomagic.carafile.client.CaraFileClient.java

/**
 * Uploads a file (multi-chunk upload).
 * <p/>
 * Multi-chunk upload means that the file will be divided into one or more chunks, and each chunk can be uploaded to a different peer.
 *
 * @param path Must be a file and not a directory. File will not be deleted
 * @param filename Name of the file. If null, the file name of the path parameter will be used
 * @return Returns the {@link MetaData} of the uploaded stream
 * @throws IOException Thrown when unable to call REST services
 * @throws java.security.GeneralSecurityException Thrown when unable to determine SHA-1 of the file
 * @see CaraFileClient#uploadFile(java.net.URI, java.io.InputStream, java.lang.String, long)
 */
public MetaData uploadFile(final Path path, final String filename)
        throws IOException, GeneralSecurityException {
    if (registryURI == null) {
        throw new IllegalArgumentException("Parameter 'registryURI' must not be null!");
    }

    if (path == null) {
        throw new IllegalArgumentException("Parameter 'path' must not be null!");
    }

    if (Files.notExists(path)) {
        throw new FileNotFoundException("File \"" + path + "\" doesn't exist!");
    }

    if (Files.isDirectory(path)) {
        throw new IOException("Parameter 'path' is not a file!");
    }

    String fn = filename == null ? path.getFileName().toString() : filename;

    MetaData md = CaraFileUtils.createMetaData(path, fn);
    md.setRegistryURI(registryURI);

    String json = JsonUtil.write(md);

    LOG.debug("Register " + md.getId() + " file at " + registryURI.toString());
    URI uri = CaraFileUtils.buildURI(registryURI, "registry", "register");
    HttpResponse response = executeRequest(Request.Post(uri).bodyString(json, ContentType.APPLICATION_JSON))
            .returnResponse();

    if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
        throw new IOException("Unable to register file. " + response.getStatusLine().getReasonPhrase());
    }

    Set<PeerData> peerDataSet = downloadPeerSet();

    byte[] buffer = new byte[md.getChunkSize()];
    try (InputStream in = Files.newInputStream(path, StandardOpenOption.READ);
            BufferedInputStream bis = new BufferedInputStream(in, md.getChunkSize())) {
        int bytesRead;
        int chunkIndex = 0;
        while ((bytesRead = bis.read(buffer)) > 0) {
            String chunkId = md.getChunk(chunkIndex).getId();

            URI peerURI = peerSelector.getURI(peerDataSet, chunkIndex);
            URI seedChunkUri = CaraFileUtils.buildURI(peerURI, "peer", "seedChunk", chunkId);

            LOG.debug("Uploading chunk " + chunkId + " to peer " + seedChunkUri.toString() + ";Index="
                    + chunkIndex + ";Length=" + bytesRead);
            response = executeRequest(Request.Post(seedChunkUri).bodyStream(
                    new ByteArrayInputStream(buffer, 0, bytesRead), ContentType.APPLICATION_OCTET_STREAM))
                            .returnResponse();

            if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
                throw new IOException("Unable to upload file. " + response.getStatusLine().getStatusCode() + " "
                        + response.getStatusLine().getReasonPhrase());
            }

            chunkIndex++;
        }
    }

    return md;
}
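
As a side note on the chunking arithmetic the loop above relies on: the number of chunks for a file is the file size divided by the chunk size, rounded up. A small illustrative sketch (not part of the CaraFile API):

public class ChunkMath {
    // Ceiling division: the number of chunks needed to cover fileSize bytes.
    static long chunkCount(long fileSize, int chunkSize) {
        return (fileSize + chunkSize - 1) / chunkSize;
    }

    public static void main(String[] args) {
        // e.g. a 10 MiB file with a 4 MiB chunk size needs 3 chunks (4 + 4 + 2 MiB).
        System.out.println(chunkCount(10L * 1024 * 1024, 4 * 1024 * 1024)); // prints 3
    }
}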

From source file:org.linagora.linshare.webservice.userv2.impl.FlowDocumentUploaderRestServiceImpl.java

@Path("/")
@POST
@Consumes("multipart/form-data")
@Override
public FlowDto uploadChunk(@Multipart(CHUNK_NUMBER) long chunkNumber, @Multipart(TOTAL_CHUNKS) long totalChunks,
        @Multipart(CHUNK_SIZE) long chunkSize, @Multipart(CURRENT_CHUNK_SIZE) long currentChunkSize,
        @Multipart(TOTAL_SIZE) long totalSize, @Multipart(IDENTIFIER) String identifier,
        @Multipart(FILENAME) String filename, @Multipart(RELATIVE_PATH) String relativePath,
        @Multipart(FILE) InputStream file, MultipartBody body,
        @Multipart(value = WORK_GROUP_UUID, required = false) String workGroupUuid,
        @Multipart(value = WORK_GROUP_FOLDER_UUID, required = false) String workGroupFolderUuid,
        @Multipart(value = ASYNC_TASK, required = false) boolean async) throws BusinessException {
    logger.debug("upload chunk number : " + chunkNumber);
    identifier = cleanIdentifier(identifier);
    boolean isValid = FlowUploaderUtils.isValid(chunkNumber, chunkSize, totalSize, identifier, filename);
    Validate.isTrue(isValid);
    checkIfMaintenanceIsEnabled();
    FlowDto flow = new FlowDto(chunkNumber);
    try {
        logger.debug("writing chunk number : " + chunkNumber);
        java.nio.file.Path tempFile = FlowUploaderUtils.getTempFile(identifier, chunkedFiles);
        ChunkedFile currentChunkedFile = chunkedFiles.get(identifier);
        if (!currentChunkedFile.hasChunk(chunkNumber)) {
            FileChannel fc = FileChannel.open(tempFile, StandardOpenOption.CREATE, StandardOpenOption.APPEND);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(file, output);
            fc.write(ByteBuffer.wrap(output.toByteArray()), (chunkNumber - 1) * chunkSize);
            fc.close();
            if (sizeValidation) {
                if (output.size() != currentChunkSize) {
                    String msg = String.format("File size does not match, found : %1$d, announced : %2$d",
                            output.size(), currentChunkSize);
                    logger.error(msg);
                    flow.setChunkUploadSuccess(false);
                    flow.setErrorMessage(msg);
                    return flow;
                }
            }
            currentChunkedFile.addChunk(chunkNumber);
        } else {
            logger.error("currentChunkedFile.hasChunk(chunkNumber) !!! " + currentChunkedFile);
            logger.error("chunkedNumber skipped : " + chunkNumber);
        }

        logger.debug("nb uploading files : " + chunkedFiles.size());
    logger.debug("current chunkedFile uuid : " + identifier);
    logger.debug("current chunkedFiles : " + chunkedFiles.toString());
        if (FlowUploaderUtils.isUploadFinished(identifier, chunkSize, totalSize, chunkedFiles)) {
            flow.setLastChunk(true);
            logger.debug("upload finished : " + chunkNumber + " : " + identifier);
            InputStream inputStream = Files.newInputStream(tempFile, StandardOpenOption.READ);
            File tempFile2 = getTempFile(inputStream, "rest-flowuploader", filename);
            if (sizeValidation) {
                long currSize = tempFile2.length();
                if (currSize != totalSize) {
                    String msg = String.format("File size does not match, found : %1$d, announced : %2$d",
                            currSize, totalSize);
                    logger.error(msg);
                    flow.setChunkUploadSuccess(false);
                    flow.setErrorMessage(msg);
                    return flow;
                }
            }
            EntryDto uploadedDocument = new EntryDto();
            flow.setIsAsync(async);
            boolean isWorkGroup = !Strings.isNullOrEmpty(workGroupUuid);
            if (async) {
                logger.debug("Async mode is used");
                // Asynchronous mode
                AccountDto actorDto = documentFacade.getAuthenticatedAccountDto();
                AsyncTaskDto asyncTask = null;
                try {
                    if (isWorkGroup) {
                        ThreadEntryTaskContext threadEntryTaskContext = new ThreadEntryTaskContext(actorDto,
                                actorDto.getUuid(), workGroupUuid, tempFile2, filename, workGroupFolderUuid);
                        asyncTask = asyncTaskFacade.create(totalSize, getTransfertDuration(identifier),
                                filename, null, AsyncTaskType.THREAD_ENTRY_UPLOAD);
                        ThreadEntryUploadAsyncTask task = new ThreadEntryUploadAsyncTask(threadEntryAsyncFacade,
                                threadEntryTaskContext, asyncTask);
                        taskExecutor.execute(task);
                        flow.completeAsyncTransfert(asyncTask);
                    } else {
                        DocumentTaskContext documentTaskContext = new DocumentTaskContext(actorDto,
                                actorDto.getUuid(), tempFile2, filename, null, null);
                        asyncTask = asyncTaskFacade.create(totalSize, getTransfertDuration(identifier),
                                filename, null, AsyncTaskType.DOCUMENT_UPLOAD);
                        DocumentUploadAsyncTask task = new DocumentUploadAsyncTask(documentAsyncFacade,
                                documentTaskContext, asyncTask);
                        taskExecutor.execute(task);
                        flow.completeAsyncTransfert(asyncTask);
                    }
                } catch (Exception e) {
                    logAsyncFailure(asyncTask, e);
                    deleteTempFile(tempFile2);
                    ChunkedFile remove = chunkedFiles.remove(identifier);
                    Files.deleteIfExists(remove.getPath());
                    throw e;
                }
            } else {
                try {
                    if (isWorkGroup) {
                        uploadedDocument = threadEntryFacade.create(null, workGroupUuid, workGroupFolderUuid,
                                tempFile2, filename);
                    } else {
                        uploadedDocument = documentFacade.create(tempFile2, filename, "", null);
                    }
                    flow.completeTransfert(uploadedDocument);
                } finally {
                    deleteTempFile(tempFile2);
                    ChunkedFile remove = chunkedFiles.remove(identifier);
                    if (remove != null) {
                        Files.deleteIfExists(remove.getPath());
                    } else {
                        logger.error("Should not happen !!!");
                        logger.error("chunk number: " + chunkNumber);
                        logger.error("chunk identifier: " + identifier);
                        logger.error("chunk filename: " + filename);
                        logger.error("chunks : " + chunkedFiles.toString());
                    }
                }
            }
            return flow;
        } else {
            logger.debug("upload pending ");
            flow.setChunkUploadSuccess(true);
        }
    } catch (BusinessException e) {
        logger.error(e.getMessage());
        logger.debug("Exception : ", e);
        flow.setChunkUploadSuccess(false);
        flow.setErrorMessage(e.getMessage());
        flow.setErrCode(e.getErrorCode().getCode());
    } catch (Exception e) {
        logger.error(e.getMessage());
        logger.debug("Exception : ", e);
        flow.setChunkUploadSuccess(false);
        flow.setErrorMessage(e.getMessage());
    }
    return flow;
}

From source file:codes.thischwa.c5c.impl.LocalConnector.java

@Override
public StreamContent download(String backendPath) throws C5CException {
    Path file = buildRealPath(backendPath);
    try {
        InputStream in = new BufferedInputStream(Files.newInputStream(file, StandardOpenOption.READ));
        return buildStreamContent(in, Files.size(file));
    } catch (FileNotFoundException e) {
        logger.error("Requested file does not exist: {}", file.toAbsolutePath());
        throw new FilemanagerException(FilemanagerAction.DOWNLOAD, FilemanagerException.Key.FileNotExists,
                backendPath);
    } catch (IOException | SecurityException e) {
        String msg = String.format("Error while downloading %s: %s", file.getFileName(),
                e.getMessage());
        logger.error(msg, e);
        throw new C5CException(FilemanagerAction.DOWNLOAD, msg);
    }
}

From source file:com.themodernway.server.core.io.IO.java

public static final InputStream toInputStream(final Path path, final OpenOption... options) throws IOException {
    return Files.newInputStream(CommonOps.requireNonNull(path), options);
}

From source file:org.neo4j.io.fs.FileUtils.java

public static InputStream openAsInputStream(Path path) throws IOException {
    return Files.newInputStream(path, READ);
}

From source file:codes.thischwa.c5c.impl.LocalConnector.java

@Override
public String editFile(String backendPath) throws C5CException {
    Path file = buildRealPath(backendPath);
    InputStream in = null;
    try {
        in = Files.newInputStream(file, StandardOpenOption.READ);
        return IOUtils.toString(in, PropertiesLoader.getDefaultEncoding());
    } catch (IOException e) {
        throw new C5CException(FilemanagerAction.EDITFILE, e.getMessage());
    } finally {
        IOUtils.closeQuietly(in);
    }
}

From source file:info.novatec.inspectit.rcp.storage.util.DataRetriever.java

/**
 * Returns cached data for the given hash locally. This method can be used when the storage is fully
 * downloaded.
 * 
 * @param <E>
 *            Type of the objects that are wanted.
 * 
 * @param localStorageData
 *            {@link LocalStorageData} that points to the wanted storage.
 * @param hash
 *            Hash under which the cached data is stored.
 * @return Returns cached data for the storage if the cached data exists for the given hash. If data
 *         does not exist <code>null</code> is returned.
 * @throws SerializationException
 *             If {@link SerializationException} occurs.
 * @throws IOException
 *             If {@link IOException} occurs.
 */
@SuppressWarnings("unchecked")
public <E extends DefaultData> List<E> getCachedDataLocally(LocalStorageData localStorageData, int hash)
        throws IOException, SerializationException {
    Path path = storageManager.getCachedDataPath(localStorageData, hash);
    if (Files.notExists(path)) {
        return null;
    } else {
        ISerializer serializer = null;
        try {
            serializer = serializerQueue.take();
        } catch (InterruptedException e) {
            Thread.interrupted();
        }

        Input input = null;
        try (InputStream inputStream = Files.newInputStream(path, StandardOpenOption.READ)) {
            input = new Input(inputStream);
            Object object = serializer.deserialize(input);
            List<E> receivedData = (List<E>) object;
            return receivedData;
        } finally {
            if (null != input) {
                input.close();
            }
            serializerQueue.add(serializer);
        }
    }
}

From source file:com.google.pubsub.flic.controllers.GCEController.java

/**
 * Uploads a given file to Google Storage.
 */
private void uploadFile(Path filePath) throws IOException {
    try {
        byte[] md5hash = Base64.decodeBase64(
                storage.objects().get(projectName + "-cloud-pubsub-loadtest", filePath.getFileName().toString())
                        .execute().getMd5Hash());
        try (InputStream inputStream = Files.newInputStream(filePath, StandardOpenOption.READ)) {
            if (Arrays.equals(md5hash, DigestUtils.md5(inputStream))) {
                log.info("File " + filePath.getFileName() + " is current, reusing.");
                return;
            }
        }
        log.info("File " + filePath.getFileName() + " is out of date, uploading new version.");
        storage.objects().delete(projectName + "-cloud-pubsub-loadtest", filePath.getFileName().toString())
                .execute();
    } catch (GoogleJsonResponseException e) {
        if (e.getStatusCode() != NOT_FOUND) {
            throw e;
        }
    }
    try (InputStream inputStream = Files.newInputStream(filePath, StandardOpenOption.READ)) {
        storage.objects()
                .insert(projectName + "-cloud-pubsub-loadtest", null,
                        new InputStreamContent("application/octet-stream", inputStream))
                .setName(filePath.getFileName().toString()).execute();
        log.info("File " + filePath.getFileName() + " created.");
    }
}

From source file:com.github.podd.resources.UploadArtifactResourceImpl.java

private InferredOWLOntologyID uploadFileAndLoadArtifactIntoPodd(final Representation entity)
        throws ResourceException {
    List<FileItem> items;
    Path filePath = null;
    String contentType = null;

    // 1: Create a factory for disk-based file items
    final DiskFileItemFactory factory = new DiskFileItemFactory(1000240, this.tempDirectory.toFile());

    // 2: Create a new file upload handler
    final RestletFileUpload upload = new RestletFileUpload(factory);
    final Map<String, String> props = new HashMap<String, String>();
    try {
        // 3: Request is parsed by the handler which generates a list of
        // FileItems
        items = upload.parseRequest(this.getRequest());

        for (final FileItem fi : items) {
            final String name = fi.getName();

            if (name == null) {
                props.put(fi.getFieldName(), new String(fi.get(), StandardCharsets.UTF_8));
            } else {
                // FIXME: Strip everything up to the last '.' out of the filename
                // so that the filename can be used for content type determination
                // where possible.
                // InputStream uploadedFileInputStream = fi.getInputStream();
                try {
                    // Note: These are Java-7 APIs
                    contentType = fi.getContentType();
                    props.put("Content-Type", fi.getContentType());

                    filePath = Files.createTempFile(this.tempDirectory, "ontologyupload-", name);
                    final File file = filePath.toFile();
                    file.deleteOnExit();
                    fi.write(file);
                } catch (final IOException ioe) {
                    throw ioe;
                } catch (final Exception e) {
                    // Avoid throwing a generic exception just because the Apache
                    // Commons library throws Exception.
                    throw new IOException(e);
                }
            }
        }
    } catch (final IOException | FileUploadException e) {
        throw new ResourceException(Status.CLIENT_ERROR_BAD_REQUEST, e);
    }

    this.log.info("props={}", props.toString());

    if (filePath == null) {
        throw new ResourceException(Status.CLIENT_ERROR_BAD_REQUEST,
                "Did not submit a valid file and filename");
    }

    this.log.info("filename={}", filePath.toAbsolutePath().toString());
    this.log.info("contentType={}", contentType);

    RDFFormat format = null;

    // If the content type was application/octet-stream then use the file name instead.
    // Browsers attach this content type when they are not sure what the real type is.
    if (MediaType.APPLICATION_OCTET_STREAM.getName().equals(contentType)) {
        format = Rio.getParserFormatForFileName(filePath.getFileName().toString());

        this.log.info("octet-stream contentType filename format={}", format);
    }
    // Otherwise use the content type directly in preference to using the
    // filename
    else if (contentType != null) {
        format = Rio.getParserFormatForMIMEType(contentType);

        this.log.info("non-octet-stream contentType format={}", format);
    }

    // If the content type choices failed to resolve the type, then try the
    // filename
    if (format == null) {
        format = Rio.getParserFormatForFileName(filePath.getFileName().toString());

        this.log.info("non-content-type filename format={}", format);
    }

    // Or fall back to RDF/XML, which at minimum is able to detect when the
    // document is structurally invalid.
    if (format == null) {
        this.log.warn("Could not determine RDF format from request so falling back to RDF/XML");
        format = RDFFormat.RDFXML;
    }

    try (final InputStream inputStream = new BufferedInputStream(
            Files.newInputStream(filePath, StandardOpenOption.READ));) {
        return this.uploadFileAndLoadArtifactIntoPodd(inputStream, format, DanglingObjectPolicy.REPORT,
                DataReferenceVerificationPolicy.DO_NOT_VERIFY);
    } catch (final IOException e) {
        throw new ResourceException(Status.SERVER_ERROR_INTERNAL, "File IO error occurred", e);
    }

}