Example usage for java.nio.file StandardOpenOption WRITE

List of usage examples for java.nio.file StandardOpenOption WRITE

Introduction

On this page you can find usage examples for java.nio.file.StandardOpenOption.WRITE.

Prototype

StandardOpenOption WRITE

Document

Open for write access.
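
Before the collected examples, here is a minimal sketch of the option in isolation (the file name "example.txt" is hypothetical). Note that WRITE alone requires the target file to exist, so it is commonly paired with CREATE or CREATE_NEW when the file may be missing.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class WriteOptionSketch {
    public static void main(String[] args) throws IOException {
        Path path = Paths.get("example.txt"); // hypothetical path
        // CREATE makes the file if it is absent; WRITE opens it for writing.
        // Without TRUNCATE_EXISTING, any old bytes beyond what is written here remain.
        Files.write(path, "hello".getBytes(StandardCharsets.UTF_8),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE);
    }
}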

Usage

From source file:org.lockss.repository.TestRepositoryNodeImpl.java
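
This test probes how deep a directory path can get before filesystem operations fail: it keeps nesting directories and, at each depth, creates a file and opens it with FileChannel.open(npath, StandardOpenOption.WRITE) to check that a byte can still be written and read back.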

void findMaxDirPathNio(File root) {
    int maxName = findMaxDirname(root) - 10;
    String one = mkstr("onedir", maxName) + "/";
    for (int rpt = 1; rpt < 1000; rpt++) {
        String path = StringUtils.repeat(one, rpt);
        File dir = new File(root, path);
        String dirstr = dir.getPath();
        boolean res = dir.mkdirs();
        if (!res) {
            log.info("mkdirs failed at " + dirstr.length() + " chars");
            break;
        }
        log.info("mkdirs ok: " + dirstr.length());
        File f = new File(dir, "foobbb");
        try {
            Path npath = Paths.get(f.getPath());
            Files.createFile(npath);
            FileChannel ochan = FileChannel.open(npath, StandardOpenOption.WRITE);
            OutputStream os = Channels.newOutputStream(ochan);
            os.write((byte) 44);
            os.close();

            FileChannel ichan = FileChannel.open(npath, StandardOpenOption.READ);
            InputStream is = Channels.newInputStream(ichan);
            int bb = is.read();
            is.close();
            assertEquals(44, bb);
            log.info("file ok at " + npath.toString().length() + " chars");
        } catch (FileNotFoundException fnfe) {
            log.error("FNF: " + f.getPath().length(), fnfe);
        } catch (IOException ioe) {
            log.error("IOE: " + f.getPath().length() + ", " + ioe.getMessage());
        }
    }
}

From source file:com.facebook.buck.cxx.ArchiveStepIntegrationTest.java
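
This Buck test builds a thin archive from a large input, then patches the archive header in place through Files.newOutputStream(..., StandardOpenOption.WRITE); because TRUNCATE_EXISTING is not passed, only the leading header bytes are overwritten and the rest of the file is preserved.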

@Test
public void thinArchives() throws IOException, InterruptedException {
    assumeTrue(Platform.detect() == Platform.MACOS || Platform.detect() == Platform.LINUX);
    ProjectFilesystem filesystem = new ProjectFilesystem(tmp.getRoot());
    CxxPlatform platform = CxxPlatformUtils.build(new CxxBuckConfig(FakeBuckConfig.builder().build()));
    assumeTrue(platform.getAr().supportsThinArchives());

    // Build up the paths to various files the archive step will use.
    SourcePathResolver sourcePathResolver = new SourcePathResolver(new SourcePathRuleFinder(
            new BuildRuleResolver(TargetGraph.EMPTY, new DefaultTargetNodeToBuildRuleTransformer())));
    Archiver archiver = platform.getAr();

    Path output = filesystem.getPath("foo/libthin.a");
    filesystem.mkdirs(output.getParent());

    // Create a really large input file so it's obvious that the archive is thin.
    Path input = filesystem.getPath("bar/blah.dat");
    filesystem.mkdirs(input.getParent());
    byte[] largeInputFile = new byte[1024 * 1024];
    byte[] fillerToRepeat = "hello\n".getBytes(StandardCharsets.UTF_8);
    for (int i = 0; i < largeInputFile.length; i++) {
        largeInputFile[i] = fillerToRepeat[i % fillerToRepeat.length];
    }
    filesystem.writeBytesToPath(largeInputFile, input);

    // Build an archive step.
    ArchiveStep archiveStep = new ArchiveStep(filesystem, archiver.getEnvironment(),
            archiver.getCommandPrefix(sourcePathResolver), ImmutableList.of(), getArchiveOptions(true), output,
            ImmutableList.of(input), archiver);

    // Execute the archive step and verify it ran successfully.
    ExecutionContext executionContext = TestExecutionContext.newInstance();
    TestConsole console = (TestConsole) executionContext.getConsole();
    int exitCode = archiveStep.execute(executionContext).getExitCode();
    assertEquals("archive step failed: " + console.getTextWrittenToStdErr(), 0, exitCode);

    // Verify that the thin header is present.
    assertThat(filesystem.readFirstLine(output), Matchers.equalTo(Optional.of("!<thin>")));

    // Verify that even though the archived contents are really big, the archive is still small.
    assertThat(filesystem.getFileSize(output), Matchers.lessThan(1000L));

    // NOTE: Replace the thin header with a normal header just so the commons compress parser
    // can parse the archive contents.
    try (OutputStream outputStream = Files.newOutputStream(filesystem.resolve(output),
            StandardOpenOption.WRITE)) {
        outputStream.write(ObjectFileScrubbers.GLOBAL_HEADER);
    }

    // Now read the archive entries and verify that the timestamp, UID, and GID fields are
    // zeroed out.
    try (ArArchiveInputStream stream = new ArArchiveInputStream(
            new FileInputStream(filesystem.resolve(output).toFile()))) {
        ArArchiveEntry entry = stream.getNextArEntry();

        // Verify that the input names are relative paths from the output's parent dir.
        assertThat(entry.getName(), Matchers.equalTo(output.getParent().relativize(input).toString()));
    }
}

From source file:org.apache.nifi.controller.StandardFlowService.java
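
This NiFi method replaces the persisted flow definition: WRITE is combined with CREATE so the output stream works whether or not flowXml already exists, and the stream is gzip-compressed before the new flow is copied in.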

@Override
public void overwriteFlow(final InputStream is) throws IOException {
    writeLock.lock();
    try (final OutputStream output = Files.newOutputStream(flowXml, StandardOpenOption.WRITE,
            StandardOpenOption.CREATE); final OutputStream gzipOut = new GZIPOutputStream(output);) {
        FileUtils.copy(is, gzipOut);
    } finally {
        writeLock.unlock();
    }
}

From source file:org.schedulesdirect.grabber.ScheduleTask.java
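
This Schedules Direct grabber determines which cached station schedules are stale and refreshes the MD5 cache with Files.write(..., WRITE, TRUNCATE_EXISTING, CREATE), a combination that creates the file when missing and discards any previous contents before writing.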

protected Map<String, Collection<String>> getStaleStationIds() {
    Map<String, Collection<String>> staleIds = new HashMap<>();
    DefaultJsonRequest req = factory.get(DefaultJsonRequest.Action.POST, RestNouns.SCHEDULE_MD5S,
            clnt.getHash(), clnt.getUserAgent(), clnt.getBaseUrl());
    JSONArray data = new JSONArray();
    for (int i = 0; i < this.req.length(); ++i) {
        JSONObject o = new JSONObject();
        o.put("stationID", this.req.getString(i));
        data.put(o);
    }
    try {
        JSONObject result = Config.get().getObjectMapper().readValue(req.submitForJson(data), JSONObject.class);
        if (!JsonResponseUtils.isErrorResponse(result)) {
            Iterator<?> idItr = result.keys();
            while (idItr.hasNext()) {
                String stationId = idItr.next().toString();
                boolean schedFileExists = Files
                        .exists(vfs.getPath("schedules", String.format("%s.txt", stationId)));
                Path cachedMd5File = vfs.getPath("md5s", String.format("%s.txt", stationId));
                JSONObject cachedMd5s = Files.exists(cachedMd5File)
                        ? Config.get().getObjectMapper()
                                .readValue(new String(Files.readAllBytes(cachedMd5File),
                                        ZipEpgClient.ZIP_CHARSET.toString()), JSONObject.class)
                        : new JSONObject();
                JSONObject stationInfo = result.getJSONObject(stationId);
                Iterator<?> dateItr = stationInfo.keys();
                while (dateItr.hasNext()) {
                    String date = dateItr.next().toString();
                    JSONObject dateInfo = stationInfo.getJSONObject(date);
                    if (!schedFileExists || isScheduleStale(dateInfo, cachedMd5s.optJSONObject(date))) {
                        Collection<String> dates = staleIds.get(stationId);
                        if (dates == null) {
                            dates = new ArrayList<String>();
                            staleIds.put(stationId, dates);
                        }
                        dates.add(date);
                        if (LOG.isDebugEnabled())
                            LOG.debug(String.format("Station %s/%s queued for refresh!", stationId, date));
                    } else if (LOG.isDebugEnabled())
                        LOG.debug(String.format("Station %s is unchanged on the server; skipping it!",
                                stationId));
                }
                Files.write(cachedMd5File, stationInfo.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET),
                        StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING,
                        StandardOpenOption.CREATE);
            }
        }
    } catch (Throwable t) {
        Grabber.failedTask = true;
        LOG.error("Error processing cache; returning partial stale list!", t);
    }
    return staleIds;
}

From source file:com.spectralogic.ds3client.integration.Smoke_Test.java
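
This integration test downloads bucket contents into temporary files; each channel is opened with DELETE_ON_CLOSE alongside WRITE, so the temporary file is removed automatically once the channel is closed.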

@Test
public void getContents() throws IOException, URISyntaxException, XmlProcessingException, InterruptedException {
    final String bucketName = "test_get_contents";

    try {
        HELPERS.ensureBucketExists(bucketName, envDataPolicyId);
        loadBookTestData(client, bucketName);

        final Ds3ClientHelpers.Job job = HELPERS.startReadAllJob(bucketName);

        final UUID jobId = job.getJobId();

        job.transfer(new Ds3ClientHelpers.ObjectChannelBuilder() {
            @Override
            public SeekableByteChannel buildChannel(final String key) throws IOException {
                final Path filePath = Files.createTempFile("ds3", key);
                return Files.newByteChannel(filePath, StandardOpenOption.DELETE_ON_CLOSE,
                        StandardOpenOption.WRITE);
            }
        });

        assertThat(JobStatusHelper.getJobStatusWithRetries(client, jobId, JobStatus.COMPLETED),
                is(JobStatus.COMPLETED));

    } finally {
        deleteAllContents(client, bucketName);
    }
}

From source file:fr.gael.dhus.sync.impl.ODataProductSynchronizer.java
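
This synchronizer downloads a product over HTTP into a FileChannel opened with CREATE_NEW and WRITE; CREATE_NEW makes the open fail if the destination file already exists, guarding against clobbering a concurrent download.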

/**
 * Uses the given `http_client` to download `url` into `out_tmp`.
 * Renames `out_tmp` to the value of the filename param of the Content-Disposition header field.
 * Returns a path to the renamed file.
 *
 * @param http_client synchronous interruptible HTTP client.
 * @param out_tmp download destination file on disk (will be created if does not exist).
 * @param url what to download.
 * @return Path to file with its actual name.
 * @throws IOException Anything went wrong (with IO or network, or if the HTTP header field
 *       Content-Disposition is missing).
 * @throws InterruptedException Thread has been interrupted.
 */
private DownloadResult downloadValidateRename(InterruptibleHttpClient http_client, Path out_tmp, String url)
        throws IOException, InterruptedException {
    try (FileChannel output = FileChannel.open(out_tmp, StandardOpenOption.CREATE_NEW,
            StandardOpenOption.WRITE)) {

        HttpResponse response = http_client.interruptibleGet(url, output);

        // If the response's status code is not 200, something wrong happened
        if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
            Formatter ff = new Formatter();
            ff.format(
                    "Synchronizer#%d cannot download product at %s,"
                            + " remote dhus returned message '%s' (HTTP%d)",
                    getId(), url, response.getStatusLine().getReasonPhrase(),
                    response.getStatusLine().getStatusCode());
            throw new IOException(ff.out().toString());
        }

        // Gets the filename from the HTTP header field `Content-Disposition'
        Pattern pat = Pattern.compile("filename=\"(.+?)\"", Pattern.CASE_INSENSITIVE);
        String contdis = response.getFirstHeader("Content-Disposition").getValue();
        Matcher m = pat.matcher(contdis);
        if (!m.find()) {
            throw new IOException("Synchronizer#" + getId()
                    + " Missing HTTP header field `Content-Disposition` that determines the filename");
        }
        String filename = m.group(1);
        if (filename == null || filename.isEmpty()) {
            throw new IOException(
                    "Synchronizer#" + getId() + " Invalid filename in HTTP header field `Content-Disposition`");
        }

        // Renames the downloaded file
        output.close();
        Path dest = out_tmp.getParent().resolve(filename);
        Files.move(out_tmp, dest, StandardCopyOption.ATOMIC_MOVE);

        DownloadResult res = new DownloadResult(dest, response.getEntity().getContentType().getValue(),
                response.getEntity().getContentLength());

        return res;
    } finally {
        if (Files.exists(out_tmp)) {
            Files.delete(out_tmp);
        }
    }
}

From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java
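
This HDFS provider implements copy() by streaming between byte channels; note how the target's open-option set starts as EnumSet.of(StandardOpenOption.WRITE) and then gains CREATE or CREATE_NEW depending on whether REPLACE_EXISTING was requested.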

@Override
public void copy(Path source, Path target, CopyOption... options) throws IOException {
    List<CopyOption> optionList = Arrays.asList(options);
    if (!optionList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        if (Files.exists(target))
            throw new java.nio.file.FileAlreadyExistsException(source.toString(), target.toString(),
                    "could not copy file to destination");
    } else {
        Files.deleteIfExists(target);
    }

    FileSystem sourceFS = source.getFileSystem();
    FileSystem targetFS = target.getFileSystem();

    if (optionList.contains(HadoopCopyOption.REMOTE_COPY) && sourceFS.equals(targetFS)) {

        remoteCopy(source, target, options);
        return;

    }
    try (SeekableByteChannel sourceChannel = sourceFS.provider().newByteChannel(source,
            EnumSet.of(StandardOpenOption.READ))) {

        Set<StandardOpenOption> openOptions = EnumSet.of(StandardOpenOption.WRITE);

        if (optionList.contains(StandardCopyOption.REPLACE_EXISTING))
            openOptions.add(StandardOpenOption.CREATE);
        else
            openOptions.add(StandardOpenOption.CREATE_NEW);
        List<FileAttribute<?>> fileAttributes = new ArrayList<>();
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {

            Set<String> sourceAttrViews = sourceFS.supportedFileAttributeViews();
            Set<String> targetAttrViews = targetFS.supportedFileAttributeViews();
            if (sourceAttrViews.contains(PosixFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(PosixFileAttributeViewImpl.NAME)) {
                PosixFileAttributes posixAttributes = sourceFS.provider().readAttributes(source,
                        PosixFileAttributes.class);
                fileAttributes.add(PosixFilePermissions.asFileAttribute(posixAttributes.permissions()));
            }

            if (sourceAttrViews.contains(HadoopFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(HadoopFileAttributeViewImpl.NAME)) {
                final HadoopFileAttributes hdfsAttributes = sourceFS.provider().readAttributes(source,
                        HadoopFileAttributes.class);
                fileAttributes.add(new FileAttribute<Long>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":blockSize";
                    }

                    @Override
                    public Long value() {
                        return hdfsAttributes.getBlockSize();
                    }
                });
                fileAttributes.add(new FileAttribute<Short>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":replication";
                    }

                    @Override
                    public Short value() {
                        return hdfsAttributes.getReplication();
                    }
                });

            }
        }

        FileAttribute<?>[] attributes = fileAttributes.toArray(new FileAttribute<?>[fileAttributes.size()]);

        try (SeekableByteChannel targetChannel = targetFS.provider().newByteChannel(target, openOptions,
                attributes)) {
            int buffSize = getConfiguration().getInt(DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_KEY,
                    DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_DEFAULT);
            ByteBuffer buffer = ByteBuffer.allocate(buffSize);
            buffer.clear();
            while (sourceChannel.read(buffer) > 0) {
                buffer.flip();
                targetChannel.write(buffer);
                buffer.clear();
            }

        }
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            BasicFileAttributes attrs = sourceFS.provider().readAttributes(source, BasicFileAttributes.class);
            BasicFileAttributeView view = targetFS.provider().getFileAttributeView(target,
                    BasicFileAttributeView.class);
            view.setTimes(attrs.lastModifiedTime(), attrs.lastAccessTime(), attrs.creationTime());

        }

    }

}

From source file:org.cryptomator.cryptofs.CryptoFileSystemImpl.java
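
When this encrypted filesystem creates a directory, it writes the directory ID file through a FileChannel opened with EnumSet.of(CREATE_NEW, WRITE), so an already-existing ciphertext file makes the operation fail instead of being silently overwritten.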

void createDirectory(CryptoPath cleartextDir, FileAttribute<?>... attrs) throws IOException {
    CryptoPath cleartextParentDir = cleartextDir.getParent();
    if (cleartextParentDir == null) {
        return;
    }
    Path ciphertextParentDir = cryptoPathMapper.getCiphertextDirPath(cleartextParentDir);
    if (!Files.exists(ciphertextParentDir)) {
        throw new NoSuchFileException(cleartextParentDir.toString());
    }
    Path ciphertextFile = cryptoPathMapper.getCiphertextFilePath(cleartextDir, CiphertextFileType.FILE);
    if (Files.exists(ciphertextFile)) {
        throw new FileAlreadyExistsException(cleartextDir.toString());
    }
    Path ciphertextDirFile = cryptoPathMapper.getCiphertextFilePath(cleartextDir, CiphertextFileType.DIRECTORY);
    boolean success = false;
    try {
        Directory ciphertextDir = cryptoPathMapper.getCiphertextDir(cleartextDir);
        try (FileChannel channel = FileChannel.open(ciphertextDirFile,
                EnumSet.of(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), attrs)) {
            channel.write(ByteBuffer.wrap(ciphertextDir.dirId.getBytes(UTF_8)));
        }
        Files.createDirectories(ciphertextDir.path);
        success = true;
    } finally {
        if (!success) {
            Files.delete(ciphertextDirFile);
            dirIdProvider.delete(ciphertextDirFile);
        }
    }
}

From source file:org.eclipse.winery.generators.ia.Generator.java
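
This Winery IA generator assembles web-service method stubs, then writes the patched source file with an explicit OpenOption array of WRITE and TRUNCATE_EXISTING so the template's previous contents are fully replaced.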

private void generateJavaFile(File javaService) throws IOException {

    // Generate methods
    StringBuilder sb = new StringBuilder();

    for (TOperation op : this.tinterface.getOperation()) {
        // Annotations
        sb.append("\t@WebMethod\n");
        sb.append("\t@SOAPBinding\n");
        sb.append("\t@Oneway\n");

        // Signature
        String operationReturn = "void";
        sb.append("\tpublic " + operationReturn + " " + op.getName() + "(\n");

        // Parameter
        boolean first = true;
        if (op.getInputParameters() != null) {
            for (TParameter parameter : op.getInputParameters().getInputParameter()) {
                String parameterName = parameter.getName();

                if (first) {
                    first = false;
                    sb.append("\t\t");
                } else {
                    sb.append(",\n\t\t");
                }

                // Generate @WebParam
                sb.append("@WebParam(name=\"" + parameterName + "\", targetNamespace=\"" + this.namespace
                        + "\") ");

                // Handle required and optional parameters using @XmlElement
                if (parameter.getRequired().equals(TBoolean.YES)) {
                    sb.append("@XmlElement(required=true)");
                } else {
                    sb.append("@XmlElement(required=false)");
                }

                sb.append(" String " + parameterName);
            }
        }
        sb.append("\n\t) {\n");

        // If there are output parameters we generate the respective HashMap
        boolean outputParamsExist = (op.getOutputParameters() != null)
                && (!op.getOutputParameters().getOutputParameter().isEmpty());
        if (outputParamsExist) {
            sb.append("\t\t// This HashMap holds the return parameters of this operation.\n");
            sb.append("\t\tfinal HashMap<String,String> returnParameters = new HashMap<String, String>();\n\n");
        }

        sb.append("\t\t// TODO: Implement your operation here.\n");

        // Generate code to set output parameters
        if (outputParamsExist) {
            for (TParameter outputParam : op.getOutputParameters().getOutputParameter()) {
                sb.append("\n\n\t\t// Output Parameter '" + outputParam.getName() + "' ");
                if (outputParam.getRequired().equals(TBoolean.YES)) {
                    sb.append("(required)");
                } else {
                    sb.append("(optional)");
                }
                sb.append("\n\t\t// TODO: Set " + outputParam.getName() + " parameter here.");
                sb.append(
                        "\n\t\t// Do NOT delete the next line of code. Set \"\" as value if you want to return nothing or an empty result!");
                sb.append("\n\t\treturnParameters.put(\"" + outputParam.getName() + "\", \"TODO\");");
            }
            sb.append("\n\n\t\tsendResponse(returnParameters);\n");
        }

        sb.append("\t}\n\n");
    }

    // Read file and replace placeholders
    Charset cs = Charset.defaultCharset();
    List<String> lines = new ArrayList<>();
    for (String line : Files.readAllLines(javaService.toPath(), cs)) {
        // Replace web service method
        line = line.replaceAll(Generator.PLACEHOLDER_GENERATED_WEBSERVICE_METHODS, sb.toString());
        lines.add(line);
    }

    // Write file
    OpenOption[] options = new OpenOption[] { StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING };
    Files.write(javaService.toPath(), lines, cs, options);
}

From source file:de.decoit.visa.rdf.RDFManager.java
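
This RDF loader uses WRITE together with CREATE and TRUNCATE_EXISTING to dump the offending model to error_dump.rdf when more than one root node is found, before throwing an RDFSourceException.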

/**
 * Load RDF/XML data into a named model. The model will be read from the
 * specified input stream. The caller can specify whether the existing model
 * will be replaced or the new data appended to the current model.
 *
 * @param pIS Input stream to read the model from
 * @param pReplace Specify if the new data will replace the existing model
 *            or if it will be appended to the model
 * @param pModelName The local name of the model the RDF information will be
 *            stored into
 * @throws IOException
 * @throws RDFSourceException
 */
public void loadRDF(InputStream pIS, boolean pReplace, String pModelName)
        throws IOException, RDFSourceException {
    ds.begin(ReadWrite.WRITE);

    try {
        activeNamedModel = ds.getNamedModel(VISA.createModelURI(pModelName));

        // If the model contains statements, clear it before importing the
        // new statements
        if (!activeNamedModel.isEmpty()) {
            activeNamedModel.removeAll();
        }

        // Read the RDF file into the model
        activeNamedModel.read(pIS, null);

        HashSet<String> addedLocNames = new HashSet<>();

        if (pReplace) {
            // Clear the topology storage
            TEBackend.TOPOLOGY_STORAGE.clear();

            // Clear the default model
            ds.getDefaultModel().removeAll();

            // Get the root node (the node which is subject of device
            // properties) of this model
            QueryExecution qexec = QueryExecutionFactory
                    .create(getRootNodeSPARQL(VISA.createModelURI(pModelName)), ds);
            ArrayList<QuerySolution> rsList = resultSetToList(qexec.execSelect());

            if (rsList.size() == 1) {
                rootNode = rsList.get(0).getResource("root");
            } else if (rsList.size() > 1) {
                activeNamedModel
                        .write(Files.newOutputStream(Paths.get("error_dump.rdf"), StandardOpenOption.CREATE,
                                StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING));
                throw new RDFSourceException("Multiple VSA (root) nodes found in RDF/XML file");
            } else {
                throw new RDFSourceException("No VSA (root) node found in RDF/XML file");
            }
        } else {
            // Alter the local names of the nodes
            preventLocalNameCollisions(VISA.createModelURI(pModelName));

            // Alter the root node to fit the root node of the current model
            alterRootNode(VISA.createModelURI(pModelName), rootNode);
        }

        // Process data stored in the model and create topology objects
        // from it
        addedLocNames = processModel(VISA.createModelURI(pModelName));

        // Insert the new model into the existing one
        ds.getDefaultModel().add(activeNamedModel);

        // Layout the topology
        TEBackend.TOPOLOGY_STORAGE.layoutTopology();

        TEBackend.TOPOLOGY_STORAGE.updateInterfaceOrientations(addedLocNames);

        ds.commit();
    } catch (Throwable ex) {
        ds.abort();

        throw ex;
    } finally {
        activeNamedModel = null;

        ds.end();
        TDB.sync(ds);

        pIS.close();
    }
}