Example usage for java.nio.file StandardOpenOption CREATE

List of usage examples for java.nio.file StandardOpenOption CREATE

Introduction

On this page you can find example usage for java.nio.file StandardOpenOption CREATE.

Prototype

StandardOpenOption CREATE

To view the source code for java.nio.file StandardOpenOption CREATE, click the Source link.

Documentation

Create a new file if it does not exist. This option is ignored if the CREATE_NEW option is also present.
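As a quick, minimal sketch of the option itself (the file name and content below are hypothetical, not taken from the examples that follow), CREATE is typically combined with WRITE when the target file may not yet exist:

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class CreateOptionSketch {
    public static void main(String[] args) throws Exception {
        Path file = Paths.get("example.txt"); // hypothetical file name
        byte[] content = "hello".getBytes(StandardCharsets.UTF_8);
        // CREATE opens the file for writing, creating it first if it is missing.
        // An existing file is written from the beginning; add TRUNCATE_EXISTING
        // if any previous content should be discarded.
        Files.write(file, content, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
    }
}

If the file must not already exist, use StandardOpenOption.CREATE_NEW instead, which fails with a FileAlreadyExistsException when the target is present.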

Usage

From source file:com.splicemachine.derby.stream.control.ControlDataSet.java

@Override
public void saveAsTextFile(String path) {
    OutputStream fileOut = null;
    try {
        DistributedFileSystem dfs = SIDriver.driver().fileSystem();
        fileOut = dfs.newOutputStream(path, StandardOpenOption.CREATE);
        while (iterator.hasNext()) {
            fileOut.write(Bytes.toBytes(iterator.next().toString()));
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (fileOut != null) {
            try {
                Closeables.close(fileOut, true);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }

}

From source file:org.apache.nifi.controller.StandardFlowService.java

@Override
public void overwriteFlow(final InputStream is) throws IOException {
    writeLock.lock();
    try (final OutputStream output = Files.newOutputStream(flowXml, StandardOpenOption.WRITE,
            StandardOpenOption.CREATE); final OutputStream gzipOut = new GZIPOutputStream(output)) {
        FileUtils.copy(is, gzipOut);
    } finally {
        writeLock.unlock();
    }
}

From source file:org.apache.flink.yarn.CliFrontendYarnAddressConfigurationTest.java

private File writeYarnPropertiesFile(String contents) throws IOException {
    File tmpFolder = temporaryFolder.newFolder();
    String currentUser = System.getProperty("user.name");

    // copy .yarn-properties-<username>
    File testPropertiesFile = new File(tmpFolder, ".yarn-properties-" + currentUser);
    Files.write(testPropertiesFile.toPath(), contents.getBytes(), StandardOpenOption.CREATE);

    // copy reference flink-conf.yaml to temporary test directory and append custom configuration path.
    String confFile = flinkConf + "\nyarn.properties-file.location: " + tmpFolder;
    File testConfFile = new File(tmpFolder.getAbsolutePath(), "flink-conf.yaml");
    Files.write(testConfFile.toPath(), confFile.getBytes(), StandardOpenOption.CREATE);

    return tmpFolder.getAbsoluteFile();
}

From source file:org.schedulesdirect.grabber.ScheduleTask.java

protected Map<String, Collection<String>> getStaleStationIds() {
    Map<String, Collection<String>> staleIds = new HashMap<>();
    DefaultJsonRequest req = factory.get(DefaultJsonRequest.Action.POST, RestNouns.SCHEDULE_MD5S,
            clnt.getHash(), clnt.getUserAgent(), clnt.getBaseUrl());
    JSONArray data = new JSONArray();
    for (int i = 0; i < this.req.length(); ++i) {
        JSONObject o = new JSONObject();
        o.put("stationID", this.req.getString(i));
        data.put(o);
    }
    try {
        JSONObject result = Config.get().getObjectMapper().readValue(req.submitForJson(data), JSONObject.class);
        if (!JsonResponseUtils.isErrorResponse(result)) {
            Iterator<?> idItr = result.keys();
            while (idItr.hasNext()) {
                String stationId = idItr.next().toString();
                boolean schedFileExists = Files
                        .exists(vfs.getPath("schedules", String.format("%s.txt", stationId)));
                Path cachedMd5File = vfs.getPath("md5s", String.format("%s.txt", stationId));
                JSONObject cachedMd5s = Files.exists(cachedMd5File)
                        ? Config.get().getObjectMapper()
                                .readValue(new String(Files.readAllBytes(cachedMd5File),
                                        ZipEpgClient.ZIP_CHARSET.toString()), JSONObject.class)
                        : new JSONObject();
                JSONObject stationInfo = result.getJSONObject(stationId);
                Iterator<?> dateItr = stationInfo.keys();
                while (dateItr.hasNext()) {
                    String date = dateItr.next().toString();
                    JSONObject dateInfo = stationInfo.getJSONObject(date);
                    if (!schedFileExists || isScheduleStale(dateInfo, cachedMd5s.optJSONObject(date))) {
                        Collection<String> dates = staleIds.get(stationId);
                        if (dates == null) {
                            dates = new ArrayList<String>();
                            staleIds.put(stationId, dates);
                        }
                        dates.add(date);
                        if (LOG.isDebugEnabled())
                            LOG.debug(String.format("Station %s/%s queued for refresh!", stationId, date));
                    } else if (LOG.isDebugEnabled())
                        LOG.debug(String.format("Station %s is unchanged on the server; skipping it!",
                                stationId));
                }
                Files.write(cachedMd5File, stationInfo.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET),
                        StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING,
                        StandardOpenOption.CREATE);
            }
        }
    } catch (Throwable t) {
        Grabber.failedTask = true;
        LOG.error("Error processing cache; returning partial stale list!", t);
    }
    return staleIds;
}

From source file:org.roda.core.plugins.plugins.base.ExportAIPPlugin.java

private Report exportMultiZip(List<AIP> aips, Path outputPath, Report report, ModelService model,
        IndexService index, StorageService storage, SimpleJobPluginInfo jobPluginInfo, Job job) {
    for (AIP aip : aips) {
        LOGGER.debug("Exporting AIP {} to ZIP", aip.getId());
        OutputStream os = null;
        String error = null;
        try {
            Path zip = outputPath.resolve(aip.getId() + ".zip");
            if (FSUtils.exists(zip) && removeIfAlreadyExists) {
                Files.delete(zip);
            } else if (FSUtils.exists(zip) && !removeIfAlreadyExists) {
                error = "File " + zip.toString() + " already exists";
            }
            if (error == null) {
                os = Files.newOutputStream(zip, StandardOpenOption.CREATE,
                        StandardOpenOption.TRUNCATE_EXISTING);

                Directory directory = storage.getDirectory(ModelUtils.getAIPStoragePath(aip.getId()));
                ConsumesOutputStream cos = DownloadUtils.download(storage, directory);
                cos.consumeOutputStream(os);
            }
        } catch (Exception e) {
            LOGGER.error("Error exporting AIP " + aip.getId() + ": " + e.getMessage());
            error = e.getMessage();
        } finally {
            if (os != null) {
                IOUtils.closeQuietly(os);
            }
        }

        Report reportItem = PluginHelper.initPluginReportItem(this, aip.getId(), AIP.class, AIPState.ACTIVE);
        if (error != null) {
            reportItem.setPluginState(PluginState.FAILURE)
                    .setPluginDetails("Export AIP did not end successfully: " + error);
            jobPluginInfo.incrementObjectsProcessedWithFailure();
        } else {
            reportItem.setPluginState(PluginState.SUCCESS).setPluginDetails("Export AIP ended successfully");
            jobPluginInfo.incrementObjectsProcessedWithSuccess();
        }
        report.addReport(reportItem);
        PluginHelper.updatePartialJobReport(this, model, reportItem, true, job);
    }
    return report;
}

From source file:de.tiqsolutions.hdfs.HadoopFileSystemProvider.java

@Override
public void copy(Path source, Path target, CopyOption... options) throws IOException {
    List<CopyOption> optionList = Arrays.asList(options);
    if (!optionList.contains(StandardCopyOption.REPLACE_EXISTING)) {
        if (Files.exists(target))
            throw new java.nio.file.FileAlreadyExistsException(source.toString(), target.toString(),
                    "could not copy file to destination");
    } else {
        Files.deleteIfExists(target);
    }

    FileSystem sourceFS = source.getFileSystem();
    FileSystem targetFS = target.getFileSystem();

    if (optionList.contains(HadoopCopyOption.REMOTE_COPY) && sourceFS.equals(targetFS)) {

        remoteCopy(source, target, options);
        return;

    }
    try (SeekableByteChannel sourceChannel = sourceFS.provider().newByteChannel(source,
            EnumSet.of(StandardOpenOption.READ))) {

        Set<StandardOpenOption> openOptions = EnumSet.of(StandardOpenOption.WRITE);

        if (optionList.contains(StandardCopyOption.REPLACE_EXISTING))
            openOptions.add(StandardOpenOption.CREATE);
        else
            openOptions.add(StandardOpenOption.CREATE_NEW);
        List<FileAttribute<?>> fileAttributes = new ArrayList<>();
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {

            Set<String> sourceAttrViews = sourceFS.supportedFileAttributeViews();
            Set<String> targetAttrViews = targetFS.supportedFileAttributeViews();
            if (sourceAttrViews.contains(PosixFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(PosixFileAttributeViewImpl.NAME)) {
                PosixFileAttributes posixAttributes = sourceFS.provider().readAttributes(source,
                        PosixFileAttributes.class);
                fileAttributes.add(PosixFilePermissions.asFileAttribute(posixAttributes.permissions()));
            }

            if (sourceAttrViews.contains(HadoopFileAttributeViewImpl.NAME)
                    && targetAttrViews.contains(HadoopFileAttributeViewImpl.NAME)) {
                final HadoopFileAttributes hdfsAttributes = sourceFS.provider().readAttributes(source,
                        HadoopFileAttributes.class);
                fileAttributes.add(new FileAttribute<Long>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":blockSize";
                    }

                    @Override
                    public Long value() {
                        return hdfsAttributes.getBlockSize();
                    }
                });
                fileAttributes.add(new FileAttribute<Short>() {
                    @Override
                    public String name() {
                        return HadoopFileAttributeViewImpl.NAME + ":replication";
                    }

                    @Override
                    public Short value() {
                        return hdfsAttributes.getReplication();
                    }
                });

            }
        }

        FileAttribute<?>[] attributes = fileAttributes.toArray(new FileAttribute<?>[fileAttributes.size()]);

        try (SeekableByteChannel targetChannel = targetFS.provider().newByteChannel(target, openOptions,
                attributes)) {
            int buffSize = getConfiguration().getInt(DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_KEY,
                    DFSConfigKeys.DFS_STREAM_BUFFER_SIZE_DEFAULT);
            ByteBuffer buffer = ByteBuffer.allocate(buffSize);
            buffer.clear();
            while (sourceChannel.read(buffer) > 0) {
                buffer.flip();
                targetChannel.write(buffer);
                buffer.clear();
            }

        }
        if (optionList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            BasicFileAttributes attrs = sourceFS.provider().readAttributes(source, BasicFileAttributes.class);
            BasicFileAttributeView view = targetFS.provider().getFileAttributeView(target,
                    BasicFileAttributeView.class);
            view.setTimes(attrs.lastModifiedTime(), attrs.lastAccessTime(), attrs.creationTime());

        }

    }

}

From source file:org.mycore.common.MCRUtils.java

/**
 * Writes plain text to a file.
 * 
 * @param textToWrite
 *            the text to write into the file
 * @param fileName
 *            the name of the file to write to, given as absolute path
 * @param cs
 *            the charset used to encode the text
 * @return a handle to the written file
 */
public static Path writeTextToFile(String textToWrite, String fileName, Charset cs) throws IOException {
    Path file = Paths.get(fileName);
    Files.write(file, Arrays.asList(textToWrite), cs, StandardOpenOption.CREATE);
    return file;
}

From source file:com.ontotext.s4.service.S4ServiceClientIntegrationTest.java

@Test
public void testClassifyFileContentsAsStreamUrlClient() throws Exception {
    ServiceDescriptor temp = ServicesCatalog.getItem("news-classifier");
    S4ServiceClient client = new S4ServiceClient(temp, testApiKeyId, testApiKeyPass);
    serializationFormat = ResponseFormat.JSON;
    File f = new File("test-file");
    try {
        Path p = f.toPath();
        ArrayList<String> lines = new ArrayList<>(1);
        lines.add(documentText);
        Files.write(p, lines, Charset.forName("UTF-8"), StandardOpenOption.CREATE);

        InputStream result = client.annotateFileContentsAsStream(f, Charset.forName("UTF-8"),
                SupportedMimeType.PLAINTEXT, serializationFormat);
        StringWriter writer = new StringWriter();
        IOUtils.copy(result, writer, Charset.forName("UTF-8"));

        assertTrue(writer.toString().contains("category"));
        assertTrue(writer.toString().contains("allScores"));
    } finally {
        f.delete();
    }
}

From source file:io.adeptj.runtime.server.Server.java

private void createServerConfFile() {
    try (InputStream stream = Server.class.getResourceAsStream("/reference.conf")) {
        Files.write(Paths.get(USER_DIR, DIR_ADEPTJ_RUNTIME, DIR_DEPLOYMENT, SERVER_CONF_FILE),
                IOUtils.toBytes(stream), StandardOpenOption.CREATE);
    } catch (IOException ex) {
        LOGGER.error("Exception while creating server conf file!!", ex);
    }
}

From source file:de.decoit.visa.rdf.RDFManager.java

/**
 * Load RDF/XML data into a named model. The model will be read from the
 * specified input stream. The caller specifies whether the new data replaces
 * the existing model or is appended to the current model.
 *
 * @param pIS Input stream to read the model from
 * @param pReplace Specify if the new data will replace the existing model
 *            or if it will be appended to the model
 * @param pModelName The local name of the model the RDF information will be
 *            stored into
 * @throws IOException
 * @throws RDFSourceException
 */
public void loadRDF(InputStream pIS, boolean pReplace, String pModelName)
        throws IOException, RDFSourceException {
    ds.begin(ReadWrite.WRITE);

    try {
        activeNamedModel = ds.getNamedModel(VISA.createModelURI(pModelName));

        // If the model contains statements, clear it before importing the
        // new statements
        if (!activeNamedModel.isEmpty()) {
            activeNamedModel.removeAll();
        }

        // Read the RDF file into the model
        activeNamedModel.read(pIS, null);

        HashSet<String> addedLocNames = new HashSet<>();

        if (pReplace) {
            // Clear the topology storage
            TEBackend.TOPOLOGY_STORAGE.clear();

            // Clear the default model
            ds.getDefaultModel().removeAll();

            // Get the root node (the node which is subject of device
            // properties) of this model
            QueryExecution qexec = QueryExecutionFactory
                    .create(getRootNodeSPARQL(VISA.createModelURI(pModelName)), ds);
            ArrayList<QuerySolution> rsList = resultSetToList(qexec.execSelect());

            if (rsList.size() == 1) {
                rootNode = rsList.get(0).getResource("root");
            } else if (rsList.size() > 1) {
                activeNamedModel
                        .write(Files.newOutputStream(Paths.get("error_dump.rdf"), StandardOpenOption.CREATE,
                                StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING));
                throw new RDFSourceException("Multiple VSA (root) nodes found in RDF/XML file");
            } else {
                throw new RDFSourceException("No VSA (root) node found in RDF/XML file");
            }
        } else {
            // Alter the local names of the nodes
            preventLocalNameCollisions(VISA.createModelURI(pModelName));

            // Alter the root node to fit the root node of the current model
            alterRootNode(VISA.createModelURI(pModelName), rootNode);
        }

        // Process data stored in the model and create topology objects
        // from it
        addedLocNames = processModel(VISA.createModelURI(pModelName));

        // Insert the new model into the existing one
        ds.getDefaultModel().add(activeNamedModel);

        // Layout the topology
        TEBackend.TOPOLOGY_STORAGE.layoutTopology();

        TEBackend.TOPOLOGY_STORAGE.updateInterfaceOrientations(addedLocNames);

        ds.commit();
    } catch (Throwable ex) {
        ds.abort();

        throw ex;
    } finally {
        activeNamedModel = null;

        ds.end();
        TDB.sync(ds);

        pIS.close();
    }
}