List of usage examples for java.nio.file.StandardOpenOption.TRUNCATE_EXISTING
StandardOpenOption TRUNCATE_EXISTING
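Before the examples below, here is a minimal, self-contained sketch of what TRUNCATE_EXISTING does: when a file is opened for writing and already exists, its length is set to 0 before any bytes are written (the option is ignored when the file is opened only for reading). The file name used here is purely illustrative.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class TruncateExistingDemo {
    public static void main(String[] args) throws IOException {
        Path path = Paths.get("demo.txt"); // illustrative file name
        Files.write(path, "old content that is longer".getBytes(StandardCharsets.UTF_8));

        // CREATE + WRITE + TRUNCATE_EXISTING: create the file if missing,
        // otherwise discard its previous contents before writing.
        Files.write(path, "new".getBytes(StandardCharsets.UTF_8),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING);

        // The file now contains exactly "new"; without TRUNCATE_EXISTING,
        // the trailing bytes of the longer old content would remain.
        System.out.println(Files.readAllLines(path, StandardCharsets.UTF_8)); // prints [new]
    }
}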
From source file:org.eclipse.winery.generators.ia.Generator.java
private void generateJavaFile(File javaService) throws IOException {
    // Generate methods
    StringBuilder sb = new StringBuilder();
    for (TOperation op : this.tinterface.getOperation()) {
        // Annotations
        sb.append("\t@WebMethod\n");
        sb.append("\t@SOAPBinding\n");
        sb.append("\t@Oneway\n");

        // Signature
        String operationReturn = "void";
        sb.append("\tpublic " + operationReturn + " " + op.getName() + "(\n");

        // Parameters
        boolean first = true;
        if (op.getInputParameters() != null) {
            for (TParameter parameter : op.getInputParameters().getInputParameter()) {
                String parameterName = parameter.getName();
                if (first) {
                    first = false;
                    sb.append("\t\t");
                } else {
                    sb.append(",\n\t\t");
                }
                // Generate @WebParam
                sb.append("@WebParam(name=\"" + parameterName + "\", targetNamespace=\"" + this.namespace + "\") ");
                // Handle required and optional parameters using @XmlElement
                if (parameter.getRequired().equals(TBoolean.YES)) {
                    sb.append("@XmlElement(required=true)");
                } else {
                    sb.append("@XmlElement(required=false)");
                }
                sb.append(" String " + parameterName);
            }
        }
        sb.append("\n\t) {\n");

        // If there are output parameters we generate the respective HashMap
        boolean outputParamsExist = (op.getOutputParameters() != null)
                && (!op.getOutputParameters().getOutputParameter().isEmpty());
        if (outputParamsExist) {
            sb.append("\t\t// This HashMap holds the return parameters of this operation.\n");
            sb.append("\t\tfinal HashMap<String,String> returnParameters = new HashMap<String, String>();\n\n");
        }

        sb.append("\t\t// TODO: Implement your operation here.\n");

        // Generate code to set output parameters
        if (outputParamsExist) {
            for (TParameter outputParam : op.getOutputParameters().getOutputParameter()) {
                sb.append("\n\n\t\t// Output Parameter '" + outputParam.getName() + "' ");
                if (outputParam.getRequired().equals(TBoolean.YES)) {
                    sb.append("(required)");
                } else {
                    sb.append("(optional)");
                }
                sb.append("\n\t\t// TODO: Set " + outputParam.getName() + " parameter here.");
                sb.append("\n\t\t// Do NOT delete the next line of code. Set \"\" as value if you want to return nothing or an empty result!");
                sb.append("\n\t\treturnParameters.put(\"" + outputParam.getName() + "\", \"TODO\");");
            }
            sb.append("\n\n\t\tsendResponse(returnParameters);\n");
        }
        sb.append("\t}\n\n");
    }

    // Read file and replace placeholders
    Charset cs = Charset.defaultCharset();
    List<String> lines = new ArrayList<>();
    for (String line : Files.readAllLines(javaService.toPath(), cs)) {
        // Replace web service method
        line = line.replaceAll(Generator.PLACEHOLDER_GENERATED_WEBSERVICE_METHODS, sb.toString());
        lines.add(line);
    }

    // Write file
    OpenOption[] options = new OpenOption[] { StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING };
    Files.write(javaService.toPath(), lines, cs, options);
}
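Note that this example opens the file with WRITE and TRUNCATE_EXISTING but without CREATE: Files.write would throw NoSuchFileException if javaService had disappeared, which is safe here because the same file was just read a few lines earlier. TRUNCATE_EXISTING guarantees the rewritten file carries no trailing bytes from the old content when the replacement text is shorter than the original.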
From source file:de.decoit.visa.rdf.RDFManager.java
/**
 * Load RDF/XML data into a named model. The model will be read from the
 * specified input stream. It can be specified if the existing model will be
 * replaced or the new data will be appended to the current model.
 *
 * @param pIS Input stream to read the model from
 * @param pReplace Specify if the new data will replace the existing model
 *            or if it will be appended to the model
 * @param pModelName The local name of the model the RDF information will be
 *            stored into
 * @throws IOException
 * @throws RDFSourceException
 */
public void loadRDF(InputStream pIS, boolean pReplace, String pModelName) throws IOException, RDFSourceException {
    ds.begin(ReadWrite.WRITE);
    try {
        activeNamedModel = ds.getNamedModel(VISA.createModelURI(pModelName));
        // If the model contains statements, clear it before importing the new statements
        if (!activeNamedModel.isEmpty()) {
            activeNamedModel.removeAll();
        }
        // Read the RDF file into the model
        activeNamedModel.read(pIS, null);
        HashSet<String> addedLocNames = new HashSet<>();
        if (pReplace) {
            // Clear the topology storage
            TEBackend.TOPOLOGY_STORAGE.clear();
            // Clear the default model
            ds.getDefaultModel().removeAll();
            // Get the root node (the node which is subject of device properties) of this model
            QueryExecution qexec = QueryExecutionFactory
                    .create(getRootNodeSPARQL(VISA.createModelURI(pModelName)), ds);
            ArrayList<QuerySolution> rsList = resultSetToList(qexec.execSelect());
            if (rsList.size() == 1) {
                rootNode = rsList.get(0).getResource("root");
            } else if (rsList.size() > 1) {
                activeNamedModel.write(Files.newOutputStream(Paths.get("error_dump.rdf"),
                        StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                        StandardOpenOption.TRUNCATE_EXISTING));
                throw new RDFSourceException("Multiple VSA (root) nodes found in RDF/XML file");
            } else {
                throw new RDFSourceException("No VSA (root) node found in RDF/XML file");
            }
        } else {
            // Alter the local names of the nodes
            preventLocalNameCollisions(VISA.createModelURI(pModelName));
            // Alter the root node to fit the root node of the current model
            alterRootNode(VISA.createModelURI(pModelName), rootNode);
        }
        // Process data stored in the model and create topology objects from it
        addedLocNames = processModel(VISA.createModelURI(pModelName));
        // Insert the new model into the existing one
        ds.getDefaultModel().add(activeNamedModel);
        // Layout the topology
        TEBackend.TOPOLOGY_STORAGE.layoutTopology();
        TEBackend.TOPOLOGY_STORAGE.updateInterfaceOrientations(addedLocNames);
        ds.commit();
    } catch (Throwable ex) {
        ds.abort();
        throw ex;
    } finally {
        activeNamedModel = null;
        ds.end();
        TDB.sync(ds);
        pIS.close();
    }
}
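Here TRUNCATE_EXISTING is combined with CREATE and WRITE for the error_dump.rdf debug output: the dump file is created on the first failure and fully overwritten on each subsequent one, so the dump always reflects the most recent offending model rather than a mix of old and new bytes.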
From source file:org.eclipse.winery.generators.ia.Generator.java
/**
 * Iterates recursively through all the files in the project working
 * directory and tries to replace the global placeholders.
 *
 * @param folderOrFile to start with
 */
private void updateFilesRecursively(File folderOrFile) {
    if (folderOrFile.isFile()) {
        if (folderOrFile.getAbsolutePath().endsWith(".jar")) {
            return;
        }
        Generator.logger.trace("Updating file " + folderOrFile);
        try {
            // Read file and replace placeholders
            Charset cs = Charset.defaultCharset();
            List<String> lines = new ArrayList<>();
            for (String line : Files.readAllLines(folderOrFile.toPath(), cs)) {
                line = line.replaceAll(Generator.PLACEHOLDER_CLASS_NAME, this.name);
                line = line.replaceAll(Generator.PLACEHOLDER_JAVA_PACKAGE, this.javaPackage);
                line = line.replaceAll(Generator.PLACEHOLDER_NAMESPACE, this.namespace);
                line = line.replaceAll(Generator.PLACEHOLDER_IA_ARTIFACT_TEMPLATE_UPLOAD_URL,
                        this.iaArtifactTemplateUploadUrl.toString());
                lines.add(line);
            }
            // Write file
            OpenOption[] options = new OpenOption[] { StandardOpenOption.WRITE,
                    StandardOpenOption.TRUNCATE_EXISTING };
            Files.write(folderOrFile.toPath(), lines, cs, options);
        } catch (IOException e) {
            e.printStackTrace();
        }
    } else {
        Generator.logger.trace("Updating folder " + folderOrFile);
        for (File childFile : folderOrFile.listFiles()) {
            this.updateFilesRecursively(childFile);
        }
    }
}
From source file:org.elasticsearch.xpack.core.ssl.SSLConfigurationReloaderTests.java
/**
 * Tests the reloading of a keystore when there is an exception during reloading. An exception is caused by truncating the keystore
 * that is being monitored
 */
public void testReloadingKeyStoreException() throws Exception {
    Path tempDir = createTempDir();
    Path keystorePath = tempDir.resolve("testnode.jks");
    Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"), keystorePath);
    MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode");
    Settings settings = Settings.builder().put("xpack.ssl.keystore.path", keystorePath)
            .setSecureSettings(secureSettings).put("path.home", createTempDir()).build();
    Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings);
    final SSLService sslService = new SSLService(settings, env);
    final SSLConfiguration config = sslService.sslConfiguration(Settings.EMPTY);
    new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) {
        @Override
        void reloadSSLContext(SSLConfiguration configuration) {
            fail("reload should not be called! [keystore reload exception]");
        }
    };
    final SSLContext context = sslService.sslContextHolder(config).sslContext();
    // truncate the keystore
    try (OutputStream out = Files.newOutputStream(keystorePath, StandardOpenOption.TRUNCATE_EXISTING)) {
    }
    // we intentionally don't wait here as we rely on concurrency to catch a failure
    assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context));
}
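Opening a file with Files.newOutputStream(path, StandardOpenOption.TRUNCATE_EXISTING) and closing the stream immediately, as in the empty try-with-resources block above, is a compact way to truncate an existing file to zero bytes. The PEM-key and truststore tests below use the same trick to corrupt the monitored file.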
From source file:org.elasticsearch.xpack.core.ssl.SSLConfigurationReloaderTests.java
/**
 * Tests the reloading of a key config backed by pem files when there is an exception during reloading. An exception is caused by
 * truncating the key file that is being monitored
 */
public void testReloadingPEMKeyConfigException() throws Exception {
    Path tempDir = createTempDir();
    Path keyPath = tempDir.resolve("testnode.pem");
    Path certPath = tempDir.resolve("testnode.crt");
    Path clientCertPath = tempDir.resolve("testclient.crt");
    Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem"), keyPath);
    Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"), certPath);
    Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"), clientCertPath);
    MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString("xpack.ssl.secure_key_passphrase", "testnode");
    Settings settings = Settings.builder().put("xpack.ssl.key", keyPath).put("xpack.ssl.certificate", certPath)
            .putList("xpack.ssl.certificate_authorities", certPath.toString(), clientCertPath.toString())
            .put("path.home", createTempDir()).setSecureSettings(secureSettings).build();
    Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings);
    final SSLService sslService = new SSLService(settings, env);
    final SSLConfiguration config = sslService.sslConfiguration(Settings.EMPTY);
    new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) {
        @Override
        void reloadSSLContext(SSLConfiguration configuration) {
            fail("reload should not be called! [pem key reload exception]");
        }
    };
    final SSLContext context = sslService.sslContextHolder(config).sslContext();
    // truncate the file
    try (OutputStream os = Files.newOutputStream(keyPath, StandardOpenOption.TRUNCATE_EXISTING)) {
    }
    // we intentionally don't wait here as we rely on concurrency to catch a failure
    assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context));
}
From source file:org.hyperledger.fabric.sdk.testutils.TestConfig.java
/**
 * Returns the appropriate Network Config YAML file based on whether TLS is currently
 * enabled or not
 *
 * @return The appropriate Network Config YAML file
 */
public File getTestNetworkConfigFileYAML() {
    String fname = runningTLS ? "network-config-tls.yaml" : "network-config.yaml";
    String pname = "src/test/fixture/sdkintegration/network_configs/";
    File ret = new File(pname, fname);

    if (!"localhost".equals(LOCALHOST) || isFabricVersionAtOrAfter("1.3")) {
        // change on the fly ...
        File temp = null;
        try {
            // create a temp file
            temp = File.createTempFile(fname, "-FixedUp.yaml");
            if (temp.exists()) { // For testing start fresh
                temp.delete();
            }
            byte[] data = Files.readAllBytes(Paths.get(ret.getAbsolutePath()));
            String sourceText = new String(data, StandardCharsets.UTF_8);
            sourceText = sourceText.replaceAll("https://localhost", "https://" + LOCALHOST);
            sourceText = sourceText.replaceAll("http://localhost", "http://" + LOCALHOST);
            sourceText = sourceText.replaceAll("grpcs://localhost", "grpcs://" + LOCALHOST);
            sourceText = sourceText.replaceAll("grpc://localhost", "grpc://" + LOCALHOST);
            if (isFabricVersionAtOrAfter("1.3")) {
                // eventUrl: grpc://localhost:8053
                sourceText = sourceText.replaceAll("(?m)^[ \\t]*eventUrl:", "# eventUrl:");
            }
            Files.write(Paths.get(temp.getAbsolutePath()), sourceText.getBytes(StandardCharsets.UTF_8),
                    StandardOpenOption.CREATE_NEW, StandardOpenOption.TRUNCATE_EXISTING,
                    StandardOpenOption.WRITE);
            if (!Objects.equals("true", System.getenv(ORG_HYPERLEDGER_FABRIC_SDK_TEST_FABRIC_HOST + "_KEEP"))) {
                temp.deleteOnExit();
            } else {
                System.err.println("produced new network-config.yaml file at:" + temp.getAbsolutePath());
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        ret = temp;
    }
    return ret;
}
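A small caveat in this example: CREATE_NEW requires that the target not already exist (the temp file is deleted just above the write), so there is never an existing file to truncate and TRUNCATE_EXISTING is effectively a no-op alongside it; CREATE_NEW plus WRITE alone would behave identically here.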
From source file:org.elasticsearch.xpack.core.ssl.SSLConfigurationReloaderTests.java
/**
 * Tests the reloading of a truststore when there is an exception during reloading. An exception is caused by truncating the truststore
 * that is being monitored
 */
public void testTrustStoreReloadException() throws Exception {
    Path tempDir = createTempDir();
    Path trustStorePath = tempDir.resolve("testnode.jks");
    Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"), trustStorePath);
    MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode");
    Settings settings = Settings.builder().put("xpack.ssl.truststore.path", trustStorePath)
            .put("path.home", createTempDir()).setSecureSettings(secureSettings).build();
    Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings);
    final SSLService sslService = new SSLService(settings, env);
    final SSLConfiguration config = sslService.sslConfiguration(Settings.EMPTY);
    new SSLConfigurationReloader(settings, env, sslService, resourceWatcherService) {
        @Override
        void reloadSSLContext(SSLConfiguration configuration) {
            fail("reload should not be called! [truststore reload exception]");
        }
    };
    final SSLContext context = sslService.sslContextHolder(config).sslContext();
    // truncate the truststore
    try (OutputStream os = Files.newOutputStream(trustStorePath, StandardOpenOption.TRUNCATE_EXISTING)) {
    }
    // we intentionally don't wait here as we rely on concurrency to catch a failure
    assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context));
}
From source file:org.ballerinalang.test.packaging.PackagingNegativeTestCase.java
/**
 * Override content in file.
 *
 * @param filePath path of file to override
 * @param content content to be written
 * @throws IOException
 */
private void writeToFile(Path filePath, String content) throws IOException {
    Files.write(filePath, content.getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
}
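CREATE plus TRUNCATE_EXISTING is the usual "create or overwrite" idiom. It is also the default: per the Files.write javadoc, calling it with no options behaves as if CREATE, TRUNCATE_EXISTING, and WRITE were present, so the explicit options in this helper simply document the intent.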
From source file:org.schedulesdirect.grabber.Grabber.java
private void updateZip(NetworkEpgClient clnt) throws IOException, JSONException, JsonParseException {
    Set<String> completedListings = new HashSet<String>();
    LOG.debug(String.format("Using %d worker threads", globalOpts.getMaxThreads()));
    pool = createThreadPoolExecutor();
    start = System.currentTimeMillis();
    File dest = grabOpts.getTarget();
    cachedSeriesIds = new HashSet<String>();
    boolean rmDest = false;
    if (dest.exists()) {
        ZipEpgClient zipClnt = null;
        try {
            zipClnt = new ZipEpgClient(dest);
            if (!zipClnt.getUserStatus().getLastServerRefresh()
                    .before(clnt.getUserStatus().getLastServerRefresh())) {
                LOG.info("Current cache file contains latest data from Schedules Direct server; use --force-download to force a new download from server.");
                boolean force = grabOpts.isForce();
                if (!force)
                    return;
                else
                    LOG.warn("Forcing an update of data with the server due to user request!");
            }
        } catch (Exception e) {
            if (grabOpts.isKeep()) {
                LOG.error("Existing cache is invalid, keeping by user request!", e);
                return;
            } else {
                LOG.warn("Existing cache is invalid, deleting it; use --keep-bad-cache to keep existing cache!", e);
                rmDest = true;
            }
        } finally {
            if (zipClnt != null)
                try {
                    zipClnt.close();
                } catch (IOException e) {
                }
            if (rmDest && !dest.delete())
                throw new IOException("Unable to delete " + dest);
        }
    }
    freshZip = !dest.exists();
    try (FileSystem vfs = FileSystems.newFileSystem(new URI(String.format("jar:%s", dest.toURI())),
            Collections.singletonMap("create", "true"))) {
        if (freshZip) {
            Path target = vfs.getPath(ZipEpgClient.ZIP_VER_FILE);
            Files.write(target, Integer.toString(ZipEpgClient.ZIP_VER).getBytes(ZipEpgClient.ZIP_CHARSET));
        }
        ProgramCache progCache = ProgramCache.get(vfs);
        Path lineups = vfs.getPath("lineups.txt");
        Files.deleteIfExists(lineups);
        Path scheds = vfs.getPath("/schedules/");
        if (!Files.isDirectory(scheds))
            Files.createDirectory(scheds);
        Path maps = vfs.getPath("/maps/");
        PathUtils.removeDirectory(maps);
        Files.createDirectory(maps);
        Path progs = vfs.getPath("/programs/");
        if (!Files.isDirectory(progs))
            Files.createDirectory(progs);
        Path logos = vfs.getPath("/logos/");
        if (!Files.isDirectory(logos))
            Files.createDirectory(logos);
        Path md5s = vfs.getPath("/md5s/");
        if (!Files.isDirectory(md5s))
            Files.createDirectory(md5s);
        Path cache = vfs.getPath(LOGO_CACHE);
        if (Files.exists(cache)) {
            String cacheData = new String(Files.readAllBytes(cache), ZipEpgClient.ZIP_CHARSET);
            logoCache = Config.get().getObjectMapper().readValue(cacheData, JSONObject.class);
        } else
            logoCache = new JSONObject();
        Path seriesInfo = vfs.getPath("/seriesInfo/");
        if (!Files.isDirectory(seriesInfo))
            Files.createDirectories(seriesInfo);
        loadSeriesInfoIds(seriesInfo);
        missingSeriesIds = Collections.synchronizedSet(new HashSet<String>());
        loadRetryIds(vfs.getPath(SERIES_INFO_DATA));
        JSONObject resp = Config.get().getObjectMapper().readValue(
                factory.get(DefaultJsonRequest.Action.GET, RestNouns.LINEUPS, clnt.getHash(),
                        clnt.getUserAgent(), globalOpts.getUrl().toString()).submitForJson(null),
                JSONObject.class);
        if (!JsonResponseUtils.isErrorResponse(resp))
            Files.write(lineups, resp.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET));
        else
            LOG.error("Received error response when requesting lineup data!");
        for (Lineup l : clnt.getLineups()) {
            buildStationList();
            JSONObject o = Config.get().getObjectMapper().readValue(
                    factory.get(DefaultJsonRequest.Action.GET, l.getUri(), clnt.getHash(),
                            clnt.getUserAgent(), globalOpts.getUrl().toString()).submitForJson(null),
                    JSONObject.class);
            Files.write(vfs.getPath("/maps", ZipEpgClient.scrubFileName(String.format("%s.txt", l.getId()))),
                    o.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET));
            JSONArray stations = o.getJSONArray("stations");
            JSONArray ids = new JSONArray();
            for (int i = 0; i < stations.length(); ++i) {
                JSONObject obj = stations.getJSONObject(i);
                String sid = obj.getString("stationID");
                if (stationList != null && !stationList.contains(sid))
                    LOG.debug(String.format("Skipped %s; not listed in station file", sid));
                else if (completedListings.add(sid)) {
                    ids.put(sid);
                    if (!grabOpts.isNoLogos()) {
                        if (logoCacheInvalid(obj))
                            pool.execute(new LogoTask(obj, vfs, logoCache));
                        else if (LOG.isDebugEnabled())
                            LOG.debug(String.format("Skipped logo for %s; already cached!",
                                    obj.optString("callsign", null)));
                    } else if (!logosWarned) {
                        logosWarned = true;
                        LOG.warn("Logo downloads disabled by user request!");
                    }
                } else
                    LOG.debug(String.format("Skipped %s; already downloaded.", sid));
                //pool.setMaximumPoolSize(5); // Processing these new schedules takes all kinds of memory!
                if (ids.length() == grabOpts.getMaxSchedChunk()) {
                    pool.execute(new ScheduleTask(ids, vfs, clnt, progCache, factory));
                    ids = new JSONArray();
                }
            }
            if (ids.length() > 0)
                pool.execute(new ScheduleTask(ids, vfs, clnt, progCache, factory));
        }
        pool.shutdown();
        try {
            LOG.debug("Waiting for SchedLogoExecutor to terminate...");
            if (pool.awaitTermination(15, TimeUnit.MINUTES))
                LOG.debug("SchedLogoExecutor: Terminated successfully.");
            else {
                failedTask = true;
                LOG.warn("SchedLogoExecutor: Termination timed out; some tasks probably didn't finish properly!");
            }
        } catch (InterruptedException e) {
            failedTask = true;
            LOG.warn("SchedLogoExecutor: Termination interrupted; some tasks probably didn't finish properly!");
        }
        Files.write(cache, logoCache.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET),
                StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE, StandardOpenOption.CREATE);
        ScheduleTask.commit(vfs);
        pool = createThreadPoolExecutor();
        //pool.setMaximumPoolSize(5); // Again, we've got memory problems
        String[] dirtyPrograms = progCache.getDirtyIds();
        progCache.markAllClean();
        progCache = null;
        LOG.info(String.format("Identified %d program ids requiring an update!", dirtyPrograms.length));
        Collection<String> progIds = new ArrayList<String>();
        for (String progId : dirtyPrograms) {
            progIds.add(progId);
            if (progIds.size() == grabOpts.getMaxProgChunk()) {
                pool.execute(new ProgramTask(progIds, vfs, clnt, factory, missingSeriesIds, "programs", null, false));
                progIds.clear();
            }
        }
        if (progIds.size() > 0)
            pool.execute(new ProgramTask(progIds, vfs, clnt, factory, missingSeriesIds, "programs", null, false));
        pool.shutdown();
        try {
            LOG.debug("Waiting for ProgramExecutor to terminate...");
            if (pool.awaitTermination(15, TimeUnit.MINUTES)) {
                LOG.debug("ProgramExecutor: Terminated successfully.");
                Iterator<String> itr = missingSeriesIds.iterator();
                while (itr.hasNext()) {
                    String id = itr.next();
                    if (cachedSeriesIds.contains(id))
                        itr.remove();
                }
                if (missingSeriesIds.size() > 0) {
                    LOG.info(String.format("Grabbing %d series info programs!", missingSeriesIds.size()));
                    Set<String> retrySet = new HashSet<>();
                    try {
                        new ProgramTask(missingSeriesIds, vfs, clnt, factory, missingSeriesIds, "seriesInfo",
                                retrySet, true).run();
                    } catch (RuntimeException e) {
                        LOG.error("SeriesInfo task failed!", e);
                        Grabber.failedTask = true;
                    }
                    Path seriesInfoData = vfs.getPath(SERIES_INFO_DATA);
                    if (retrySet.size() > 0) {
                        StringBuilder sb = new StringBuilder();
                        for (String id : retrySet)
                            sb.append(id + "\n");
                        Files.write(seriesInfoData, sb.toString().getBytes(ZipEpgClient.ZIP_CHARSET),
                                StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING,
                                StandardOpenOption.CREATE);
                    } else if (Files.exists(seriesInfoData))
                        Files.delete(seriesInfoData);
                }
            } else {
                failedTask = true;
                LOG.warn("ProgramExecutor: Termination timed out; some tasks probably didn't finish properly!");
            }
        } catch (InterruptedException e) {
            failedTask = true;
            LOG.warn("ProgramExecutor: Termination interrupted; some tasks probably didn't finish properly!");
        }
        String userData = clnt.getUserStatus().toJson();
        if (failedTask) {
            LOG.error("One or more tasks failed! Resetting last data refresh timestamp to zero.");
            SimpleDateFormat fmt = Config.get().getDateTimeFormat();
            String exp = fmt.format(new Date(0L));
            JSONObject o = Config.get().getObjectMapper().readValue(userData, JSONObject.class);
            o.put("lastDataUpdate", exp);
            userData = o.toString(2);
        }
        Path p = vfs.getPath(USER_DATA);
        Files.write(p, userData.getBytes(ZipEpgClient.ZIP_CHARSET), StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE);
        removeIgnoredStations(vfs);
    } catch (URISyntaxException e1) {
        throw new RuntimeException(e1);
    } finally {
        Runtime rt = Runtime.getRuntime();
        LOG.info(String.format("MemStats:%n\tFREE: %s%n\tUSED: %s%n\t MAX: %s",
                FileUtils.byteCountToDisplaySize(rt.freeMemory()),
                FileUtils.byteCountToDisplaySize(rt.totalMemory()),
                FileUtils.byteCountToDisplaySize(rt.maxMemory())));
    }
}
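All three writes in this example that use WRITE, TRUNCATE_EXISTING, and CREATE (the logo cache, the series-info retry list, and the user data) target paths inside a zip FileSystem, illustrating that StandardOpenOption combinations apply to entries of a virtual file system the same way they do to regular files.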
From source file:com.spectralogic.ds3client.integration.Smoke_Test.java
@Test
public void testRecoverReadJob() throws IOException, XmlProcessingException, JobRecoveryException, URISyntaxException {
    final String bucketName = "test_recover_read_job_bucket";
    final String book1 = "beowulf.txt";
    final String book2 = "ulysses.txt";
    final Path objPath1 = ResourceUtils.loadFileResource(RESOURCE_BASE_NAME + book1);
    final Path objPath2 = ResourceUtils.loadFileResource(RESOURCE_BASE_NAME + book2);
    final Ds3Object obj1 = new Ds3Object(book1, Files.size(objPath1));
    final Ds3Object obj2 = new Ds3Object(book2, Files.size(objPath2));
    final Path dirPath = FileSystems.getDefault().getPath("output");
    if (!Files.exists(dirPath)) {
        Files.createDirectory(dirPath);
    }
    try {
        HELPERS.ensureBucketExists(bucketName, envDataPolicyId);
        final Ds3ClientHelpers.Job putJob = HELPERS.startWriteJob(bucketName, Lists.newArrayList(obj1, obj2));
        putJob.transfer(new ResourceObjectPutter(RESOURCE_BASE_NAME));
        final FileChannel channel1 = FileChannel.open(dirPath.resolve(book1), StandardOpenOption.WRITE,
                StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        final Ds3ClientHelpers.Job readJob = HELPERS.startReadJob(bucketName, Lists.newArrayList(obj1, obj2));
        final GetObjectResponse readResponse1 = client
                .getObject(new GetObjectRequest(bucketName, book1, channel1, readJob.getJobId().toString(), 0));
        assertThat(readResponse1, is(notNullValue()));
        assertThat(readResponse1.getStatusCode(), is(equalTo(200)));
        // Interruption...
        final Ds3ClientHelpers.Job recoverJob = HELPERS.recoverReadJob(readJob.getJobId());
        final FileChannel channel2 = FileChannel.open(dirPath.resolve(book2), StandardOpenOption.WRITE,
                StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        final GetObjectResponse readResponse2 = client.getObject(
                new GetObjectRequest(bucketName, book2, channel2, recoverJob.getJobId().toString(), 0));
        assertThat(readResponse2, is(notNullValue()));
        assertThat(readResponse2.getStatusCode(), is(equalTo(200)));
    } finally {
        deleteAllContents(client, bucketName);
        for (final Path tempFile : Files.newDirectoryStream(dirPath)) {
            Files.delete(tempFile);
        }
        Files.delete(dirPath);
    }
}
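In this last example TRUNCATE_EXISTING appears via FileChannel.open rather than Files.write or Files.newOutputStream: combined with WRITE and CREATE, it guarantees each download target starts empty before the GetObjectRequest streams data into the channel, so a retried download never leaves stale bytes from an earlier attempt.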