Example usage for java.nio.file StandardOpenOption WRITE

List of usage examples for java.nio.file StandardOpenOption WRITE

Introduction

This page lists usage examples for java.nio.file StandardOpenOption WRITE.

Prototype

StandardOpenOption WRITE

Document

Open for write access.
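
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) showing the option in isolation; the file name example.txt is only a placeholder. Note that WRITE on its own neither creates a missing file nor truncates an existing one, which is why most of the examples on this page pair it with CREATE and/or TRUNCATE_EXISTING.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;

public class WriteOptionExample {
    public static void main(String[] args) throws IOException {
        // Placeholder path used only for this sketch
        Path path = Paths.get("example.txt");

        // WRITE alone fails with NoSuchFileException if the file does not exist,
        // and does not discard old content, so CREATE and TRUNCATE_EXISTING are added here.
        Files.write(path, Arrays.asList("first line", "second line"), StandardCharsets.UTF_8,
                StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING);
    }
}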

Usage

From source file: faescapeplan.FAEscapePlanUI.java

@SuppressWarnings("unchecked")
private void downloadJournals(ArrayList<String> journalList) {
    JSONArray jsonList = new JSONArray();
    String downloadLoc = this.saveLocText.getText();
    Path jsonPath = Paths.get(downloadLoc + "\\" + userData.getName() + "\\journals\\journals.json");

    try {
        Files.deleteIfExists(jsonPath);
        Files.createFile(jsonPath);
    } catch (IOException ex) {
        Logger.getLogger(FAEscapePlanUI.class.getName()).log(Level.SEVERE, null, ex);
        JOptionPane.showMessageDialog(this, "A critical IO exception occurred in method: downloadJournals");
    }

    for (String item : journalList) {
        try {
            Map<String, String> jsonMap = new LinkedHashMap<>();
            Document doc = Jsoup.connect("http://www.furaffinity.net/journal/" + item + "/")
                    .cookies(userData.getCookies()).userAgent(USER_AGENT).get();
            String title = doc.title().split(" -- ")[0];
            String date = doc.getElementsByClass("popup_date").get(0).attr("title");
            String body = doc.getElementsByClass("journal-body").get(0).html();
            jsonMap.put("title", title);
            jsonMap.put("date", date);
            jsonMap.put("body", body);
            jsonList.add(jsonMap);
            Path journalPath = Paths.get(downloadLoc,
                    "\\" + userData.getName() + "\\journals\\" + item + "_" + title + ".txt");
            String bodyParsed = removeHtmlTags(body);

            try (FileWriter journalWriter = new FileWriter(new File(journalPath.toString()))) {
                journalWriter.append(title + System.getProperty("line.separator"));
                journalWriter.append(date + System.getProperty("line.separator"));
                journalWriter.append(bodyParsed + System.getProperty("line.separator"));
            }
        } catch (FileAlreadyExistsException ex) {
            Logger.getLogger(FAEscapePlanUI.class.getName()).log(Level.SEVERE, null, ex);
            updateTextLog("File already exists");
        } catch (IOException ex) {
            Logger.getLogger(FAEscapePlanUI.class.getName()).log(Level.SEVERE, null, ex);
            updateTextLog("An IO Exception occurred while downloading journal: " + item);
        }
    }

    String jsonString = JSONValue.toJSONString(jsonList);

    try {
        Files.write(jsonPath, Arrays.asList(jsonString), StandardOpenOption.WRITE);
    } catch (IOException ex) {
        Logger.getLogger(FAEscapePlanUI.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file: org.eclipse.winery.generators.ia.Generator.java

/**
 * Iterates recursively through all the files in the project working
 * directory and tries to replace the global placeholders.
 *
 * @param folderOrFile to start with
 */
private void updateFilesRecursively(File folderOrFile) {
    if (folderOrFile.isFile()) {

        if (folderOrFile.getAbsolutePath().endsWith(".jar")) {
            return;
        }

        Generator.logger.trace("Updating file " + folderOrFile);

        try {
            // Read file and replace placeholders
            Charset cs = Charset.defaultCharset();
            List<String> lines = new ArrayList<>();
            for (String line : Files.readAllLines(folderOrFile.toPath(), cs)) {
                line = line.replaceAll(Generator.PLACEHOLDER_CLASS_NAME, this.name);
                line = line.replaceAll(Generator.PLACEHOLDER_JAVA_PACKAGE, this.javaPackage);
                line = line.replaceAll(Generator.PLACEHOLDER_NAMESPACE, this.namespace);
                line = line.replaceAll(Generator.PLACEHOLDER_IA_ARTIFACT_TEMPLATE_UPLOAD_URL,
                        this.iaArtifactTemplateUploadUrl.toString());
                lines.add(line);
            }

            // Write file
            OpenOption[] options = new OpenOption[] { StandardOpenOption.WRITE,
                    StandardOpenOption.TRUNCATE_EXISTING };
            Files.write(folderOrFile.toPath(), lines, cs, options);

        } catch (IOException e) {
            e.printStackTrace();
        }

    } else {
        Generator.logger.trace("Updating folder " + folderOrFile);
        for (File childFile : folderOrFile.listFiles()) {
            this.updateFilesRecursively(childFile);
        }
    }
}

From source file: codes.thischwa.c5c.impl.LocalConnector.java

@Override
public void saveFile(String backendPath, String content) throws C5CException {
    Path file = buildRealPath(backendPath);
    OutputStream out = null;
    try {
        out = Files.newOutputStream(file, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
        IOUtils.write(content, out, PropertiesLoader.getDefaultEncoding());
    } catch (IOException e) {
        logger.warn("Error while saving content of {}", backendPath);
        throw new C5CException(FilemanagerAction.SAVEFILE, e.getMessage());
    } finally {
        IOUtils.closeQuietly(out);
    }
}
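
A note on the option combination above: when explicit options are passed to Files.newOutputStream, only those options apply (the no-argument form defaults to CREATE, TRUNCATE_EXISTING and WRITE). With CREATE and WRITE alone, writing content that is shorter than the existing file leaves the old trailing bytes in place. Below is a minimal sketch of the overwrite-in-place variant, using a placeholder file name:

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class OverwriteInPlaceExample {
    public static void main(String[] args) throws IOException {
        // Placeholder path, not taken from the example above
        Path file = Paths.get("saved-content.txt");
        try (OutputStream out = Files.newOutputStream(file, StandardOpenOption.CREATE,
                StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)) {
            // TRUNCATE_EXISTING discards any previous, longer content before writing
            out.write("new content".getBytes(StandardCharsets.UTF_8));
        }
    }
}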

From source file: org.opencb.cellbase.app.transform.VariationParser.java

private Path addVariationIdToTranscriptVariationFile(Map<Integer, Integer> variationFeatureToVariationId)
        throws IOException {
    Path transcriptVariationTempFile = variationDirectoryPath.resolve(TRANSCRIPT_VARIATION_FILENAME + ".tmp");
    this.logger.info("Adding variation Id to transcript variations and saving them into "
            + transcriptVariationTempFile + " ...");
    Stopwatch stopwatch = Stopwatch.createStarted();

    Path unpreprocessedTranscriptVariationFile = variationDirectoryPath.resolve(TRANSCRIPT_VARIATION_FILENAME);
    BufferedReader br = getBufferedReader(unpreprocessedTranscriptVariationFile);
    BufferedWriter bw = Files.newBufferedWriter(transcriptVariationTempFile, Charset.defaultCharset(),
            StandardOpenOption.CREATE, StandardOpenOption.WRITE);

    String line;
    while ((line = br.readLine()) != null) {
        // TODO: add limit parameter would do that run faster?
        // TODO: use a precompiled pattern would improve efficiency
        Integer variationFeatureId = Integer.valueOf(line.split("\t")[1]);
        Integer variationId = variationFeatureToVariationId.get(variationFeatureId);
        bw.write(line + "\t" + variationId + "\n");
    }

    br.close();
    bw.close();

    this.logger.info("Added");
    this.logger.debug("Elapsed time adding variation Id to transcript variation file: " + stopwatch);

    return transcriptVariationTempFile;
}

From source file: org.hyperledger.fabric.sdk.testutils.TestConfig.java

/**
 * Returns the appropriate Network Config YAML file based on whether TLS is currently
 * enabled or not
 *
 * @return The appropriate Network Config YAML file
 */
public File getTestNetworkConfigFileYAML() {
    String fname = runningTLS ? "network-config-tls.yaml" : "network-config.yaml";
    String pname = "src/test/fixture/sdkintegration/network_configs/";
    File ret = new File(pname, fname);

    if (!"localhost".equals(LOCALHOST) || isFabricVersionAtOrAfter("1.3")) {
        // change on the fly ...
        File temp = null;

        try {
            //create a temp file
            temp = File.createTempFile(fname, "-FixedUp.yaml");

            if (temp.exists()) { //For testing start fresh
                temp.delete();
            }

            byte[] data = Files.readAllBytes(Paths.get(ret.getAbsolutePath()));

            String sourceText = new String(data, StandardCharsets.UTF_8);

            sourceText = sourceText.replaceAll("https://localhost", "https://" + LOCALHOST);
            sourceText = sourceText.replaceAll("http://localhost", "http://" + LOCALHOST);
            sourceText = sourceText.replaceAll("grpcs://localhost", "grpcs://" + LOCALHOST);
            sourceText = sourceText.replaceAll("grpc://localhost", "grpc://" + LOCALHOST);

            if (isFabricVersionAtOrAfter("1.3")) {
                //eventUrl: grpc://localhost:8053
                sourceText = sourceText.replaceAll("(?m)^[ \\t]*eventUrl:", "# eventUrl:");
            }

            Files.write(Paths.get(temp.getAbsolutePath()), sourceText.getBytes(StandardCharsets.UTF_8),
                    StandardOpenOption.CREATE_NEW, StandardOpenOption.TRUNCATE_EXISTING,
                    StandardOpenOption.WRITE);

            if (!Objects.equals("true", System.getenv(ORG_HYPERLEDGER_FABRIC_SDK_TEST_FABRIC_HOST + "_KEEP"))) {
                temp.deleteOnExit();
            } else {
                System.err.println("produced new network-config.yaml file at:" + temp.getAbsolutePath());
            }

        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        ret = temp;
    }

    return ret;
}

From source file: com.jivesoftware.os.upena.deployable.UpenaMain.java

public void run(String[] args) throws Exception {

    HealthFactory.initialize(BindInterfaceToConfiguration::bindDefault, new HealthCheckRegistry() {

        @Override
        public void register(HealthChecker<?> healthChecker) {

        }

        @Override
        public void unregister(HealthChecker<?> healthChecker) {

        }
    });

    Properties buildProperties = new Properties();
    String upenaVersion = "";
    try {
        buildProperties.load(UpenaMain.class.getClassLoader().getResourceAsStream("build.properties"));
        upenaVersion = buildProperties.getProperty("my.version", "") + " "
                + buildProperties.getProperty("my.timestamp", "") + " sha:"
                + buildProperties.getProperty("git.commit.id", "");
    } catch (Exception x) {
        LOG.warn("Failed to locate build.properties");
    }

    String workingDir = System.getProperty("user.dir");
    long start = System.currentTimeMillis();
    Exception failed = null;
    while (start + TimeUnit.SECONDS.toMillis(10) > System.currentTimeMillis()) {
        try {
            File lockFile = new File(workingDir, "onlyLetOneRunningAtATime");
            lockFile.createNewFile();
            FileChannel.open(lockFile.toPath(), StandardOpenOption.WRITE).lock();
            failed = null;
            break;
        } catch (Exception x) {
            failed = x;
            LOG.warn("Failed to acquire lock on onlyLetOneRunningAtATime", x);
            Thread.sleep(1000);
        }
    }
    if (failed != null) {
        throw failed;
    }

    JDIAPI jvmapi = null;
    try {
        jvmapi = new JDIAPI();
    } catch (NoClassDefFoundError x) {
        LOG.warn(
                "Failed to local tools.jar. Please manually add to classpath. Breakpoint debugger will be disabled.");
    }

    String hostname = args[0];

    int loopbackPort = Integer.parseInt(System.getProperty("amza.loopback.port", "1174"));
    int port = Integer.parseInt(System.getProperty("amza.port", "1175"));
    String multicastGroup = System.getProperty("amza.discovery.group", "225.4.5.6");
    int multicastPort = Integer.parseInt(System.getProperty("amza.discovery.port", "1123"));
    String clusterDiscoveryName = (args.length > 1 ? args[1] : null);

    String datacenter = System.getProperty("host.datacenter", "unknownDatacenter");
    String rack = System.getProperty("host.rack", "unknownRack");
    String publicHost = System.getProperty("public.host.name", hostname);

    UpenaRingHost ringHost = new UpenaRingHost(hostname, port); // TODO include rackId

    // todo need a better way to create writer id.
    int writerId = new Random().nextInt(512);
    TimestampedOrderIdProvider orderIdProvider = new OrderIdProviderImpl(
            new ConstantWriterIdProvider(writerId));

    ObjectMapper mapper = new ObjectMapper();
    mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
    mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    RowsStorageProvider rowsStorageProvider = rowsStorageProvider(orderIdProvider);

    boolean sslEnable = Boolean.parseBoolean(System.getProperty("ssl.enabled", "true"));
    String sslKeystorePassword = System.getProperty("ssl.keystore.password", "password");
    String sslKeystorePath = System.getProperty("ssl.keystore.path", "./certs/sslKeystore");
    String sslKeyStoreAlias = System.getProperty("ssl.keystore.alias", "upenanode").toLowerCase();
    boolean sslAutoGenerateSelfSignedCert = Boolean
            .parseBoolean(System.getProperty("ssl.keystore.autoGenerate", "true"));

    File sslKeystore = new File(sslKeystorePath);
    if (sslEnable) {
        SelfSigningCertGenerator selfSigningCertGenerator = new SelfSigningCertGenerator();
        if (sslKeystore.exists()) {
            if (!selfSigningCertGenerator.validate(sslKeyStoreAlias, sslKeystorePassword, sslKeystore)) {
                LOG.error("SSL keystore validation failed. keyStoreAlias:{} sslKeystore:{}", sslKeyStoreAlias,
                        sslKeystore);
                System.exit(1);
            }
        } else {
            sslKeystore.getParentFile().mkdirs();
            if (sslAutoGenerateSelfSignedCert) {
                selfSigningCertGenerator.create(sslKeyStoreAlias, sslKeystorePassword, sslKeystore);
            } else {
                LOG.error("Failed to locate mandatory sslKeystore:{}", sslKeystore);
                System.exit(1);
            }
        }
    }

    String consumerKey = System.getProperty("upena.consumerKey", clusterDiscoveryName);
    if (consumerKey == null) {
        consumerKey = "upena";
        LOG.warn("Please provide a stronger consumerKey via -Dupena.consumerKey");
    }
    String finalConsumerKey = consumerKey;

    String secret = System.getProperty("upena.secret");
    if (secret == null) {
        secret = "secret";
        LOG.warn("Please provide a stronger secret via -Dupena.secret");
    }
    String finalSecret = secret;

    OAuthSigner authSigner = (request) -> {
        CommonsHttpOAuthConsumer oAuthConsumer = new CommonsHttpOAuthConsumer(finalConsumerKey, finalSecret);
        oAuthConsumer.setMessageSigner(new HmacSha1MessageSigner());
        oAuthConsumer.setTokenWithSecret(finalConsumerKey, finalSecret);
        return oAuthConsumer.sign(request);
    };
    UpenaSSLConfig upenaSSLConfig = new UpenaSSLConfig(sslEnable, sslAutoGenerateSelfSignedCert, authSigner);

    UpenaAmzaService upenaAmzaService = null;
    if (!new File("./state").exists()) {
        UpdatesSender changeSetSender = new HttpUpdatesSender(sslEnable, sslAutoGenerateSelfSignedCert,
                authSigner);
        UpdatesTaker tableTaker = new HttpUpdatesTaker(sslEnable, sslAutoGenerateSelfSignedCert, authSigner);

        UpenaAmzaServiceConfig upenaAmzaServiceConfig = new UpenaAmzaServiceConfig();
        upenaAmzaService = new UpenaAmzaServiceInitializer().initialize(upenaAmzaServiceConfig, orderIdProvider,
                new com.jivesoftware.os.upena.amza.storage.FstMarshaller(
                        FSTConfiguration.getDefaultConfiguration()),
                rowsStorageProvider, rowsStorageProvider, rowsStorageProvider, changeSetSender, tableTaker,
                Optional.<SendFailureListener>absent(), Optional.<UpenaTakeFailureListener>absent(),
                (changes) -> {
                });

        /*upenaAmzaService.start(ringHost, upenaAmzaServiceConfig.resendReplicasIntervalInMillis,
            upenaAmzaServiceConfig.applyReplicasIntervalInMillis,
            upenaAmzaServiceConfig.takeFromNeighborsIntervalInMillis,
            upenaAmzaServiceConfig.checkIfCompactionIsNeededIntervalInMillis,
            upenaAmzaServiceConfig.compactTombstoneIfOlderThanNMillis);*/

        LOG.info("-----------------------------------------------------------------------");
        LOG.info("|      OLD Amza Service Online");
        LOG.info("-----------------------------------------------------------------------");
    } else {
        LOG.info("-----------------------------------------------------------------------");
        LOG.info("|      OLD Amza Data is decomissionable");
        LOG.info("-----------------------------------------------------------------------");
    }

    BAInterner baInterner = new BAInterner();

    AtomicReference<Callable<RingTopology>> topologyProvider = new AtomicReference<>(); // bit of a hack
    InstanceDescriptor instanceDescriptor = new InstanceDescriptor(datacenter, rack, "", "", "", "", "", "", "",
            "", 0, "", "", "", 0L, true);
    ConnectionDescriptorsProvider noAuthConnectionsProvider = (connectionDescriptorsRequest,
            expectedReleaseGroup) -> {
        try {
            RingTopology systemRing = topologyProvider.get().call();
            List<ConnectionDescriptor> descriptors = Lists.newArrayList(Iterables.transform(systemRing.entries,
                    input -> new ConnectionDescriptor(instanceDescriptor, sslEnable, false,
                            new HostPort(input.ringHost.getHost(), input.ringHost.getPort()),
                            Collections.emptyMap(), Collections.emptyMap())));
            return new ConnectionDescriptorsResponse(200, Collections.emptyList(), "", descriptors,
                    connectionDescriptorsRequest.getRequestUuid());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };

    TenantsServiceConnectionDescriptorProvider<String> noAuthConnectionPoolProvider = new TenantsServiceConnectionDescriptorProvider<>(
            Executors.newScheduledThreadPool(1), "", noAuthConnectionsProvider, "", "", 10_000); // TODO config
    noAuthConnectionPoolProvider.start();

    ConnectionDescriptorsProvider connectionsProvider = (connectionDescriptorsRequest,
            expectedReleaseGroup) -> {
        try {
            RingTopology systemRing = topologyProvider.get().call();
            List<ConnectionDescriptor> descriptors = Lists.newArrayList(Iterables.transform(systemRing.entries,
                    input -> new ConnectionDescriptor(instanceDescriptor, sslEnable, true,
                            new HostPort(input.ringHost.getHost(), input.ringHost.getPort()),
                            Collections.emptyMap(), Collections.emptyMap())));
            return new ConnectionDescriptorsResponse(200, Collections.emptyList(), "", descriptors,
                    connectionDescriptorsRequest.getRequestUuid());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };

    TenantsServiceConnectionDescriptorProvider<String> connectionPoolProvider = new TenantsServiceConnectionDescriptorProvider<>(
            Executors.newScheduledThreadPool(1), "", connectionsProvider, "", "", 10_000); // TODO config
    connectionPoolProvider.start();

    HttpDeliveryClientHealthProvider clientHealthProvider = new HttpDeliveryClientHealthProvider("", null, "",
            5000, 100);

    TenantRoutingHttpClientInitializer<String> nonSigningClientInitializer = new TenantRoutingHttpClientInitializer<>(
            null);

    TenantAwareHttpClient<String> systemTakeClient = nonSigningClientInitializer
            .builder(noAuthConnectionPoolProvider, // TODO config
                    clientHealthProvider)
            .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).maxConnections(1_000)
            .socketTimeoutInMillis(60_000).build(); // TODO expose to conf
    TenantAwareHttpClient<String> stripedTakeClient = nonSigningClientInitializer
            .builder(noAuthConnectionPoolProvider, // TODO config
                    clientHealthProvider)
            .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).maxConnections(1_000)
            .socketTimeoutInMillis(60_000).build(); // TODO expose to conf

    TenantRoutingHttpClientInitializer<String> tenantRoutingHttpClientInitializer = new TenantRoutingHttpClientInitializer<>(
            new OAuthSignerProvider(() -> authSigner));

    TenantAwareHttpClient<String> ringClient = tenantRoutingHttpClientInitializer
            .builder(connectionPoolProvider, // TODO config
                    clientHealthProvider)
            .deadAfterNErrors(10).checkDeadEveryNMillis(10_000).maxConnections(1_000)
            .socketTimeoutInMillis(60_000).build(); // TODO expose to conf
    AmzaStats amzaStats = new AmzaStats();

    AmzaService amzaService = startAmza(workingDir, amzaStats, baInterner, writerId,
            new RingHost(datacenter, rack, ringHost.getHost(), ringHost.getPort()),
            new RingMember(ringHost.getHost() + ":" + ringHost.getPort()), authSigner, systemTakeClient,
            stripedTakeClient, ringClient, topologyProvider, clusterDiscoveryName, multicastGroup,
            multicastPort);

    EmbeddedClientProvider embeddedClientProvider = new EmbeddedClientProvider(amzaService);

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Amza Service Online");
    LOG.info("-----------------------------------------------------------------------");

    ObjectMapper storeMapper = new ObjectMapper();
    mapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    UpenaConfigStore upenaConfigStore = new UpenaConfigStore(orderIdProvider, storeMapper, upenaAmzaService,
            amzaService, embeddedClientProvider);

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Upena Config Store Online");
    LOG.info("-----------------------------------------------------------------------");

    ExecutorService instanceChangedThreads = Executors.newFixedThreadPool(32);

    AtomicReference<UbaService> ubaServiceReference = new AtomicReference<>();
    UpenaStore upenaStore = new UpenaStore(storeMapper, upenaAmzaService, (instanceChanges) -> {
        instanceChangedThreads.submit(() -> {
            UbaService got = ubaServiceReference.get();
            if (got != null) {
                try {
                    got.instanceChanged(instanceChanges);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        });
    }, (changes) -> {
    }, (change) -> {
        LOG.info("TODO: tie into conductor. " + change);
    }, amzaService, embeddedClientProvider);
    //upenaStore.attachWatchers();

    ChaosService chaosService = new ChaosService(upenaStore);
    SecureRandom random = new SecureRandom();
    PasswordStore passwordStore = (key) -> {
        String password = System.getProperty("sauth.keystore.password");
        if (password == null) {
            File passwordFile = new File(workingDir, "keystore/" + key + ".key");
            if (passwordFile.exists()) {
                password = Files.toString(passwordFile, StandardCharsets.UTF_8);
            } else {
                passwordFile.getParentFile().mkdirs();
                password = new BigInteger(130, random).toString(32);
                Files.write(password, passwordFile, StandardCharsets.UTF_8);
            }
        }
        return password;
    };

    SessionStore sessionStore = new SessionStore(TimeUnit.MINUTES.toMillis(60), TimeUnit.MINUTES.toMillis(30));

    AtomicReference<UpenaHealth> upenaHealthProvider = new AtomicReference<>();
    InstanceHealthly instanceHealthly = (key, version) -> {
        UpenaHealth upenaHealth = upenaHealthProvider.get();
        if (upenaHealth == null) {
            return false;
        }
        ConcurrentMap<RingHost, NodeHealth> ringHostNodeHealth = upenaHealth.buildClusterHealth();
        for (NodeHealth nodeHealth : ringHostNodeHealth.values()) {
            for (NannyHealth nannyHealth : nodeHealth.nannyHealths) {
                if (nannyHealth.instanceDescriptor.instanceKey.equals(key.getKey())) {
                    return nannyHealth.serviceHealth.fullyOnline
                            ? nannyHealth.serviceHealth.version.equals(version)
                            : false;
                }
            }
        }
        return false;
    };
    UpenaService upenaService = new UpenaService(passwordStore, sessionStore, upenaStore, chaosService,
            instanceHealthly);

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Upena Service Online");
    LOG.info("-----------------------------------------------------------------------");

    File defaultPathToRepo = new File(new File(System.getProperty("user.dir"), ".m2"), "repository");
    PathToRepo localPathToRepo = new PathToRepo(
            new File(System.getProperty("pathToRepo", defaultPathToRepo.getAbsolutePath())));
    RepositoryProvider repositoryProvider = new RepositoryProvider(localPathToRepo);

    Host host = new Host(publicHost, datacenter, rack, ringHost.getHost(), ringHost.getPort(), workingDir, null,
            null);
    HostKey hostKey = new HostKeyProvider().getNodeKey(upenaStore.hosts, host);

    String hostInstanceId = System.getProperty("host.instance.id", hostKey.getKey());
    host = new Host(publicHost, datacenter, rack, ringHost.getHost(), ringHost.getPort(), workingDir,
            hostInstanceId, null);

    UbaLog ubaLog = (what, why, how) -> {
        try {
            upenaStore.record("Uba", what, System.currentTimeMillis(), why,
                    ringHost.getHost() + ":" + ringHost.getPort(), how);
        } catch (Exception x) {
            x.printStackTrace(); // Hmm lame
        }
    };

    OktaLog oktaLog = (who, what, why, how) -> {
        try {
            upenaStore.record("okta:" + who, what, System.currentTimeMillis(), why,
                    ringHost.getHost() + ":" + ringHost.getPort(), how);
        } catch (Exception x) {
            x.printStackTrace(); // Hmm lame
        }
    };

    OktaCredentialsMatcher.oktaLog = oktaLog;
    OktaRealm.oktaLog = oktaLog;

    UpenaClient upenaClient = new UpenaClient() {
        @Override
        public InstanceDescriptorsResponse instanceDescriptor(
                InstanceDescriptorsRequest instanceDescriptorsRequest) throws Exception {
            return upenaService.instanceDescriptors(instanceDescriptorsRequest);
        }

        @Override
        public void updateKeyPair(String instanceKey, String publicKey) throws Exception {
            Instance i = upenaStore.instances.get(new InstanceKey(instanceKey));
            if (i != null) {
                LOG.info("Updating publicKey for {}", instanceKey);
                upenaStore.instances.update(new InstanceKey(instanceKey),
                        new Instance(i.clusterKey, i.hostKey, i.serviceKey, i.releaseGroupKey, i.instanceId,
                                i.enabled, i.locked, publicKey, i.restartTimestampGMTMillis, i.ports));
            }
        }

    };

    final UbaService ubaService = new UbaServiceInitializer().initialize(passwordStore, upenaClient,
            repositoryProvider, hostKey.getKey(), workingDir,
            new UbaCoordinate(datacenter, rack, publicHost, host.hostName, "localhost", loopbackPort), null,
            ubaLog);

    UpenaHealth upenaHealth = new UpenaHealth(amzaService, upenaSSLConfig, upenaConfigStore, ubaService,
            new RingHost(datacenter, rack, ringHost.getHost(), ringHost.getPort()), hostKey);
    upenaHealthProvider.set(upenaHealth);

    DiscoveredRoutes discoveredRoutes = new DiscoveredRoutes();
    ShiroRequestHelper shiroRequestHelper = new ShiroRequestHelper(TimeUnit.DAYS.toMillis(1)); // TODO expose Sys prop?

    String shiroConfigLocation = System.getProperty("shiro.ini.location", "classpath:shiro.ini"); // classpath:oktashiro.ini

    UpenaJerseyEndpoints jerseyEndpoints = new UpenaJerseyEndpoints(shiroConfigLocation)
            .addInjectable(ShiroRequestHelper.class, shiroRequestHelper)
            .addEndpoint(UpenaClusterRestEndpoints.class).addEndpoint(UpenaHostRestEndpoints.class)
            .addEndpoint(UpenaServiceRestEndpoints.class).addEndpoint(UpenaReleaseRestEndpoints.class)
            .addEndpoint(UpenaInstanceRestEndpoints.class).addEndpoint(UpenaTenantRestEndpoints.class)
            .addInjectable(upenaHealth).addInjectable(upenaService).addInjectable(upenaStore)
            .addInjectable(upenaConfigStore).addInjectable(ubaService)
            //.addEndpoint(AmzaReplicationRestEndpoints.class)
            //.addInjectable(UpenaAmzaInstance.class, upenaAmzaService)
            .addEndpoint(UpenaEndpoints.class).addEndpoint(UpenaConnectivityEndpoints.class)
            .addEndpoint(UpenaManagedDeployableEndpoints.class).addEndpoint(UpenaHealthEndpoints.class)
            .addEndpoint(UpenaRepoEndpoints.class).addInjectable(DiscoveredRoutes.class, discoveredRoutes)
            .addInjectable(UpenaRingHost.class, ringHost).addInjectable(HostKey.class, hostKey)
            .addInjectable(UpenaAutoRelease.class, new UpenaAutoRelease(repositoryProvider, upenaStore))
            .addInjectable(PathToRepo.class, localPathToRepo);

    PercentileHealthCheckConfig phcc = bindDefault(PercentileHealthCheckConfig.class);
    PercentileHealthChecker authFilterHealthCheck = new PercentileHealthChecker(phcc);
    AuthValidationFilter authValidationFilter = new AuthValidationFilter(authFilterHealthCheck);
    authValidationFilter.addEvaluator(new NoAuthEvaluator(), "/", "/swagger.json", "/ui/*", // Handled by Shiro
            "/repo/*" // Cough
    );

    OAuth1Signature verifier = new OAuth1Signature(new OAuthServiceLocatorShim());
    OAuthSecretManager oAuthSecretManager = new OAuthSecretManager() {
        @Override
        public void clearCache() {
        }

        @Override
        public String getSecret(String id) throws AuthValidationException {
            return id.equals(finalConsumerKey) ? finalSecret : null;
        }

        @Override
        public void verifyLastSecretRemovalTime() throws Exception {
        }
    };
    AuthValidator<OAuth1Signature, OAuth1Request> oAuthValidator = new DefaultOAuthValidator(
            Executors.newScheduledThreadPool(1), Long.MAX_VALUE, oAuthSecretManager, 60_000, false, false);
    oAuthValidator.start();
    authValidationFilter.addEvaluator(new NoAuthEvaluator(), "/repo/*", "/amza/rows/stream/*",
            "/amza/rows/taken/*", "/amza/pong/*", "/amza/invalidate/*");
    authValidationFilter.addEvaluator(new OAuthEvaluator(oAuthValidator, verifier), "/upena/*", "/amza/*");

    // TODO something better someday
    String upenaApiUsername = System.getProperty("upena.api.username", null);
    String upenaApiPassword = System.getProperty("upena.api.password", null);

    if (upenaApiUsername != null && upenaApiPassword != null) {
        authValidationFilter.addEvaluator(containerRequestContext -> {
            String authCredentials = containerRequestContext.getHeaderString("Authorization");
            if (authCredentials == null) {
                return AuthStatus.not_handled;
            }

            final String encodedUserPassword = authCredentials.replaceFirst("Basic" + " ", "");
            String usernameAndPassword = null;
            try {
                byte[] decodedBytes = Base64.getDecoder().decode(encodedUserPassword);
                usernameAndPassword = new String(decodedBytes, "UTF-8");
            } catch (IOException e) {
                return AuthStatus.denied;
            }
            final StringTokenizer tokenizer = new StringTokenizer(usernameAndPassword, ":");
            final String username = tokenizer.nextToken();
            final String password = tokenizer.nextToken();

            boolean authenticationStatus = upenaApiUsername.equals(username)
                    && upenaApiPassword.equals(password);

            return authenticationStatus ? AuthStatus.authorized : AuthStatus.denied;
        }, "/api/*");
    }

    jerseyEndpoints.addContainerRequestFilter(authValidationFilter);

    String region = System.getProperty("aws.region", null);
    String roleArn = System.getProperty("aws.roleArn", null);

    AWSClientFactory awsClientFactory = new AWSClientFactory(region, roleArn);

    String accountName = System.getProperty("account.name",
            clusterDiscoveryName == null ? "" : clusterDiscoveryName);
    String humanReadableUpenaClusterName = datacenter + " - " + accountName;
    injectUI(upenaVersion, awsClientFactory, storeMapper, mapper, jvmapi, amzaService, localPathToRepo,
            repositoryProvider, hostKey, ringHost, upenaSSLConfig, port, sessionStore, ubaService, upenaHealth,
            upenaStore, upenaConfigStore, jerseyEndpoints, humanReadableUpenaClusterName, discoveredRoutes);

    injectAmza(baInterner, amzaStats, jerseyEndpoints, amzaService, ringClient);

    InitializeRestfulServer initializeRestfulServer = new InitializeRestfulServer(false, port, "UpenaNode",
            sslEnable, sslKeyStoreAlias, sslKeystorePassword, sslKeystorePath, 128, 10_000);

    buildSwagger();
    initializeRestfulServer.addClasspathResource("/resources");
    initializeRestfulServer.addContextHandler("/", jerseyEndpoints);

    RestfulServer restfulServer = initializeRestfulServer.build();
    restfulServer.start();

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Jetty Service Online");
    LOG.info("-----------------------------------------------------------------------");

    UpenaJerseyEndpoints loopbackJerseyEndpoints = new UpenaJerseyEndpoints(null)
            .addEndpoint(UpenaLoopbackEndpoints.class).addEndpoint(UpenaConfigRestEndpoints.class)
            .addInjectable(SessionStore.class, sessionStore)
            .addInjectable(DiscoveredRoutes.class, discoveredRoutes).addInjectable(upenaConfigStore)
            .addInjectable(upenaStore).addInjectable(upenaHealth)
            .addInjectable(UpenaService.class, upenaService);

    InitializeRestfulServer initializeLoopbackRestfulServer = new InitializeRestfulServer(
            Boolean.parseBoolean(System.getProperty("amza.loopback.strict", "true")), loopbackPort, "UpenaNode",
            false, sslKeyStoreAlias, sslKeystorePassword, sslKeystorePath, 128, 10_000);
    initializeLoopbackRestfulServer.addClasspathResource("/resources");
    initializeLoopbackRestfulServer.addContextHandler("/", loopbackJerseyEndpoints);

    RestfulServer loopbackRestfulServer = initializeLoopbackRestfulServer.build();
    loopbackRestfulServer.start();

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|      Jetty Service Online");
    LOG.info("-----------------------------------------------------------------------");

    if (ubaService != null) {
        Executors.newScheduledThreadPool(1).scheduleWithFixedDelay(() -> {
            try {
                ubaService.nanny();
            } catch (Exception ex) {
                LOG.error("Nanny failure", ex);
            }
        }, 15, 15, TimeUnit.SECONDS);
        LOG.info("-----------------------------------------------------------------------");
        LOG.info("|      Uba Service Online");
        LOG.info("-----------------------------------------------------------------------");
    }
    ubaServiceReference.set(ubaService);

    /*String peers = System.getProperty("manual.peers");
    if (peers != null) {
    String[] hostPortTuples = peers.split(",");
    for (String hostPortTuple : hostPortTuples) {
        String hostPort = hostPortTuple.trim();
        if (hostPort.length() > 0 && hostPort.contains(":")) {
            String[] host_port = hostPort.split(":");
            try {
                UpenaRingHost anotherRingHost = new UpenaRingHost(host_port[0].trim(), Integer.parseInt(host_port[1].trim()));
                List<UpenaRingHost> ring = upenaAmzaService.getRing("master");
                if (!ring.contains(anotherRingHost)) {
                    LOG.info("Adding host to the cluster: " + anotherRingHost);
                    upenaAmzaService.addRingHost("master", anotherRingHost);
                }
            } catch (Exception x) {
                LOG.warn("Malformed hostPortTuple {}", hostPort);
            }
        } else {
            LOG.warn("Malformed hostPortTuple {}", hostPort);
        }
    }
    }*/

    String vpc = System.getProperty("aws.vpc", null);
    UpenaAWSLoadBalancerNanny upenaAWSLoadBalancerNanny = new UpenaAWSLoadBalancerNanny(vpc, upenaStore,
            hostKey, awsClientFactory);

    Executors.newScheduledThreadPool(1).scheduleWithFixedDelay(() -> {
        try {
            upenaAWSLoadBalancerNanny.ensureSelf();
        } catch (Exception x) {
            LOG.warn("Failures while nannying load loadbalancer.", x);
        }
    }, 1, 1, TimeUnit.MINUTES); // TODO better

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|     Waiting for amza to be ready....");
    LOG.info("-----------------------------------------------------------------------");
    while (!amzaService.isReady()) {
        Thread.sleep(1000);
    }

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|     Begin Migration");
    LOG.info("-----------------------------------------------------------------------");

    upenaStore.init(orderIdProvider, Integer.parseInt(System.getProperty("min.service.port", "10000")),
            Integer.parseInt(System.getProperty("max.service.port", String.valueOf(Short.MAX_VALUE))), false);

    upenaConfigStore.init();

    LOG.info("-----------------------------------------------------------------------");
    LOG.info("|     End Migration");
    LOG.info("-----------------------------------------------------------------------");

    addManualPeers(amzaService);

    Host gotHost = upenaStore.hosts.get(hostKey);
    if (gotHost == null || !gotHost.equals(host)) {
        upenaStore.hosts.update(hostKey, host);
    }
}

From source file: io.druid.segment.realtime.appenderator.AppenderatorImpl.java

private void lockBasePersistDirectory() {
    if (basePersistDirLock == null) {
        try {
            basePersistDirLockChannel = FileChannel.open(computeLockFile().toPath(), StandardOpenOption.CREATE,
                    StandardOpenOption.WRITE);

            basePersistDirLock = basePersistDirLockChannel.tryLock();
            if (basePersistDirLock == null) {
                throw new ISE("Cannot acquire lock on basePersistDir: %s", computeLockFile());
            }
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
    }
}

From source file: edu.wustl.lookingglass.community.CommunityRepository.java

private void lockRepo() throws IOException {
    this.syncLockChannel = FileChannel.open(Paths.get(syncLockPath), StandardOpenOption.WRITE,
            StandardOpenOption.CREATE);
    FileLock lock = this.syncLockChannel.lock(); // gets an exclusive lock
    assert lock.isValid();
    this.syncLockChannel.write(ByteBuffer.wrap(ManagementFactory.getRuntimeMXBean().getName().getBytes()));
}
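
Several examples on this page (UpenaMain, AppenderatorImpl and CommunityRepository above) open a FileChannel with WRITE purely to take a file lock: an exclusive FileLock requires a channel that is open for writing, otherwise lock() or tryLock() throws NonWritableChannelException. Here is a minimal sketch of that pattern, with a placeholder lock-file name:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class LockFileExample {
    public static void main(String[] args) throws IOException {
        // Placeholder lock file used only for this sketch
        try (FileChannel channel = FileChannel.open(Paths.get("app.lock"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            // tryLock() returns null if another process already holds the lock
            FileLock lock = channel.tryLock();
            if (lock == null) {
                System.err.println("Another instance is already running");
                return;
            }
            // ... do work while holding the lock; it is released when the channel is closed
        }
    }
}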

From source file: org.schedulesdirect.grabber.Grabber.java

private void updateZip(NetworkEpgClient clnt) throws IOException, JSONException, JsonParseException {
    Set<String> completedListings = new HashSet<String>();
    LOG.debug(String.format("Using %d worker threads", globalOpts.getMaxThreads()));
    pool = createThreadPoolExecutor();
    start = System.currentTimeMillis();
    File dest = grabOpts.getTarget();
    cachedSeriesIds = new HashSet<String>();
    boolean rmDest = false;
    if (dest.exists()) {
        ZipEpgClient zipClnt = null;
        try {
            zipClnt = new ZipEpgClient(dest);
            if (!zipClnt.getUserStatus().getLastServerRefresh()
                    .before(clnt.getUserStatus().getLastServerRefresh())) {
                LOG.info(
                        "Current cache file contains latest data from Schedules Direct server; use --force-download to force a new download from server.");
                boolean force = grabOpts.isForce();
                if (!force)
                    return;
                else
                    LOG.warn("Forcing an update of data with the server due to user request!");
            }
        } catch (Exception e) {
            if (grabOpts.isKeep()) {
                LOG.error("Existing cache is invalid, keeping by user request!", e);
                return;
            } else {
                LOG.warn("Existing cache is invalid, deleting it; use --keep-bad-cache to keep existing cache!",
                        e);
                rmDest = true;
            }
        } finally {
            if (zipClnt != null)
                try {
                    zipClnt.close();
                } catch (IOException e) {
                }
            if (rmDest && !dest.delete())
                throw new IOException("Unable to delete " + dest);
        }
    }

    freshZip = !dest.exists();
    try (FileSystem vfs = FileSystems.newFileSystem(new URI(String.format("jar:%s", dest.toURI())),
            Collections.singletonMap("create", "true"))) {
        if (freshZip) {
            Path target = vfs.getPath(ZipEpgClient.ZIP_VER_FILE);
            Files.write(target, Integer.toString(ZipEpgClient.ZIP_VER).getBytes(ZipEpgClient.ZIP_CHARSET));
        }
        ProgramCache progCache = ProgramCache.get(vfs);
        Path lineups = vfs.getPath("lineups.txt");
        Files.deleteIfExists(lineups);
        Path scheds = vfs.getPath("/schedules/");
        if (!Files.isDirectory(scheds))
            Files.createDirectory(scheds);
        Path maps = vfs.getPath("/maps/");
        PathUtils.removeDirectory(maps);
        Files.createDirectory(maps);
        Path progs = vfs.getPath("/programs/");
        if (!Files.isDirectory(progs))
            Files.createDirectory(progs);
        Path logos = vfs.getPath("/logos/");
        if (!Files.isDirectory(logos))
            Files.createDirectory(logos);
        Path md5s = vfs.getPath("/md5s/");
        if (!Files.isDirectory(md5s))
            Files.createDirectory(md5s);
        Path cache = vfs.getPath(LOGO_CACHE);
        if (Files.exists(cache)) {
            String cacheData = new String(Files.readAllBytes(cache), ZipEpgClient.ZIP_CHARSET);
            logoCache = Config.get().getObjectMapper().readValue(cacheData, JSONObject.class);
        } else
            logoCache = new JSONObject();
        Path seriesInfo = vfs.getPath("/seriesInfo/");
        if (!Files.isDirectory(seriesInfo))
            Files.createDirectories(seriesInfo);
        loadSeriesInfoIds(seriesInfo);
        missingSeriesIds = Collections.synchronizedSet(new HashSet<String>());
        loadRetryIds(vfs.getPath(SERIES_INFO_DATA));

        JSONObject resp = Config.get().getObjectMapper().readValue(
                factory.get(DefaultJsonRequest.Action.GET, RestNouns.LINEUPS, clnt.getHash(),
                        clnt.getUserAgent(), globalOpts.getUrl().toString()).submitForJson(null),
                JSONObject.class);
        if (!JsonResponseUtils.isErrorResponse(resp))
            Files.write(lineups, resp.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET));
        else
            LOG.error("Received error response when requesting lineup data!");

        for (Lineup l : clnt.getLineups()) {
            buildStationList();
            JSONObject o = Config.get().getObjectMapper()
                    .readValue(
                            factory.get(DefaultJsonRequest.Action.GET, l.getUri(), clnt.getHash(),
                                    clnt.getUserAgent(), globalOpts.getUrl().toString()).submitForJson(null),
                            JSONObject.class);
            Files.write(vfs.getPath("/maps", ZipEpgClient.scrubFileName(String.format("%s.txt", l.getId()))),
                    o.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET));
            JSONArray stations = o.getJSONArray("stations");
            JSONArray ids = new JSONArray();
            for (int i = 0; i < stations.length(); ++i) {
                JSONObject obj = stations.getJSONObject(i);
                String sid = obj.getString("stationID");
                if (stationList != null && !stationList.contains(sid))
                    LOG.debug(String.format("Skipped %s; not listed in station file", sid));
                else if (completedListings.add(sid)) {
                    ids.put(sid);
                    if (!grabOpts.isNoLogos()) {
                        if (logoCacheInvalid(obj))
                            pool.execute(new LogoTask(obj, vfs, logoCache));
                        else if (LOG.isDebugEnabled())
                            LOG.debug(String.format("Skipped logo for %s; already cached!",
                                    obj.optString("callsign", null)));
                    } else if (!logosWarned) {
                        logosWarned = true;
                        LOG.warn("Logo downloads disabled by user request!");
                    }
                } else
                    LOG.debug(String.format("Skipped %s; already downloaded.", sid));
                //pool.setMaximumPoolSize(5); // Processing these new schedules takes all kinds of memory!
                if (ids.length() == grabOpts.getMaxSchedChunk()) {
                    pool.execute(new ScheduleTask(ids, vfs, clnt, progCache, factory));
                    ids = new JSONArray();
                }
            }
            if (ids.length() > 0)
                pool.execute(new ScheduleTask(ids, vfs, clnt, progCache, factory));
        }
        pool.shutdown();
        try {
            LOG.debug("Waiting for SchedLogoExecutor to terminate...");
            if (pool.awaitTermination(15, TimeUnit.MINUTES))
                LOG.debug("SchedLogoExecutor: Terminated successfully.");
            else {
                failedTask = true;
                LOG.warn(
                        "SchedLogoExecutor: Termination timed out; some tasks probably didn't finish properly!");
            }
        } catch (InterruptedException e) {
            failedTask = true;
            LOG.warn(
                    "SchedLogoExecutor: Termination interrupted); some tasks probably didn't finish properly!");
        }
        Files.write(cache, logoCache.toString(3).getBytes(ZipEpgClient.ZIP_CHARSET),
                StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE, StandardOpenOption.CREATE);
        ScheduleTask.commit(vfs);

        pool = createThreadPoolExecutor();
        //pool.setMaximumPoolSize(5); // Again, we've got memory problems
        String[] dirtyPrograms = progCache.getDirtyIds();
        progCache.markAllClean();
        progCache = null;
        LOG.info(String.format("Identified %d program ids requiring an update!", dirtyPrograms.length));
        Collection<String> progIds = new ArrayList<String>();
        for (String progId : dirtyPrograms) {
            progIds.add(progId);
            if (progIds.size() == grabOpts.getMaxProgChunk()) {
                pool.execute(new ProgramTask(progIds, vfs, clnt, factory, missingSeriesIds, "programs", null,
                        false));
                progIds.clear();
            }
        }
        if (progIds.size() > 0)
            pool.execute(
                    new ProgramTask(progIds, vfs, clnt, factory, missingSeriesIds, "programs", null, false));
        pool.shutdown();
        try {
            LOG.debug("Waiting for ProgramExecutor to terminate...");
            if (pool.awaitTermination(15, TimeUnit.MINUTES)) {
                LOG.debug("ProgramExecutor: Terminated successfully.");
                Iterator<String> itr = missingSeriesIds.iterator();
                while (itr.hasNext()) {
                    String id = itr.next();
                    if (cachedSeriesIds.contains(id))
                        itr.remove();
                }
                if (missingSeriesIds.size() > 0) {
                    LOG.info(String.format("Grabbing %d series info programs!", missingSeriesIds.size()));
                    Set<String> retrySet = new HashSet<>();
                    try {
                        new ProgramTask(missingSeriesIds, vfs, clnt, factory, missingSeriesIds, "seriesInfo",
                                retrySet, true).run();
                    } catch (RuntimeException e) {
                        LOG.error("SeriesInfo task failed!", e);
                        Grabber.failedTask = true;
                    }
                    Path seriesInfoData = vfs.getPath(SERIES_INFO_DATA);
                    if (retrySet.size() > 0) {
                        StringBuilder sb = new StringBuilder();
                        for (String id : retrySet)
                            sb.append(id + "\n");
                        Files.write(seriesInfoData, sb.toString().getBytes(ZipEpgClient.ZIP_CHARSET),
                                StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING,
                                StandardOpenOption.CREATE);
                    } else if (Files.exists(seriesInfoData))
                        Files.delete(seriesInfoData);
                }
            } else {
                failedTask = true;
                LOG.warn("ProgramExecutor: Termination timed out; some tasks probably didn't finish properly!");
            }
        } catch (InterruptedException e) {
            failedTask = true;
            LOG.warn("ProgramExecutor: Termination interrupted); some tasks probably didn't finish properly!");
        }

        String userData = clnt.getUserStatus().toJson();
        if (failedTask) {
            LOG.error("One or more tasks failed!  Resetting last data refresh timestamp to zero.");
            SimpleDateFormat fmt = Config.get().getDateTimeFormat();
            String exp = fmt.format(new Date(0L));
            JSONObject o = Config.get().getObjectMapper().readValue(userData, JSONObject.class);
            o.put("lastDataUpdate", exp);
            userData = o.toString(2);
        }
        Path p = vfs.getPath(USER_DATA);
        Files.write(p, userData.getBytes(ZipEpgClient.ZIP_CHARSET), StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE);
        removeIgnoredStations(vfs);
    } catch (URISyntaxException e1) {
        throw new RuntimeException(e1);
    } finally {
        Runtime rt = Runtime.getRuntime();
        LOG.info(String.format("MemStats:%n\tFREE: %s%n\tUSED: %s%n\t MAX: %s",
                FileUtils.byteCountToDisplaySize(rt.freeMemory()),
                FileUtils.byteCountToDisplaySize(rt.totalMemory()),
                FileUtils.byteCountToDisplaySize(rt.maxMemory())));
    }
}

From source file: com.spectralogic.ds3client.integration.Smoke_Test.java

@Test
public void testRecoverReadJob()
        throws IOException, XmlProcessingException, JobRecoveryException, URISyntaxException {
    final String bucketName = "test_recover_read_job_bucket";
    final String book1 = "beowulf.txt";
    final String book2 = "ulysses.txt";
    final Path objPath1 = ResourceUtils.loadFileResource(RESOURCE_BASE_NAME + book1);
    final Path objPath2 = ResourceUtils.loadFileResource(RESOURCE_BASE_NAME + book2);
    final Ds3Object obj1 = new Ds3Object(book1, Files.size(objPath1));
    final Ds3Object obj2 = new Ds3Object(book2, Files.size(objPath2));

    final Path dirPath = FileSystems.getDefault().getPath("output");
    if (!Files.exists(dirPath)) {
        Files.createDirectory(dirPath);
    }

    try {
        HELPERS.ensureBucketExists(bucketName, envDataPolicyId);

        final Ds3ClientHelpers.Job putJob = HELPERS.startWriteJob(bucketName, Lists.newArrayList(obj1, obj2));
        putJob.transfer(new ResourceObjectPutter(RESOURCE_BASE_NAME));

        final FileChannel channel1 = FileChannel.open(dirPath.resolve(book1), StandardOpenOption.WRITE,
                StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);

        final Ds3ClientHelpers.Job readJob = HELPERS.startReadJob(bucketName, Lists.newArrayList(obj1, obj2));
        final GetObjectResponse readResponse1 = client
                .getObject(new GetObjectRequest(bucketName, book1, channel1, readJob.getJobId().toString(), 0));

        assertThat(readResponse1, is(notNullValue()));
        assertThat(readResponse1.getStatusCode(), is(equalTo(200)));

        // Interruption...
        final Ds3ClientHelpers.Job recoverJob = HELPERS.recoverReadJob(readJob.getJobId());

        final FileChannel channel2 = FileChannel.open(dirPath.resolve(book2), StandardOpenOption.WRITE,
                StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        final GetObjectResponse readResponse2 = client.getObject(
                new GetObjectRequest(bucketName, book2, channel2, recoverJob.getJobId().toString(), 0));
        assertThat(readResponse2, is(notNullValue()));
        assertThat(readResponse2.getStatusCode(), is(equalTo(200)));

    } finally {
        deleteAllContents(client, bucketName);
        for (final Path tempFile : Files.newDirectoryStream(dirPath)) {
            Files.delete(tempFile);
        }
        Files.delete(dirPath);
    }
}