Example usage for java.time Duration ofMillis

List of usage examples for java.time Duration ofMillis

Introduction

On this page you can find example usages of java.time.Duration.ofMillis, collected from open-source projects.

Prototype

public static Duration ofMillis(long millis) 

Document

Obtains a Duration representing a number of milliseconds.
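
For instance, the following standalone snippet (a minimal sketch written for this page, not taken from any of the projects below) shows how a millisecond count is normalized into seconds and nanoseconds:

import java.time.Duration;

public class DurationOfMillisExample {
    public static void main(String[] args) {
        // 1500 ms is stored as 1 second plus 500,000,000 nanoseconds.
        Duration timeout = Duration.ofMillis(1500);
        System.out.println(timeout);              // PT1.5S
        System.out.println(timeout.getSeconds()); // 1
        System.out.println(timeout.getNano());    // 500000000
    }
}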

Usage

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

@Test
public void testCheckPointForEntryLoggerWithMultipleActiveEntryLogs() throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false).setFlushInterval(3000)
            .setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(true)
            .setLedgerStorageClass(MockInterleavedLedgerStorage.class.getName());

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    final BookKeeper bkClient = new BookKeeper(clientConf);

    int numOfLedgers = 12;
    int numOfEntries = 100;
    byte[] dataBytes = "data".getBytes();
    AtomicBoolean receivedExceptionForAdd = new AtomicBoolean(false);
    LongStream.range(0, numOfLedgers).parallel().mapToObj((ledgerId) -> {
        LedgerHandle handle = null;
        try {
            handle = bkClient.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
        } catch (BKException | InterruptedException exc) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to create LedgerHandle for ledgerId: " + ledgerId, exc);
        }
        return handle;
    }).forEach((writeHandle) -> {
        IntStream.range(0, numOfEntries).forEach((entryId) -> {
            try {
                writeHandle.addEntry(entryId, dataBytes);
            } catch (BKException | InterruptedException exc) {
                receivedExceptionForAdd.compareAndSet(false, true);
                LOG.error("Got Exception while trying to AddEntry of ledgerId: " + writeHandle.getId()
                        + " entryId: " + entryId, exc);
            }
        });
        try {
            writeHandle.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to close writeHandle of ledgerId: " + writeHandle.getId(), e);
        }
    });

    Assert.assertFalse(
            "There shouldn't be any exceptions while creating writeHandle and adding entries to writeHandle",
            receivedExceptionForAdd.get());

    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    // since we have waited for more than flushInterval SyncThread should have checkpointed.
    // if entrylogperledger is not enabled, then we checkpoint only when currentLog in EntryLogger
    // is rotated. but if entrylogperledger is enabled, then we checkpoint for every flushInterval period
    File lastMarkFile = new File(ledgerDir, "lastMark");
    Assert.assertTrue("lastMark file must be existing, because checkpoint should have happened",
            lastMarkFile.exists());
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happened", 0,
            rolledLogMark.compare(new LogMark()));

    bkClient.close();
    // here we are calling shutdown, but MockInterleavedLedgerStorage shutdown/flush
    // methods are noop, so entrylogger is not flushed as part of this shutdown
    // here we are trying to simulate Bookie crash, but there is no way to
    // simulate bookie abrupt crash
    server.shutdown();

    // delete journal files and lastMark, to make sure that we are not reading from
    // Journal file
    File[] journalDirs = conf.getJournalDirs();
    for (File journalDir : journalDirs) {
        File journalDirectory = Bookie.getCurrentDirectory(journalDir);
        List<Long> journalLogsId = Journal.listJournalIds(journalDirectory, null);
        for (long journalId : journalLogsId) {
            File journalFile = new File(journalDirectory, Long.toHexString(journalId) + ".txn");
            journalFile.delete();
        }
    }

    // we know there is only one ledgerDir
    lastMarkFile = new File(ledgerDir, "lastMark");
    lastMarkFile.delete();

    // now we are restarting BookieServer
    conf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());
    server = new BookieServer(conf);
    server.start();
    BookKeeper newBKClient = new BookKeeper(clientConf);
    // since Bookie checkpointed successfully before shutdown/crash,
    // we should be able to read from entryLogs though journal is deleted

    AtomicBoolean receivedExceptionForRead = new AtomicBoolean(false);

    LongStream.range(0, numOfLedgers).parallel().forEach((ledgerId) -> {
        try {
            LedgerHandle lh = newBKClient.openLedger(ledgerId, DigestType.CRC32, "passwd".getBytes());
            Enumeration<LedgerEntry> entries = lh.readEntries(0, numOfEntries - 1);
            while (entries.hasMoreElements()) {
                LedgerEntry entry = entries.nextElement();
                byte[] readData = entry.getEntry();
                Assert.assertEquals("Ledger Entry Data should match", new String("data".getBytes()),
                        new String(readData));
            }
            lh.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForRead.compareAndSet(false, true);
            LOG.error("Got Exception while trying to read entries of ledger, ledgerId: " + ledgerId, e);
        }
    });
    Assert.assertFalse("There shouldn't be any exceptions while creating readHandle and while reading"
            + " entries using readHandle", receivedExceptionForRead.get());

    newBKClient.close();
    server.shutdown();
}

From source file:org.apache.flink.runtime.fs.hdfs.HadoopRecoverableFsDataOutputStream.java

/**
 * Called when resuming execution after a failure and waits until the lease
 * of the file we are resuming is free.
 *
 * <p>The lease of the file we are resuming writing/committing to may still
 * belong to the process that failed previously and whose state we are
 * recovering.
 *
 * @param path The path to the file we want to resume writing to.
 */
private boolean waitUntilLeaseIsRevoked(final Path path) throws IOException {
    Preconditions.checkState(fs instanceof DistributedFileSystem);

    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
    dfs.recoverLease(path);

    final Deadline deadline = Deadline.now().plus(Duration.ofMillis(LEASE_TIMEOUT));

    final StopWatch sw = new StopWatch();
    sw.start();

    boolean isClosed = dfs.isFileClosed(path);
    while (!isClosed && deadline.hasTimeLeft()) {
        try {
            Thread.sleep(500L);
        } catch (InterruptedException e1) {
            throw new IOException("Recovering the lease failed: ", e1);
        }
        isClosed = dfs.isFileClosed(path);
    }
    return isClosed;
}

From source file:org.apache.flink.test.recovery.JobManagerHAProcessFailureRecoveryITCase.java

private void waitForTaskManagers(int numberOfTaskManagers, DispatcherGateway dispatcherGateway,
        FiniteDuration timeLeft) throws ExecutionException, InterruptedException {
    FutureUtils.retrySuccessfulWithDelay(
            () -> dispatcherGateway.requestClusterOverview(Time.milliseconds(timeLeft.toMillis())),
            Time.milliseconds(50L),
            org.apache.flink.api.common.time.Deadline.fromNow(Duration.ofMillis(timeLeft.toMillis())),
            clusterOverview -> clusterOverview.getNumTaskManagersConnected() >= numberOfTaskManagers,
            new ScheduledExecutorServiceAdapter(Executors.newSingleThreadScheduledExecutor())).get();
}

From source file:org.apache.james.mailbox.quota.mailing.QuotaMailingListenerConfigurationTest.java

@Test
public void fromShouldLoadGracePeriodInMs() throws Exception {
    DefaultConfigurationBuilder xmlConfiguration = new DefaultConfigurationBuilder();
    xmlConfiguration.load(toStream("<configuration><gracePeriod>12 ms</gracePeriod></configuration>"));

    assertThat(QuotaMailingListenerConfiguration.from(xmlConfiguration).getGracePeriod())
            .isEqualTo(Duration.ofMillis(12));
}

From source file:org.janusgraph.codepipelines.AwsCodePipelinesCi.java

private void run() throws IOException {
    final File file = new File(getOptionValue(PIPELINES_JSON_OPTION));
    final Region region = Region.of(getOptionValue(REGION_OPTION));
    final AwsCredentialsProvider provider = ProfileCredentialsProvider.builder()
            .profileName(getOptionValue(PROFILE_OPTION)).build();

    final ClientHttpConfiguration http = ClientHttpConfiguration.builder()
            .httpClient(ApacheSdkHttpClientFactory.builder() //consider netty some other time
                    .socketTimeout(Duration.ofSeconds(10)).connectionTimeout(Duration.ofMillis(750)).build()
                    .createHttpClient())
            .build();

    final AwsCodePipelinesLogic.AwsCodePipelinesLogicBuilder builder = AwsCodePipelinesLogic.builder()
            .githubToken(getOptionValue(GITHUB_TOKEN_OPTION)).githubOwner(getOptionValue(GITHUB_OWNER_OPTION))
            .githubRepo(getOptionValue(GITHUB_REPO_OPTION)).githubBranch(getOptionValue(GITHUB_BRANCH_OPTION))
            .codeBuildServiceRoleArn(getOptionValue(CODE_BUILD_SERVICE_ROLE_ARN_OPTION))
            .codePipelineRoleArn(getOptionValue(CODEPIPELINE_ROLE_ARN_OPTION))
            .s3Bucket(getOptionValue(BUCKET_OPTION))
            .s3BucketLocationConstraint(BucketLocationConstraint.fromValue(region.value()))
            .s3(S3Client.builder().httpConfiguration(http).region(region).credentialsProvider(provider).build())
            .codeBuild(CodeBuildClient.builder().httpConfiguration(http).region(region)
                    .credentialsProvider(provider).build())
            .codePipeline(CodePipelineClient.builder().httpConfiguration(http).region(region)
                    .credentialsProvider(provider).build());

    final Tag timeTag = Tag.builder().key("date").value(Long.toString(System.currentTimeMillis())).build();
    final PipelineDefinitions definitions = new ObjectMapper(new YAMLFactory()).readValue(file,
            PipelineDefinitions.class);
    definitions.getPipelines().stream()
            .map(def -> builder.pipelineName(def.getName()).sourceOutputArtifactName(def.getName() + "Source")
                    .parallelBuildActions(def.getParallelBuildActions())
                    .defaultComputeImage(definitions.getDefaultComputeImage())
                    .defaultComputeType(definitions.getDefaultComputeType())
                    .defaultPrivilegedMode(definitions.isDefaultPrivilegedMode())
                    .tags(ImmutableList.of(Tag.builder().key("project").value(def.getName()).build(), timeTag))
                    .build())
            .forEach(AwsCodePipelinesLogic::run);
}

From source file:org.janusgraph.core.util.ManagementUtil.java

private static void awaitIndexUpdate(JanusGraph g, String indexName, String relationTypeName, long time,
        TemporalUnit unit) {
    Preconditions.checkArgument(g != null && g.isOpen(), "Need to provide valid, open graph instance");
    Preconditions.checkArgument(time > 0 && unit != null, "Need to provide valid time interval");
    Preconditions.checkArgument(StringUtils.isNotBlank(indexName), "Need to provide an index name");
    StandardJanusGraph graph = (StandardJanusGraph) g;
    TimestampProvider times = graph.getConfiguration().getTimestampProvider();
    Instant end = times.getTime().plus(Duration.of(time, unit));
    boolean isStable = false;
    while (times.getTime().isBefore(end)) {
        JanusGraphManagement mgmt = graph.openManagement();
        try {
            if (StringUtils.isNotBlank(relationTypeName)) {
                RelationTypeIndex idx = mgmt.getRelationIndex(mgmt.getRelationType(relationTypeName),
                        indexName);
                Preconditions.checkArgument(idx != null, "Index could not be found: %s @ %s", indexName,
                        relationTypeName);
                isStable = idx.getIndexStatus().isStable();
            } else {
                JanusGraphIndex idx = mgmt.getGraphIndex(indexName);
                Preconditions.checkArgument(idx != null, "Index could not be found: %s", indexName);
                isStable = true;
                for (PropertyKey key : idx.getFieldKeys()) {
                    if (!idx.getIndexStatus(key).isStable())
                        isStable = false;
                }
            }
        } finally {
            mgmt.rollback();
        }
        if (isStable)
            break;
        try {
            times.sleepFor(Duration.ofMillis(500));
        } catch (InterruptedException e) {

        }
    }
    if (!isStable)
        throw new JanusGraphException(
                "Index did not stabilize within the given amount of time. For sufficiently long "
                        + "wait periods this is most likely caused by a failed/incorrectly shut down JanusGraph instance or a lingering transaction.");
}

From source file:org.janusgraph.diskstorage.es.ElasticSearchConfigTest.java

private void simpleWriteAndQuery(IndexProvider idx) throws BackendException, InterruptedException {

    final Duration maxWrite = Duration.ofMillis(2000L);
    final String storeName = "jvmlocal_test_store";
    final KeyInformation.IndexRetriever indexRetriever = IndexProviderTest
            .getIndexRetriever(IndexProviderTest.getMapping(idx.getFeatures()));

    BaseTransactionConfig txConfig = StandardBaseTransactionConfig.of(TimestampProviders.MILLI);
    IndexTransaction itx = new IndexTransaction(idx, indexRetriever, txConfig, maxWrite);
    assertEquals(0, itx
            .query(new IndexQuery(storeName, PredicateCondition.of(IndexProviderTest.NAME, Text.PREFIX, "ali")))
            .size());
    itx.add(storeName, "doc", IndexProviderTest.NAME, "alice", false);
    itx.commit();
    Thread.sleep(1500L); // Slightly longer than default 1s index.refresh_interval
    itx = new IndexTransaction(idx, indexRetriever, txConfig, maxWrite);
    assertEquals(0, itx
            .query(new IndexQuery(storeName, PredicateCondition.of(IndexProviderTest.NAME, Text.PREFIX, "zed")))
            .size());
    assertEquals(1, itx
            .query(new IndexQuery(storeName, PredicateCondition.of(IndexProviderTest.NAME, Text.PREFIX, "ali")))
            .size());
    itx.rollback();
}

From source file:org.jbb.system.web.cache.logic.FormCacheTranslator.java

private HazelcastClientSettings buildHazelcastClientSettings(CacheSettingsForm form,
        CacheSettings currentCacheSettings) {
    HazelcastClientSettings currentClientSettings = currentCacheSettings.getHazelcastClientSettings();
    HazelcastClientSettingsForm newClientSettings = form.getHazelcastClientSettings();
    HazelcastClientSettings clientSettings = new HazelcastClientSettings();
    clientSettings.setGroupName(newClientSettings.getGroupName());
    clientSettings.setGroupPassword(
            StringUtils.isEmpty(newClientSettings.getGroupPassword()) ? currentClientSettings.getGroupPassword()
                    : newClientSettings.getGroupPassword());
    clientSettings.setMembers(buildHazelcastMemberList(newClientSettings.getMembers(), true));
    clientSettings.setConnectionAttemptLimit(newClientSettings.getConnectionAttemptLimit());
    clientSettings
            .setConnectionAttemptPeriod(Duration.ofMillis(newClientSettings.getConnectionAttemptPeriod()));
    clientSettings.setConnectionTimeout(Duration.ofMillis(newClientSettings.getConnectionTimeout()));
    return clientSettings;
}

From source file:org.lanternpowered.server.world.pregen.LanternChunkPreGenerateTask.java

@Override
public void accept(Task task) {
    final long stepStartTime = System.currentTimeMillis();
    if (this.generationStartTime == 0) {
        this.generationStartTime = stepStartTime;
    }

    // Create and fire event.
    final ChunkPreGenerationEvent.Pre preEvent = SpongeEventFactory.createChunkPreGenerationEventPre(this.cause,
            this, this.world, false);

    if (Sponge.getEventManager().post(preEvent)) {
        // Cancelled event = cancelled task.
        cancelTask(task);
        return;
    }

    if (preEvent.getSkipStep()) {
        // Skip the step, but don't cancel the task.
        return;
    }

    // Count how many chunks are generated during the tick
    int count = 0;
    int skipped = 0;
    do {
        final Vector3i position = nextChunkPosition();
        final Vector3i pos1 = position.sub(Vector3i.UNIT_X);
        final Vector3i pos2 = position.sub(Vector3i.UNIT_Z);
        final Vector3i pos3 = pos2.sub(Vector3i.UNIT_X);

        // We can only skip generation if all chunks are loaded.
        if (!areAllChunksLoaded(position, pos1, pos2, pos3)) {

            // At least one chunk isn't generated, so to populate, we need to load them all.
            this.world.loadChunk(position, true);
            this.world.loadChunk(pos1, true);
            this.world.loadChunk(pos2, true);
            this.world.loadChunk(pos3, true);

            count += this.currentGenCount;
        } else {

            // Skipped them, log this.
            skipped += this.currentGenCount;
        }
    } while (hasNextChunkPosition() && checkChunkCount(count)
            && checkTickTime(System.currentTimeMillis() - stepStartTime));

    this.chunksGenerated += count;
    this.chunksSkipped += skipped;

    final long deltaTime = System.currentTimeMillis() - stepStartTime;
    this.generationEndTime = System.currentTimeMillis();

    // Create and fire event.
    if (Sponge.getEventManager().post(SpongeEventFactory.createChunkPreGenerationEventPost(this.cause, this,
            this.world, Duration.ofMillis(deltaTime), count, skipped))) {
        cancelTask(task);
        return;
    }

    if (!hasNextChunkPosition()) {
        // Generation has completed.
        Sponge.getEventManager()
                .post(SpongeEventFactory.createChunkPreGenerationEventComplete(this.cause, this, this.world));
        this.isCancelled = true;
        unregisterListener();
        task.cancel();
    }
}

From source file:org.onosproject.newoptical.OpticalPathProvisionerTest.java

/**
 * Checks setupConnectivity method works.
 */
@Test
public void testSetupConnectivity() {
    Bandwidth bandwidth = Bandwidth.bps(100);
    Duration latency = Duration.ofMillis(10);

    OpticalConnectivityId cid = target.setupConnectivity(CP12, CP71, bandwidth, latency);
    assertNotNull(cid);

    // Checks path computation is called as expected
    assertEquals(1, pathService.edges.size());
    assertEquals(CP12.deviceId(), pathService.edges.get(0).getKey());
    assertEquals(CP71.deviceId(), pathService.edges.get(0).getValue());

    // Checks intents are installed as expected
    assertEquals(1, intentService.submitted.size());
    assertEquals(OpticalConnectivityIntent.class, intentService.submitted.get(0).getClass());
    OpticalConnectivityIntent connIntent = (OpticalConnectivityIntent) intentService.submitted.get(0);
    assertEquals(CP31, connIntent.getSrc());
    assertEquals(CP52, connIntent.getDst());
}