List of usage examples for java.util.concurrent.TimeUnit.MINUTES
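Before the individual examples, a minimal self-contained sketch of the two patterns that recur below: converting between time units and passing TimeUnit.MINUTES as the timeout unit of a blocking call. The class name TimeUnitMinutesDemo is illustrative only and not taken from any of the listed projects.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class TimeUnitMinutesDemo {
    public static void main(String[] args) throws InterruptedException {
        // Convert minutes to other units
        System.out.println(TimeUnit.MINUTES.toSeconds(5));   // 300
        System.out.println(TimeUnit.MINUTES.toMillis(2));    // 120000

        // Convert other units to minutes (integer division, truncates)
        System.out.println(TimeUnit.SECONDS.toMinutes(90));                  // 1
        System.out.println(TimeUnit.MINUTES.convert(180, TimeUnit.SECONDS)); // 3

        // Use MINUTES as the timeout unit of a blocking call
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> System.out.println("task running"));
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}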
From source file:com.cloudant.sync.replication.BasicPullStrategy.java
public BasicPullStrategy(PullReplication pullReplication, ExecutorService executorService,
        PullConfiguration config) {
    Preconditions.checkNotNull(pullReplication, "PullReplication must not be null.");
    if (executorService == null) {
        executorService = new ThreadPoolExecutor(4, 4, 1, TimeUnit.MINUTES,
                new LinkedBlockingQueue<Runnable>());
    }
    if (config == null) {
        config = new PullConfiguration();
    }
    this.executor = executorService;
    this.config = config;
    this.filter = pullReplication.filter;

    String dbName = pullReplication.getSourceDbName();
    CouchConfig couchConfig = pullReplication.getCouchConfig();
    this.sourceDb = new CouchClientWrapper(dbName, couchConfig);
    this.targetDb = new DatastoreWrapper((DatastoreExtended) pullReplication.target);
    this.name = String.format("%s [%s]", LOG_TAG, pullReplication.getReplicatorName());
}
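In this constructor, TimeUnit.MINUTES is the unit of the ThreadPoolExecutor keep-alive argument: the fallback executor is created with four threads and a one-minute keep-alive time.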
From source file:com.galenframework.ide.Main.java
protected void initWebServer(ServiceProvider serviceProvider, IdeArguments ideArguments) {
    port(ideArguments.getPort());
    staticFileLocation("/public");
    externalStaticFileLocation(staticFolderForSpark);
    System.out.println("Reports are in: " + reportFolder);

    new DeviceController(serviceProvider.deviceService());
    new DomSnapshotController(serviceProvider.domSnapshotService());
    new FileBrowserController(serviceProvider.fileBrowserService());
    new SettingsController(serviceProvider.settingsService());
    new ProfilesController(serviceProvider.profilesService(), serviceProvider.settingsService());
    new TaskResultController(serviceProvider.taskResultService());
    new TesterController(serviceProvider.testerService());
    new HelpController();

    scheduledExecutorService.scheduleAtFixedRate(
            new TaskResultsStorageCleanupJob(taskResultsStorage, ideArguments.getKeepLastResults(),
                    ideArguments.getZombieResultsTimeout(), reportFolder),
            ideArguments.getCleanupPeriodInMinutes(), ideArguments.getCleanupPeriodInMinutes(),
            TimeUnit.MINUTES);

    if (ideArguments.getProfile() != null) {
        serviceProvider.profilesService().loadProfile(ideArguments.getProfile());
    }
}
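Here TimeUnit.MINUTES tells scheduleAtFixedRate to interpret both the initial delay and the period (ideArguments.getCleanupPeriodInMinutes()) as minutes, so the cleanup job repeats every configured number of minutes.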
From source file:com.hpcloud.util.Duration.java
public static Duration minutes(long count) {
    return new Duration(count, TimeUnit.MINUTES);
}
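A thin factory method: a call such as Duration.minutes(5) builds a five-minute Duration backed by TimeUnit.MINUTES.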
From source file:de.digiway.rapidbreeze.client.model.download.DownloadModel.java
private String formatEta(long seconds) {
    long min = TimeUnit.SECONDS.toMinutes(seconds);
    long sec = seconds - TimeUnit.MINUTES.toSeconds(min);
    return String.format("%02d:%02d Min", min, sec);
}
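For example, formatEta(3725) converts 3725 seconds into "62:05 Min".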
From source file:com.linkedin.pinot.integration.tests.OfflineClusterIntegrationTest.java
@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start the cluster
    startCluster();

    // Unpack the Avro files
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);

    createTable();

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    buildSegmentsFromAvro(avroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch
    // once 12 segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all segments to be online
    latch.await();

    TOTAL_DOCS = 115545;
    long timeInTwoMinutes = System.currentTimeMillis() + 2 * 60 * 1000L;
    long numDocs;
    while ((numDocs = getCurrentServingNumDocs()) < TOTAL_DOCS) {
        System.out.println("Current number of documents: " + numDocs);
        if (System.currentTimeMillis() < timeInTwoMinutes) {
            Thread.sleep(1000);
        } else {
            Assert.fail("Segments were not completely loaded within two minutes");
        }
    }
}
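The call executor.awaitTermination(10, TimeUnit.MINUTES) gives the H2 load, segment build and query-generator setup up to ten minutes to finish before the segments are uploaded.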
From source file:com.linkedin.pinot.integration.tests.RealtimeClusterIntegrationTest.java
@BeforeClass
public void setUp() throws Exception {
    // Start ZK and Kafka
    startZk();
    kafkaStarters = KafkaStarterUtils.startServers(getKafkaBrokerCount(), KafkaStarterUtils.DEFAULT_KAFKA_PORT,
            KafkaStarterUtils.DEFAULT_ZK_STR, KafkaStarterUtils.getDefaultKafkaConfiguration());

    // Create Kafka topic
    createKafkaTopic(KAFKA_TOPIC, KafkaStarterUtils.DEFAULT_ZK_STR);

    // Start the Pinot cluster
    startController();
    startBroker();
    startServer();

    // Unpack data
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);

    File schemaFile = getSchemaFile();

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    // Push data into the Kafka topic
    pushAvroIntoKafka(avroFiles, executor, KAFKA_TOPIC);

    // Wait for data push, query generator initialization and H2 load to complete
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Create Pinot table
    setUpTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schemaFile, avroFiles.get(0));

    // Wait until the Pinot event count matches with the number of events in the Avro files
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;

    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    int h2RecordCount = rs.getInt(1);
    rs.close();
    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
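As in the offline test, executor.awaitTermination(10, TimeUnit.MINUTES) bounds the wait for the H2 load, query-generator initialization and Kafka push to ten minutes.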
From source file:io.druid.tests.indexer.AbstractITRealtimeIndexTaskTest.java
void doTest() {
    LOG.info("Starting test: ITRealtimeIndexTaskTest");
    try {
        // the task will run for 3 minutes and then shutdown itself
        String task = setShutOffTime(getTaskAsString(getTaskResource()),
                DateTimes.utc(System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(3)));
        LOG.info("indexerSpec: [%s]\n", task);
        taskID = indexer.submitTask(task);

        // this posts 22 events, one every 4 seconds
        // each event contains the current time as its timestamp except
        // the timestamp for the 14th event is early enough that the event should be ignored
        // the timestamp for the 18th event is 2 seconds earlier than the 17th
        postEvents();

        // sleep for a while to let the events be ingested
        TimeUnit.SECONDS.sleep(5);

        // put the timestamps into the query structure
        String query_response_template = null;
        InputStream is = ITRealtimeIndexTaskTest.class.getResourceAsStream(getQueriesResource());
        if (null == is) {
            throw new ISE("could not open query file: %s", getQueriesResource());
        }
        query_response_template = IOUtils.toString(is, "UTF-8");

        String queryStr = query_response_template
                .replace("%%TIMEBOUNDARY_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
                .replace("%%TIMEBOUNDARY_RESPONSE_MAXTIME%%", TIMESTAMP_FMT.print(dtLast))
                .replace("%%TIMEBOUNDARY_RESPONSE_MINTIME%%", TIMESTAMP_FMT.print(dtFirst))
                .replace("%%TIMESERIES_QUERY_START%%", INTERVAL_FMT.print(dtFirst))
                .replace("%%TIMESERIES_QUERY_END%%", INTERVAL_FMT.print(dtLast.plusMinutes(2)))
                .replace("%%TIMESERIES_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
                .replace("%%POST_AG_REQUEST_START%%", INTERVAL_FMT.print(dtFirst))
                .replace("%%POST_AG_REQUEST_END%%", INTERVAL_FMT.print(dtLast.plusMinutes(2)))
                .replace("%%POST_AG_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtGroupBy.withSecondOfMinute(0)));

        // should hit the queries all on realtime task or some on realtime task
        // and some on historical. Which it is depends on where in the minute we were
        // when we started posting events.
        try {
            this.queryHelper.testQueriesFromString(getRouterURL(), queryStr, 2);
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }

        // wait for the task to complete
        indexer.waitUntilTaskCompletes(taskID);

        // task should complete only after the segments are loaded by historical node
        RetryUtil.retryUntil(new Callable<Boolean>() {
            @Override
            public Boolean call() {
                return coordinator.areSegmentsLoaded(INDEX_DATASOURCE);
            }
        }, true, 60000, 10, "Real-time generated segments loaded");

        // queries should be answered by historical
        this.queryHelper.testQueriesFromString(getRouterURL(), queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    } finally {
        unloadAndKillData(INDEX_DATASOURCE);
    }
}
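TimeUnit.MINUTES.toMillis(3) computes the three-minute shut-off offset for the realtime task, and TimeUnit.SECONDS.sleep(5) pauses the test briefly while the posted events are ingested.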
From source file:com.rackspacecloud.blueflood.cache.TtlCache.java
public TtlCache(String label, TimeValue expiration, int cacheConcurrency, final InternalAPI internalAPI) {
    try {
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        final String name = String.format(
                TtlCache.class.getPackage().getName() + ":type=%s,scope=%s,name=Stats",
                TtlCache.class.getSimpleName(), label);
        final ObjectName nameObj = new ObjectName(name);
        mbs.registerMBean(this, nameObj);
        instantiateYammerMetrics(TtlCache.class, label, nameObj);
    } catch (Exception ex) {
        log.error("Unable to register mbean for " + getClass().getName());
    }

    generalErrorMeter = Metrics.newMeter(TtlCache.class, "Load Errors", label, "Rollups", TimeUnit.MINUTES);
    httpErrorMeter = Metrics.newMeter(TtlCache.class, "Http Errors", label, "Rollups", TimeUnit.MINUTES);

    CacheLoader<String, Map<ColumnFamily<Locator, Long>, TimeValue>> loader = new CacheLoader<String, Map<ColumnFamily<Locator, Long>, TimeValue>>() {
        // values from the default account are used to build a ttl map for tenants that do not exist in the
        // internal API. These values are put into the cache, meaning subsequent cache requests do not
        // incur a miss and hit the internal API.
        private final Account DEFAULT_ACCOUNT = new Account() {
            @Override
            public TimeValue getMetricTtl(String resolution) {
                return SAFETY_TTLS
                        .get(AstyanaxIO.getColumnFamilyMapper().get(Granularity.fromString(resolution).name()));
            }
        };

        @Override
        public Map<ColumnFamily<Locator, Long>, TimeValue> load(final String key) throws Exception {
            // load account, build ttl map.
            try {
                Account acct = internalAPI.fetchAccount(key);
                return buildTtlMap(acct);
            } catch (HttpResponseException ex) {
                // cache the default value on a 404. this means that we will not be hammering the API for values
                // that are constantly not there. The other option was to let the Http error bubble out and use a
                // value from SAFETY_TTLS. But the same thing (an HTTP round trip) would happen the very next
                // time a TTL is requested.
                if (ex.getStatusCode() == 404) {
                    httpErrorMeter.mark();
                    log.warn(ex.getMessage());
                    return buildTtlMap(DEFAULT_ACCOUNT);
                } else {
                    throw ex;
                }
            }
        }
    };

    cache = CacheBuilder.newBuilder().expireAfterWrite(expiration.getValue(), expiration.getUnit())
            .concurrencyLevel(cacheConcurrency).recordStats().build(loader);
}
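Both error meters are registered with TimeUnit.MINUTES as their rate unit, so the load-error and HTTP-error rates are reported per minute.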
From source file:com.samples.platform.serviceprovider.library.internal.GetBookOperation.java
/**
 * @param start the start time in milliseconds, as returned by System.currentTimeMillis()
 * @return the duration in 000:00:00.000 format.
 */
private String requestDuration(final long start) {
    long millis = System.currentTimeMillis() - start;
    String hmss = String.format("%03d:%02d:%02d.%03d", TimeUnit.MILLISECONDS.toHours(millis),
            TimeUnit.MILLISECONDS.toMinutes(millis)
                    - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(millis)),
            TimeUnit.MILLISECONDS.toSeconds(millis)
                    - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(millis)),
            TimeUnit.MILLISECONDS.toMillis(millis)
                    - TimeUnit.SECONDS.toMillis(TimeUnit.MILLISECONDS.toSeconds(millis)));
    return hmss;
}
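For an elapsed time of 3,725,250 ms, for instance, this returns "001:02:05.250".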
From source file:org.apache.james.jmap.UserProvisionningConcurrencyTest.java
@Test
public void provisionMailboxesShouldNotDuplicateMailboxByName() throws Exception {
    String token = HttpJmapAuthentication.authenticateJamesUser(baseUri(), USER, PASSWORD).serialize();

    boolean termination = new ConcurrentTestRunner(10, 1,
            (a, b) -> with().header("Authorization", token).body("[[\"getMailboxes\", {}, \"#0\"]]").post("/jmap"))
                    .run().awaitTermination(1, TimeUnit.MINUTES);

    assertThat(termination).isTrue();

    given().header("Authorization", token).body("[[\"getMailboxes\", {}, \"#0\"]]").when().post("/jmap").then()
            .statusCode(200).body(NAME, equalTo("mailboxes")).body(ARGUMENTS + ".list", hasSize(5))
            .body(ARGUMENTS + ".list.name", hasItems(DefaultMailboxes.DEFAULT_MAILBOXES.toArray()));
}
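awaitTermination(1, TimeUnit.MINUTES) caps the wait for the ten concurrent getMailboxes requests at one minute; the test then asserts that they all completed in time.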