Example usage for java.util.concurrent TimeUnit MINUTES

Introduction

On this page you can find example usages of java.util.concurrent TimeUnit MINUTES, collected from open-source projects.

Prototype

TimeUnit MINUTES

Document

Time unit representing sixty seconds.
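
Before the project examples below, here is a minimal, self-contained sketch of the most common MINUTES operations, unit conversion and sleeping; the class name and printed values are illustrative only:

import java.util.concurrent.TimeUnit;

public class TimeUnitMinutesDemo {
    public static void main(String[] args) throws InterruptedException {
        // convert minutes to other units
        System.out.println(TimeUnit.MINUTES.toSeconds(2)); // 120
        System.out.println(TimeUnit.MINUTES.toMillis(1));  // 60000
        // convert into minutes from another unit
        System.out.println(TimeUnit.SECONDS.toMinutes(180)); // 3
        // sleep for the given number of minutes (zero here, so the demo returns immediately)
        TimeUnit.MINUTES.sleep(0);
    }
}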

Usage

From source file: hsa.awp.common.mail.MailStressTest.java

@Test
public void testThreaded() throws InterruptedException {

    for (int i = 1; i <= amount; i++) {
        service.execute(new Runnable() {
            @Override
            public void run() {

                long start = System.currentTimeMillis();

                IMail mail = mailFactory.getInstance(recipients.get(sent % recipients.size()), subject, message,
                        sender);
                mail.send();
                inc();

                long end = System.currentTimeMillis();

                System.out.println(sent + ". mail sent time : " + (end - start));

                if (sent % (amount / 10) == 0) {
                    int percent = sent * 100 / amount;
                    System.out.println("--------------------------------------------------");
                    System.out.println(" > " + percent + "% completed : " + sent + " mail sent");
                }
            }
        });
    }

    System.out.println("Tasks in Queue : " + queue.size());
    // awaitTermination before shutdown() just waits out the timeout,
    // so this loop polls the queue roughly once per second
    while (queue.size() != 0) {
        service.awaitTermination(1, TimeUnit.SECONDS);
        System.out.println("Tasks in Queue : " + queue.size());
    }
    service.shutdown();
    service.awaitTermination(1, TimeUnit.MINUTES);
    System.out.println("Shutdown complete.");
}
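
The loop above works, but the conventional sequence is shutdown() followed by awaitTermination(). A minimal sketch with a MINUTES timeout; the pool size and task are placeholders:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownIdiom {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService service = Executors.newFixedThreadPool(4);
        service.execute(() -> System.out.println("task ran"));
        service.shutdown(); // stop accepting new tasks
        // wait up to one minute for already-submitted tasks to finish
        if (!service.awaitTermination(1, TimeUnit.MINUTES)) {
            service.shutdownNow(); // cancel whatever is still running
        }
    }
}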

From source file: com.datatorrent.demos.dimensions.ads.ApplicationWithHDHT.java

@Override
public void populateDAG(DAG dag, Configuration conf) {
    InputItemGenerator input = dag.addOperator("InputGenerator", InputItemGenerator.class);
    DimensionsComputation<AdInfo, AdInfo.AdInfoAggregateEvent> dimensions = dag.addOperator(
            "DimensionsComputation", new DimensionsComputation<AdInfo, AdInfo.AdInfoAggregateEvent>());
    dag.getMeta(dimensions).getAttributes().put(Context.OperatorContext.APPLICATION_WINDOW_COUNT, 4);
    String[] dimensionSpecs = new String[] { "time=" + TimeUnit.MINUTES, "time=" + TimeUnit.MINUTES + ":adUnit",
            "time=" + TimeUnit.MINUTES + ":advertiserId", "time=" + TimeUnit.MINUTES + ":publisherId",
            "time=" + TimeUnit.MINUTES + ":advertiserId:adUnit",
            "time=" + TimeUnit.MINUTES + ":publisherId:adUnit",
            "time=" + TimeUnit.MINUTES + ":publisherId:advertiserId",
            "time=" + TimeUnit.MINUTES + ":publisherId:advertiserId:adUnit" };

    AdInfoAggregator[] aggregators = new AdInfoAggregator[dimensionSpecs.length];
    for (int i = dimensionSpecs.length; i-- > 0;) {
        AdInfoAggregator aggregator = new AdInfoAggregator();
        aggregator.init(dimensionSpecs[i]);
        aggregators[i] = aggregator;
    }
    dimensions.setAggregators(aggregators);

    AdsDimensionStoreOperator store = dag.addOperator("Store", AdsDimensionStoreOperator.class);
    TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
    store.setFileStore(hdsFile);
    store.setAggregator(new AdInfoAggregator());
    dag.setAttribute(store, Context.OperatorContext.COUNTERS_AGGREGATOR,
            new BasicCounters.LongAggregator<MutableLong>());

    Operator.OutputPort<String> queryPort;
    Operator.InputPort<Object> queryResultPort;
    if (conf.getBoolean(PROP_USE_WEBSOCKETS, false)) {
        String gatewayAddress = dag.getValue(DAG.GATEWAY_CONNECT_ADDRESS);
        URI uri = URI.create("ws://" + gatewayAddress + "/pubsub");
        //LOG.info("WebSocket with gateway at: {}", gatewayAddress);
        PubSubWebSocketInputOperator<String> wsIn = dag.addOperator("Query",
                new PubSubWebSocketInputOperator<String>());
        wsIn.setUri(uri);
        queryPort = wsIn.outputPort;
        PubSubWebSocketOutputOperator<Object> wsOut = dag.addOperator("QueryResult",
                new PubSubWebSocketOutputOperator<Object>());
        wsOut.setUri(uri);
        queryResultPort = wsOut.input;
    } else {
        KafkaSinglePortStringInputOperator queries = dag.addOperator("Query",
                new KafkaSinglePortStringInputOperator());
        queries.setConsumer(new SimpleKafkaConsumer());
        queryPort = queries.outputPort;
        KafkaSinglePortOutputOperator<Object, Object> queryResult = dag.addOperator("QueryResult",
                new KafkaSinglePortOutputOperator<Object, Object>());
        queryResult.getConfigProperties().put("serializer.class", KafkaJsonEncoder.class.getName());
        queryResultPort = queryResult.inputPort;
    }

    dag.addStream("InputStream", input.outputPort, dimensions.data).setLocality(Locality.CONTAINER_LOCAL);
    dag.addStream("DimensionalData", dimensions.output, store.input);
    dag.addStream("Query", queryPort, store.query);
    dag.addStream("QueryResult", store.queryResult, queryResultPort);
}
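
In the dimension specs above, MINUTES is used for its string form rather than for conversion: concatenation calls the enum's toString(), so each spec comes out as a literal such as time=MINUTES:adUnit. A quick self-contained check:

import java.util.concurrent.TimeUnit;

public class DimensionSpecCheck {
    public static void main(String[] args) {
        // Enum.toString() defaults to the constant's name
        System.out.println("time=" + TimeUnit.MINUTES + ":adUnit"); // time=MINUTES:adUnit
    }
}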

From source file: com.spotify.reaper.ReaperApplication.java

@Override
public void run(ReaperApplicationConfiguration config, Environment environment) throws Exception {
    // Using UTC times everywhere as default. Affects only Joda-Time.
    DateTimeZone.setDefault(DateTimeZone.UTC);

    checkConfiguration(config);
    context.config = config;

    addSignalHandlers(); // SIGHUP, etc.

    LOG.info("initializing runner thread pool with {} threads", config.getRepairRunThreadCount());
    context.repairManager = new RepairManager();
    context.repairManager.initializeThreadPool(config.getRepairRunThreadCount(),
            config.getHangingRepairTimeoutMins(), TimeUnit.MINUTES, 30, TimeUnit.SECONDS);

    if (context.storage == null) {
        LOG.info("initializing storage of type: {}", config.getStorageType());
        context.storage = initializeStorage(config, environment);
    } else {
        LOG.info("storage already given in context, not initializing a new one");
    }

    if (context.jmxConnectionFactory == null) {
        LOG.info("no JMX connection factory given in context, creating default");
        context.jmxConnectionFactory = new JmxConnectionFactory();
    }

    // read jmx host/port mapping from config and provide to jmx con.factory
    Map<String, Integer> jmxPorts = config.getJmxPorts();
    if (jmxPorts != null) {
        LOG.debug("using JMX ports mapping: {}", jmxPorts);
        context.jmxConnectionFactory.setJmxPorts(jmxPorts);
    }

    // Enable cross-origin requests for using external GUI applications.
    if (config.isEnableCrossOrigin() || System.getProperty("enableCrossOrigin") != null) {
        final FilterRegistration.Dynamic cors = environment.servlets().addFilter("crossOriginRequests",
                CrossOriginFilter.class);
        cors.setInitParameter("allowedOrigins", "*");
        cors.setInitParameter("allowedHeaders", "X-Requested-With,Content-Type,Accept,Origin");
        cors.setInitParameter("allowedMethods", "OPTIONS,GET,PUT,POST,DELETE,HEAD");
        cors.addMappingForUrlPatterns(EnumSet.allOf(DispatcherType.class), true, "/*");
    }

    JmxCredentials jmxAuth = config.getJmxAuth();
    if (jmxAuth != null) {
        LOG.debug("using specified JMX credentials for authentication");
        context.jmxConnectionFactory.setJmxAuth(jmxAuth);
    }

    LOG.info("creating and registering health checks");
    // Notice that health checks are registered under the admin application on /healthcheck
    final ReaperHealthCheck healthCheck = new ReaperHealthCheck(context);
    environment.healthChecks().register("reaper", healthCheck);

    LOG.info("creating resources and registering endpoints");
    final PingResource pingResource = new PingResource();
    environment.jersey().register(pingResource);

    final ClusterResource addClusterResource = new ClusterResource(context);
    environment.jersey().register(addClusterResource);

    final RepairRunResource addRepairRunResource = new RepairRunResource(context);
    environment.jersey().register(addRepairRunResource);

    final RepairScheduleResource addRepairScheduleResource = new RepairScheduleResource(context);
    environment.jersey().register(addRepairScheduleResource);
    Thread.sleep(1000);

    SchedulingManager.start(context);

    if (config.hasAutoSchedulingEnabled()) {
        LOG.debug("using specified configuration for auto scheduling: {}", config.getAutoScheduling());
        AutoSchedulingManager.start(context);
    }

    LOG.info("resuming pending repair runs");
    context.repairManager.resumeRunningRepairRuns(context);
}
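
The hanging-repair timeout above is expressed directly in MINUTES. For comparison, a generic sketch of minute-scale periodic work with a scheduled executor; the period and task are placeholders:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicTask {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // run every 10 minutes, starting one minute from now
        scheduler.scheduleAtFixedRate(() -> System.out.println("tick"), 1, 10, TimeUnit.MINUTES);
    }
}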

From source file: org.excalibur.aqmp.handler.DeploymentHandler.java

@SuppressWarnings("unchecked")
public void handle(Deployment deployment) {
    NodeManagerFactory.getManagerReference();

    try {

        WorkflowDescription workflowDescription = deploymentService.createWorkflowFor(deployment);
        String thisHostname = System.getProperty("org.excalibur.instance.hostname");

        VirtualMachine localNode = instanceService_.getInstanceByName(thisHostname);
        checkState(localNode != null);

        ThreadFactory threadFactory = new ThreadFactoryBuilder()
                .setNameFormat("event-bus-for-workflow-" + workflowDescription.getName() + "-%d").build();

        int numberOfActivities = workflowDescription.getNumberOfActivities();

        LOG.debug("workflow [{}] has [{}] activities [{}]", workflowDescription.getName(), numberOfActivities);

        ExecutorService eventBusExecutor = DynamicExecutors.newScalingThreadPool(1, numberOfActivities, 5,
                TimeUnit.MINUTES, threadFactory);
        EventBus bus = new AsyncEventBus(eventBusExecutor);

        final Workflow workflow = new WorkflowBuilder().description(workflowDescription).eventBus(bus).build();
        final WorkflowExecutionStrategy strategy = new WorkflowExecutionStrategy(workflow);

        WorkflowContext context = new WorkflowContextImpl(workflow, workflowRepository_, strategy);
        context.setWorkflowCoordinator(localNode);

        context.registerExecutors(eventBusExecutor);

        final WorkflowExecutor executor = new WorkflowExecutor(context, taskRepository_, userRepository_,
                regionRepository_, localNode);
        List<ActivityExecutionContext> executionContexts = executor.execute();

        for (ActivityExecutionContext executionContext : executionContexts) {
            for (TaskType<?> task : executionContext.getTasks()) {
                TaskResult<Instances> result = (TaskResult<Instances>) task.getResult();

                if (result != null && TaskState.SUCCESS.equals(result.getTaskState())) {
                    LOG.debug("Task [{}] executed successfully in [{}] ms",
                            TimeUnit.MILLISECONDS.toMillis(result.getFinishTime() - result.getStartTime()));

                    newInstanceTemplate_.convertAndSend(result.getResult());
                }
            }
        }

        long elapsedTime = workflowDescription.getCreatedIn() != null
                ? System.currentTimeMillis() - workflowDescription.getCreatedIn().getTime()
                : 0;
        LOG.debug("Finished the workflow [{}] in [{}] seconds", workflowDescription.getName(),
                TimeUnit.MILLISECONDS.toSeconds(elapsedTime));
    } catch (Exception exception) {
        String text = DeploymentUtils.marshalQuietly(deployment);
        LOG.error("Error on executing the deployment [{}]. Error message [{}]", text, exception.getMessage(),
                exception);
        AnyThrow.throwUncheked(exception);
    }
}

From source file: mitm.BouncyCastleSslEngineSource.java

private static Cache<String, SSLContext> initDefaultCertificateCache() {
    return CacheBuilder.newBuilder() //
            .expireAfterAccess(5, TimeUnit.MINUTES) //
            .concurrencyLevel(16) //
            .build();
}
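
A hedged usage sketch for a cache built this way; the host key is hypothetical. Guava evicts an entry five minutes after it was last read or written:

import java.util.concurrent.TimeUnit;
import javax.net.ssl.SSLContext;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class CertificateCacheDemo {
    public static void main(String[] args) {
        Cache<String, SSLContext> cache = CacheBuilder.newBuilder()
                .expireAfterAccess(5, TimeUnit.MINUTES) // evict five minutes after last access
                .build();
        // a cold cache simply misses; real code would create and cache a context here
        SSLContext ctx = cache.getIfPresent("example.com");
        System.out.println(ctx == null ? "miss" : "hit");
    }
}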

From source file: io.druid.tests.indexer.ITRealtimeIndexTaskTest.java

@Test
public void testRealtimeIndexTask() throws Exception {
    LOG.info("Starting test: ITRealtimeIndexTaskTest");
    try {
        // the task will run for 3 minutes and then shutdown itself
        String task = setShutOffTime(getTaskAsString(REALTIME_TASK_RESOURCE),
                new DateTime(System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(3)));
        LOG.info("indexerSpec: [%s]\n", task);
        taskID = indexer.submitTask(task);

        // this posts 22 events, one every 4 seconds
        // each event contains the current time as its timestamp except
        //   the timestamp for the 14th event is early enough that the event should be ignored
        //   the timestamp for the 18th event is 2 seconds earlier than the 17th
        postEvents();

        // sleep for a while to let the events be ingested
        TimeUnit.SECONDS.sleep(5);

        // put the timestamps into the query structure
        String query_response_template = null;
        InputStream is = ITRealtimeIndexTaskTest.class.getResourceAsStream(REALTIME_QUERIES_RESOURCE);
        if (null == is) {
            throw new ISE("could not open query file: %s", REALTIME_QUERIES_RESOURCE);
        }
        query_response_template = IOUtils.toString(is, "UTF-8");

        String queryStr = query_response_template
                .replace("%%TIMEBOUNDARY_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
                .replace("%%TIMEBOUNDARY_RESPONSE_MAXTIME%%", TIMESTAMP_FMT.print(dtLast))
                .replace("%%TIMEBOUNDARY_RESPONSE_MINTIME%%", TIMESTAMP_FMT.print(dtFirst))
                .replace("%%TIMESERIES_QUERY_START%%", INTERVAL_FMT.print(dtFirst))
                .replace("%%TIMESERIES_QUERY_END%%", INTERVAL_FMT.print(dtLast.plusMinutes(2)))
                .replace("%%TIMESERIES_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
                .replace("%%POST_AG_REQUEST_START%%", INTERVAL_FMT.print(dtFirst))
                .replace("%%POST_AG_REQUEST_END%%", INTERVAL_FMT.print(dtLast.plusMinutes(2))).replace(
                        "%%POST_AG_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtGroupBy.withSecondOfMinute(0)));

        // the queries may all hit the realtime task, or be split between the
        // realtime task and historical, depending on where in the minute we
        // were when we started posting events
        try {
            this.queryHelper.testQueriesFromString(getRouterURL(), queryStr, 2);
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }

        // wait for the task to complete
        indexer.waitUntilTaskCompletes(taskID);

        // task should complete only after the segments are loaded by historical node
        RetryUtil.retryUntil(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                return coordinator.areSegmentsLoaded(INDEX_DATASOURCE);
            }
        }, true, 60000, 10, "Real-time generated segments loaded");

        // queries should be answered by historical
        this.queryHelper.testQueriesFromString(getRouterURL(), queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    } finally {
        unloadAndKillData(INDEX_DATASOURCE);
    }
}

From source file: com.rationaldevelopers.oss.ApplicationConfiguration.java

@Bean
public IQueue<SimpleItem> inboundQueue(final HazelcastInstance hazelcastInstance,
        final SimpleItemService simpleItemService, final IMap<String, SimpleItem> cache) {

    IQueue<SimpleItem> queue = hazelcastInstance.getQueue(QUEUE_NAME);
    queue.addItemListener(new ItemListener<SimpleItem>() {
        @Override
        public void itemAdded(ItemEvent<SimpleItem> itemEvent) {
            SimpleItem i = itemEvent.getItem();
            try {
                simpleItemService.saveOrUpdate(i);
                cache.put(i.getSid(), i, 2, TimeUnit.MINUTES);
                queue.remove(i);
            } catch (Exception e) {
                i.incrementCount();
                if (i.getCount() >= 10) {
                    //Effectively handling a poison message or dead letter
                    queue.remove(i);
                }
            }
        }

        @Override
        public void itemRemoved(ItemEvent<SimpleItem> itemEvent) {
            System.out.println("Item Processed: {}" + itemEvent.getItem().getName());
        }
    }, true);
    log.info("=========================== Created Queue ======== {}", queue);
    return queue;
}
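
The three-argument put above stores the entry with a two-minute time-to-live. A minimal sketch of the same overload in isolation; the instance, map name, and key are hypothetical:

import java.util.concurrent.TimeUnit;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;

public class TtlPutDemo {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        IMap<String, String> map = hz.getMap("demo");
        map.put("key", "value", 2, TimeUnit.MINUTES); // entry expires two minutes after the put
        hz.shutdown();
    }
}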

From source file: me.j360.trace.autoconfiguration.ui.ZipkinUiAutoConfiguration.java

@RequestMapping(value = "/config.json", method = GET, produces = APPLICATION_JSON_VALUE)
public ResponseEntity<ZipkinUiProperties> serveUiConfig() {
    return ResponseEntity.ok().cacheControl(CacheControl.maxAge(10, TimeUnit.MINUTES)).body(ui);
}
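
Spring converts the MINUTES value to seconds when rendering the Cache-Control header. A small sketch of the value it produces, assuming org.springframework.http.CacheControl is on the classpath:

import java.util.concurrent.TimeUnit;
import org.springframework.http.CacheControl;

public class CacheControlDemo {
    public static void main(String[] args) {
        // ten minutes become 600 seconds in the header value
        System.out.println(CacheControl.maxAge(10, TimeUnit.MINUTES).getHeaderValue()); // max-age=600
    }
}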

From source file: com.janrain.backplane.config.BackplaneConfig.java

@SuppressWarnings({ "UnusedDeclaration" })
private BackplaneConfig() {
    ConsoleReporter.enable(10, TimeUnit.MINUTES);

    // Dump metrics to graphite server
    String graphiteServer = System.getProperty(SystemProperties.GRAPHITE_SERVER());
    if (StringUtils.isNotBlank(graphiteServer)) {
        try {
            String[] args = graphiteServer.split(":");
            String server = args[0];
            int port = Integer.parseInt(args[1]);
            GraphiteReporter.enable(10, TimeUnit.SECONDS, server, port,
                    SystemProperties.machineName().replace(".", "_") + "_" + SystemProperties.INSTANCE_ID());
            logger.info("Graphite server enabled at " + graphiteServer);
        } catch (Exception e) {
            logger.warn("could not enable Graphite from " + graphiteServer
                    + "; must be in the form SERVER:PORT");
        }
    }
    try {
        buildProperties.load(BackplaneConfig.class.getResourceAsStream(BUILD_PROPERTIES));
        //assert(StringUtils.isNotBlank(getEncryptionKey()));
    } catch (Exception e) {
        String err = "Error loading build properties from " + BUILD_PROPERTIES;
        logger.error(err, e);
        throw new RuntimeException(err, e);
    }

    logger.info("Configured Backplane Server instance: " + SystemProperties.INSTANCE_ID());
}

From source file: com.cloudant.client.org.lightcouch.CouchDbClient.java

CouchDbClient(CouchDbConfig config) {
    final CouchDbProperties props = config.getProperties();

    try {
        this.clientUri = props.getCouchDbURL().toURI();
    } catch (URISyntaxException e) {
        throw new RuntimeException("Error converting account URL to URI.", e);
    }

    this.gson = GsonHelper.initGson(new GsonBuilder()).create();

    //if OkHttp is available then use it for connection pooling, otherwise default to the
    //JVM built-in pooling for HttpUrlConnection
    if (OkHttpClientHttpUrlConnectionFactory.isOkUsable() && props.getMaxConnections() > 0) {
        OkHttpClientHttpUrlConnectionFactory factory = new OkHttpClientHttpUrlConnectionFactory();
        //keep connections open for as long as possible; the server closes idle
        //connections after 2.5 minutes, so a 3 minute keep-alive outlasts it
        ConnectionPool pool = new ConnectionPool(props.getMaxConnections(), TimeUnit.MINUTES.toMillis(3));
        factory.getOkHttpClient().setConnectionPool(pool);
        this.factory = factory;
    } else {
        factory = new DefaultHttpUrlConnectionFactory();
    }

    //set the proxy if it has been configured
    if (props.getProxyURL() != null) {
        factory.setProxy(props.getProxyURL());
    }

    this.requestInterceptors = new ArrayList<HttpConnectionRequestInterceptor>();
    this.responseInterceptors = new ArrayList<HttpConnectionResponseInterceptor>();

    if (props.getRequestInterceptors() != null) {
        this.requestInterceptors.addAll(props.getRequestInterceptors());
    }

    if (props.getResponseInterceptors() != null) {
        this.responseInterceptors.addAll(props.getResponseInterceptors());
    }
}