Example usage for java.util.concurrent ScheduledExecutorService shutdown

Introduction

On this page you can find example usage for java.util.concurrent ScheduledExecutorService shutdown.

Prototype

void shutdown();

Document

Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted.
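
As a quick illustration of that contract, the minimal sketch below (class and variable names are illustrative, not taken from the examples that follow) schedules one task before shutdown() and attempts to schedule another afterwards, then blocks with awaitTermination until the queue drains:

import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Scheduled before shutdown(): by default ScheduledThreadPoolExecutor
        // still runs delayed tasks that were submitted before the shutdown.
        scheduler.schedule(() -> System.out.println("ran before shutdown"), 100, TimeUnit.MILLISECONDS);

        scheduler.shutdown(); // no new tasks are accepted from here on

        try {
            // Scheduled after shutdown(): rejected.
            scheduler.schedule(() -> System.out.println("never runs"), 100, TimeUnit.MILLISECONDS);
        } catch (RejectedExecutionException e) {
            System.out.println("rejected after shutdown");
        }

        // Block until previously submitted tasks finish (or the timeout elapses).
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}

Note that shutdown() does not wait for tasks to finish and, by default, cancels periodic (fixed-rate and fixed-delay) tasks, which is why several of the examples below pair it with ScheduledFuture.cancel or an explicit awaitTermination.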

Usage

From source file:com.hurence.logisland.connect.opc.ua.OpcUaSourceTaskTest.java

@Test
@Ignore
public void e2eTest() throws Exception {
    OpcUaSourceConnector connector = new OpcUaSourceConnector();
    Map<String, String> properties = new HashMap<>();
    properties.put(OpcUaSourceConnector.PROPERTY_AUTH_BASIC_USER, "test");
    properties.put(OpcUaSourceConnector.PROPERTY_AUTH_BASIC_PASSWORD, "test");
    properties.put(CommonDefinitions.PROPERTY_CONNECTION_SOCKET_TIMEOUT, "10000");
    properties.put(CommonDefinitions.PROPERTY_SERVER_URI, "opc.tcp://127.0.0.1:53530/OPCUA/SimulationServer");
    properties.put(CommonDefinitions.PROPERTY_TAGS_ID, "ns=5;s=Counter1,ns=5;s=Random1,ns=5;s=Sinusoid1");
    properties.put(CommonDefinitions.PROPERTY_TAGS_STREAM_MODE, "SUBSCRIBE,POLL,SUBSCRIBE");
    properties.put(CommonDefinitions.PROPERTY_TAGS_SAMPLING_RATE, "PT3S,PT0.01S,PT1S");
    properties.put(OpcUaSourceConnector.PROPERTY_DATA_PUBLICATION_RATE, "PT1S");

    connector.start(properties);
    OpcUaSourceTask task = new OpcUaSourceTask();
    task.start(connector.taskConfigs(1).get(0));
    ScheduledExecutorService es = Executors.newSingleThreadScheduledExecutor();
    Gson json = new Gson();
    es.scheduleAtFixedRate(() -> {
        try {
            task.poll().stream()
                    .map(a -> Pair.of(new Date((Long) a.sourceOffset().get(OpcRecordFields.SAMPLED_TIMESTAMP)),
                            json.toJson(a)))
                    .forEach(System.out::println);
        } catch (InterruptedException e) {
            //do nothing
        }
    }, 0, 10, TimeUnit.MILLISECONDS);

    Thread.sleep(600000);
    task.stop();
    es.shutdown();
    connector.stop();
}

From source file:UserInterface.PDCPrimaryDoctorRole.PDCPrimaryDoctorReportsJPanel.java

private void generateReportToggleBtnActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_generateReportToggleBtnActionPerformed
    // TODO add your handling code here:
    if (!generateReportToggleBtn.isSelected()) {
        reportJPanel.removeAll();
        //reportJPanel.revalidate();
        reportJPanel.repaint();
        generateReportToggleBtn.setText("Generate Live Graph");
    } else {
        generateReportToggleBtn.setText("Stop Live Graph");
    }
    ScheduledExecutorService scheduledExecutorService2 = Executors.newScheduledThreadPool(5);

    Runnable task = new Runnable() {
        @Override
        public void run() {
            if (generateReportToggleBtn.isSelected()) {
                Patient patient = (Patient) patientListComboBox.getSelectedItem();
                String attribute = (String) vitalSignComboBox.getSelectedItem();
                Methods.inputVitalSigns(patient);
                populateGraphs(patient, attribute);
            } else {
                scheduledExecutorService2.shutdown();
            }

        }
    };

    ScheduledFuture<?> scheduledTask = scheduledExecutorService2.scheduleAtFixedRate(task, 1, 10, TimeUnit.SECONDS);

}

From source file:edu.umich.robot.HeadlessApplication.java

/**
 * <p>
 * Start Soar, wait for timeout or Soar to stop.
 * 
 * @param controller
 *            Simulation controller initialized.
 * @throws InterruptedException
 *             Thrown on thread interrupt.
 */
private void run(Controller controller) throws InterruptedException {
    final CountDownLatch doneSignal = new CountDownLatch(1);

    Thread shutdownHook = new Thread() {
        @Override
        public void run() {
            logger.warn("Shutdown detected.");
            shutdown.set(true);
            doneSignal.countDown();
        }
    };

    Runtime.getRuntime().addShutdownHook(shutdownHook);

    try {
        controller.addListener(SoarStoppedEvent.class, new RobotEventListener() {
            public void onEvent(RobotEvent event) {
                logger.info("Soar stop detected.");
                doneSignal.countDown();
            }
        });

        ScheduledExecutorService schexec = MoreExecutors
                .getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1));
        ScheduledFuture<?> task = null;
        if (seconds > 0) {
            task = schexec.schedule(new Runnable() {
                public void run() {
                    logger.info("Time up.");
                    doneSignal.countDown();
                }
            }, seconds, TimeUnit.SECONDS);
        }

        controller.startSoar(cycles);

        doneSignal.await();

        if (task != null)
            task.cancel(true);
        schexec.shutdown();
    } finally {
        if (!shutdown.get())
            Runtime.getRuntime().removeShutdownHook(shutdownHook);
    }
}

From source file:com.liferay.portal.search.elasticsearch.internal.connection.EmbeddedElasticsearchConnection.java

@Override
public void close() {
    super.close();

    if (_node == null) {
        return;
    }

    try {
        Class.forName(ByteBufferUtil.class.getName());
    } catch (ClassNotFoundException cnfe) {
        if (_log.isWarnEnabled()) {
            _log.warn(
                    StringBundler.concat("Unable to preload ", String.valueOf(ByteBufferUtil.class),
                            " to prevent Netty shutdown concurrent class loading ", "interruption issue"),
                    cnfe);
        }
    }

    if (PortalRunMode.isTestMode()) {
        settingsBuilder.put("index.refresh_interval", "-1");
        settingsBuilder.put("index.translog.flush_threshold_ops", Integer.MAX_VALUE);
        settingsBuilder.put("index.translog.interval", "1d");

        Settings settings = settingsBuilder.build();

        Injector injector = _node.injector();

        IndicesService indicesService = injector.getInstance(IndicesService.class);

        Iterator<IndexService> iterator = indicesService.iterator();

        while (iterator.hasNext()) {
            IndexService indexService = iterator.next();

            injector = indexService.injector();

            IndexSettingsService indexSettingsService = injector.getInstance(IndexSettingsService.class);

            indexSettingsService.refreshSettings(settings);
        }

        ThreadPool threadPool = injector.getInstance(ThreadPool.class);

        ScheduledExecutorService scheduledExecutorService = threadPool.scheduler();

        if (scheduledExecutorService instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) scheduledExecutorService;

            threadPoolExecutor.setRejectedExecutionHandler(_REJECTED_EXECUTION_HANDLER);
        }

        scheduledExecutorService.shutdown();

        try {
            scheduledExecutorService.awaitTermination(1, TimeUnit.HOURS);
        } catch (InterruptedException ie) {
            if (_log.isWarnEnabled()) {
                _log.warn("Thread pool shutdown wait was interrupted", ie);
            }
        }
    }

    _node.close();

    _node = null;

    _file.deltree(_jnaTmpDirName);
}

From source file:io.fabric8.kubernetes.client.dsl.internal.RollingUpdater.java

/**
 * Since k8s v1.4.x, rc/rs deletes are asynchronous.
 * Let's wait until the resource is actually deleted on the server.
 */
private void waitUntilDeleted(final String namespace, final String name) {
    final CountDownLatch countDownLatch = new CountDownLatch(1);

    final Runnable waitTillDeletedPoller = new Runnable() {
        public void run() {
            try {
                T res = resources().inNamespace(namespace).withName(name).get();
                if (res == null) {
                    countDownLatch.countDown();
                }
            } catch (KubernetesClientException e) {
                if (e.getCode() == 404) {
                    countDownLatch.countDown();
                }
            }
        }
    };

    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    ScheduledFuture poller = executor.scheduleWithFixedDelay(waitTillDeletedPoller, 0, 5, TimeUnit.SECONDS);
    ScheduledFuture logger = executor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            LOG.debug("Found resource {}/{} not yet deleted on server, so waiting...", namespace, name);
        }
    }, 0, loggingIntervalMillis, TimeUnit.MILLISECONDS);
    try {
        countDownLatch.await(DEFAULT_SERVER_GC_WAIT_TIMEOUT, TimeUnit.MILLISECONDS);
        executor.shutdown();
    } catch (InterruptedException e) {
        poller.cancel(true);
        logger.cancel(true);
        executor.shutdown();
        LOG.warn("Still found deleted resource {} in namespace: {}  after waiting for {} seconds so giving up",
                name, namespace, TimeUnit.MILLISECONDS.toSeconds(DEFAULT_SERVER_GC_WAIT_TIMEOUT));
    }
}

From source file:io.fabric8.kubernetes.client.dsl.internal.RollingUpdater.java

/**
 * Let's wait until there are enough Ready pods for the given RC.
 */
private void waitUntilPodsAreReady(final T obj, final String namespace, final int requiredPodCount) {
    final CountDownLatch countDownLatch = new CountDownLatch(1);
    final AtomicInteger podCount = new AtomicInteger(0);

    final Runnable readyPodsPoller = new Runnable() {
        public void run() {
            PodList podList = listSelectedPods(obj);
            int count = 0;
            List<Pod> items = podList.getItems();
            for (Pod item : items) {
                for (PodCondition c : item.getStatus().getConditions()) {
                    if (c.getType().equals("Ready") && c.getStatus().equals("True")) {
                        count++;
                    }
                }
            }
            podCount.set(count);
            if (count == requiredPodCount) {
                countDownLatch.countDown();
            }
        }
    };

    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    ScheduledFuture poller = executor.scheduleWithFixedDelay(readyPodsPoller, 0, 1, TimeUnit.SECONDS);
    ScheduledFuture logger = executor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            LOG.debug("Only {}/{} pod(s) ready for {}: {} in namespace: {} seconds so waiting...",
                    podCount.get(), requiredPodCount, obj.getKind(), obj.getMetadata().getName(), namespace);
        }
    }, 0, loggingIntervalMillis, TimeUnit.MILLISECONDS);
    try {
        countDownLatch.await(rollingTimeoutMillis, TimeUnit.MILLISECONDS);
        executor.shutdown();
    } catch (InterruptedException e) {
        poller.cancel(true);
        logger.cancel(true);
        executor.shutdown();
        LOG.warn(
                "Only {}/{} pod(s) ready for {}: {} in namespace: {}  after waiting for {} seconds so giving up",
                podCount.get(), requiredPodCount, obj.getKind(), obj.getMetadata().getName(), namespace,
                TimeUnit.MILLISECONDS.toSeconds(rollingTimeoutMillis));
    }
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldNotShutdownExecutorServicesSuppliedToGremlinExecutor() throws Exception {
    final ScheduledExecutorService service = Executors.newScheduledThreadPool(4, testingThreadFactory);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().executorService(service)
            .scheduledExecutorService(service).create();

    gremlinExecutor.close();
    assertFalse(service.isShutdown());
    service.shutdown();
    service.awaitTermination(30000, TimeUnit.MILLISECONDS);
}

From source file:com.github.ambry.utils.UtilsTest.java

/**
 * Test {@link Utils#newScheduler(int, String, boolean)}
 */
@Test
public void newSchedulerTest() throws Exception {
    ScheduledExecutorService scheduler = Utils.newScheduler(2, false);
    Future<String> future = scheduler.schedule(new Callable<String>() {
        @Override
        public String call() {
            return Thread.currentThread().getName();
        }
    }, 50, TimeUnit.MILLISECONDS);
    String threadName = future.get(10, TimeUnit.SECONDS);
    assertTrue("Unexpected thread name returned: " + threadName, threadName.startsWith("ambry-scheduler-"));
    scheduler.shutdown();
}

From source file:com.btoddb.chronicle.plunkers.HdfsPlunkerImplIT.java

@Test
@Ignore("very flakey, need to work out a more stable way of testing")
public void testLongRun() throws Exception {
    plunker.setIdleTimeout(0);
    plunker.setRollPeriod(2);
    plunker.setTimeoutCheckPeriod(100);
    plunker.init(config);

    final int sleep = 200;
    final int maxCount = 100; // 20 seconds at 'sleep' interval should be 10 files
    final AtomicInteger count = new AtomicInteger();

    // do this to prime HDFS FileSystem object - otherwise timing is off
    plunker.handleInternal(Arrays.asList(new Event("the-body").withHeader("customer", "cust")
            .withHeader("msgId", String.valueOf(count.getAndIncrement()))));

    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    System.out.println("start");
    executor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                System.out.println("time = " + System.currentTimeMillis());
                plunker.handleInternal(Arrays.asList(new Event("the-body").withHeader("customer", "cust")
                        .withHeader("msgId", String.valueOf(count.get()))));
            } catch (Exception e) {
                e.printStackTrace();
            }

            count.incrementAndGet();
        }
    }, 0, sleep, TimeUnit.MILLISECONDS);

    while (count.get() < maxCount) {
        Thread.sleep(sleep / 2);
    }

    executor.shutdown();
    executor.awaitTermination(60, TimeUnit.SECONDS);

    Thread.sleep(1500);

    plunker.shutdown();

    Event[] events = new Event[count.get()];
    for (int i = 0; i < count.get(); i++) {
        events[i] = new Event("the-body").withHeader("customer", "cust").withHeader("msgId", String.valueOf(i));
    }

    File theDir = new File(String.format("%s/the/cust/path", baseDir.getPath()));

    assertThat(theDir, ftUtils.countWithSuffix(".tmp", 0));
    assertThat(theDir, ftUtils.countWithSuffix(".avro", 10));

    assertThat(theDir, ftUtils.hasEventsInDir(events));
}

From source file:org.igov.service.business.dfs.DfsService.java

protected void saveServiceMessage_EncryptedFile(String sHead, String sBody, byte[] aByte, String sID_Order,
        String sMail, String sFileName, String sFileContentType) {

    final Map<String, String> mParam = new HashMap<>();
    mParam.put("sHead", sHead);//" ?"
    mParam.put("sBody", sBody);
    mParam.put("sID_Order", sID_Order);
    mParam.put("sMail", sMail);
    mParam.put("sFileName", sFileName);
    mParam.put("sFileContentType", sFileContentType);
    mParam.put("nID_SubjectMessageType", "" + 12L);
    mParam.put("sID_DataLinkSource", "Region");
    mParam.put("sID_DataLinkAuthor", "SFS");
    String sID_DataLink = durableBytesDataStorage.saveData(aByte); //sBody.getBytes(Charset.forName("UTF-8"))
    mParam.put("sID_DataLink", sID_DataLink);

    mParam.put("RequestMethod", RequestMethod.GET.name());

    LOG.info("ToJournal-PROCESS mParam=" + mParam);

    ScheduledExecutorService oScheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
    Runnable oRunnable = new Runnable() {

        @Override
        public void run() {
            LOG.info("try to save service message with params with a delay: (params={})", mParam);
            String jsonServiceMessage;
            try {
                jsonServiceMessage = historyEventService.addServiceMessage(mParam);
                LOG.info("(jsonServiceMessage={})", jsonServiceMessage);
            } catch (Exception e) {
                LOG.error("( saveServiceMessage error={})", e.getMessage());
            }
        }
    };
    // Run the message save with a 10-second delay so the history event is
    // already in the database by then
    oScheduledExecutorService.schedule(oRunnable, 10, TimeUnit.SECONDS);
    oScheduledExecutorService.shutdown();

    LOG.info("Configured thread to run in 10 seconds with params: (params={})", mParam);
}