List of usage examples for java.util.concurrent.Executors.newSingleThreadScheduledExecutor()
public static ScheduledExecutorService newSingleThreadScheduledExecutor()
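Returns a single-threaded executor that can schedule commands to run after a given delay or to execute periodically; tasks are guaranteed to run sequentially, with no more than one task active at any time. Before the examples from real projects below, a minimal sketch (the task body and delays are illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SingleThreadSchedulerDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // One-shot task: runs once, two seconds from now, on the single worker thread.
        scheduler.schedule(() -> System.out.println("ran after delay"), 2, TimeUnit.SECONDS);
        // By default the executor still runs already-scheduled delayed tasks after shutdown(),
        // so it is safe to shut down immediately after scheduling.
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}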
From source file: com.streamsets.pipeline.stage.origin.jdbc.AbstractTableJdbcSource.java

@Override
public void produce(Map<String, String> lastOffsets, int maxBatchSize) throws StageException {
    int batchSize = Math.min(maxBatchSize, commonSourceConfigBean.maxBatchSize);
    handleLastOffset(new HashMap<>(lastOffsets));
    try {
        executorService = new SafeScheduledExecutorService(numberOfThreads,
                TableJdbcRunnable.TABLE_JDBC_THREAD_PREFIX);
        ExecutorCompletionService<Future> completionService = new ExecutorCompletionService<>(executorService);
        final RateLimiter queryRateLimiter = commonSourceConfigBean.creatQueryRateLimiter();
        List<Future> allFutures = new LinkedList<>();
        IntStream.range(0, numberOfThreads).forEach(threadNumber -> {
            JdbcBaseRunnable runnable = new JdbcRunnableBuilder()
                    .context(getContext())
                    .threadNumber(threadNumber)
                    .batchSize(batchSize)
                    .connectionManager(connectionManager)
                    .offsets(offsets)
                    .tableProvider(tableOrderProvider)
                    .tableReadContextCache(getTableReadContextCache(connectionManager, offsets))
                    .commonSourceConfigBean(commonSourceConfigBean)
                    .tableJdbcConfigBean(tableJdbcConfigBean)
                    .queryRateLimiter(commonSourceConfigBean.creatQueryRateLimiter())
                    .isReconnect(isReconnect)
                    .build();
            toBeInvalidatedThreadCaches.add(runnable.getTableReadContextCache());
            allFutures.add(completionService.submit(runnable, null));
        });
        if (commonSourceConfigBean.allowLateTable) {
            TableSpooler tableSpooler = new TableSpooler();
            executorServiceForTableSpooler = new SafeScheduledExecutorService(1,
                    JdbcBaseRunnable.TABLE_JDBC_THREAD_PREFIX);
            executorServiceForTableSpooler.scheduleWithFixedDelay(tableSpooler, 0,
                    commonSourceConfigBean.newTableQueryInterval, TimeUnit.SECONDS);
        }
        while (!getContext().isStopped()) {
            checkWorkerStatus(completionService);
            final boolean shouldGenerate = tableOrderProvider.shouldGenerateNoMoreDataEvent();
            if (shouldGenerate) {
                final int delay = commonSourceConfigBean.noMoreDataEventDelay;
                if (delay > 0) {
                    Executors.newSingleThreadScheduledExecutor().schedule(new Runnable() {
                        @Override
                        public void run() {
                            jdbcUtil.generateNoMoreDataEvent(getContext());
                        }
                    }, delay, TimeUnit.SECONDS);
                } else {
                    jdbcUtil.generateNoMoreDataEvent(getContext());
                }
            }
            // This loop only checks isStopped(), so running it as fast as possible causes high CPU usage
            // even when no data is passing through. We currently hard-code a sleep of a few milliseconds.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                LOG.debug("Interrupted wait");
            }
        }
        for (Future future : allFutures) {
            try {
                future.get();
            } catch (ExecutionException e) {
                LOG.error("ExecutionException when attempting to wait for all table JDBC runnables to complete,"
                        + " after context was stopped: {}", e.getMessage(), e);
            } catch (InterruptedException e) {
                LOG.error("InterruptedException when attempting to wait for all table JDBC runnables to complete,"
                        + " after context was stopped: {}", e.getMessage(), e);
                Thread.currentThread().interrupt();
            }
        }
    } finally {
        if (shutdownExecutorIfNeeded()) {
            Thread.currentThread().interrupt();
        }
    }
}
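Note the pattern inside the polling loop above: a fresh single-thread scheduler is created per delayed no-more-data event and is never shut down, so each event appears to leave a live worker thread behind. A common variant shuts the executor down once the task has fired. A sketch under that assumption, not the StreamSets code; fireEvent stands in for the generateNoMoreDataEvent call:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class OneShotDelay {
    // Fire a one-shot action after a delay, then release the scheduler's worker thread.
    static void fireDelayed(Runnable fireEvent, int delaySeconds) {
        ScheduledExecutorService oneShot = Executors.newSingleThreadScheduledExecutor();
        oneShot.schedule(() -> {
            try {
                fireEvent.run();
            } finally {
                oneShot.shutdown(); // nothing else is scheduled, so the worker thread exits
            }
        }, delaySeconds, TimeUnit.SECONDS);
    }
}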
From source file: nu.yona.app.utils.AppUtils.java

/**
 * Gets the initialization scheduler, creating it lazily on first use.
 *
 * @return the initialization scheduler
 */
public static ScheduledExecutorService getInitializeScheduler() {
    if (scheduler == null) {
        scheduler = Executors.newSingleThreadScheduledExecutor();
    }
    return scheduler;
}
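As written, the getter is not thread-safe: two threads calling it concurrently could each create a scheduler. If concurrent access is possible, one conventional fix is the initialization-on-demand holder idiom. A sketch with assumed class names, not the Yona code:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public final class SchedulerHolder {
    private SchedulerHolder() {
    }

    // The JVM guarantees Holder is initialized exactly once, on first access,
    // so no explicit locking is needed.
    private static final class Holder {
        static final ScheduledExecutorService SCHEDULER = Executors.newSingleThreadScheduledExecutor();
    }

    public static ScheduledExecutorService getInitializeScheduler() {
        return Holder.SCHEDULER;
    }
}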
From source file: org.apache.flume.channel.file.TestFileChannelRestart.java

private void doTestCorruptInflights(String name, boolean backup) throws Exception {
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.USE_DUAL_CHECKPOINTS, String.valueOf(backup));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    final Set<String> in1 = putEvents(channel, "restart-", 10, 100);
    Assert.assertEquals(100, in1.size());
    Executors.newSingleThreadScheduledExecutor().submit(new Runnable() {
        @Override
        public void run() {
            Transaction tx = channel.getTransaction();
            Set<String> out1 = takeWithoutCommit(channel, tx, 100);
            Assert.assertEquals(100, out1.size());
        }
    });
    Transaction tx = channel.getTransaction();
    Set<String> in2 = putWithoutCommit(channel, tx, "restart", 100);
    Assert.assertEquals(100, in2.size());
    forceCheckpoint(channel);
    if (backup) {
        Thread.sleep(2000);
    }
    tx.commit();
    tx.close();
    channel.stop();
    File inflight = new File(checkpointDir, name);
    RandomAccessFile writer = new RandomAccessFile(inflight, "rw");
    writer.write(new Random().nextInt());
    writer.close();
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Assert.assertTrue(!backup || channel.checkpointBackupRestored());
    Set<String> out = consumeChannel(channel);
    in1.addAll(in2);
    compareInputAndOut(in1, out);
}
From source file: com.linkedin.d2.balancer.simple.SimpleLoadBalancerTest.java

@Test(groups = { "small", "back-end" })
public void testLoadBalancerWithPartitionsSmoke() throws URISyntaxException, ServiceUnavailableException,
        InterruptedException, ExecutionException {
    for (int tryAgain = 0; tryAgain < 12; ++tryAgain) {
        Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories =
                new HashMap<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>>();
        Map<String, TransportClientFactory> clientFactories = new HashMap<String, TransportClientFactory>();
        List<String> prioritizedSchemes = new ArrayList<String>();
        MockStore<ServiceProperties> serviceRegistry = new MockStore<ServiceProperties>();
        MockStore<ClusterProperties> clusterRegistry = new MockStore<ClusterProperties>();
        MockStore<UriProperties> uriRegistry = new MockStore<UriProperties>();
        ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
        loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
        clientFactories.put("http", new DoNothingClientFactory());
        SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry,
                clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);
        SimpleLoadBalancer loadBalancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS);
        FutureCallback<None> balancerCallback = new FutureCallback<None>();
        loadBalancer.start(balancerCallback);
        balancerCallback.get();

        URI uri1 = URI.create("http://test.qa1.com:1234");
        URI uri2 = URI.create("http://test.qa2.com:2345");
        URI uri3 = URI.create("http://test.qa3.com:6789");
        Map<URI, Double> uris = new HashMap<URI, Double>();
        uris.put(uri1, 1d);
        uris.put(uri2, 1d);
        uris.put(uri3, 1d);
        Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<URI, Map<Integer, PartitionData>>();
        Map<Integer, PartitionData> server1 = new HashMap<Integer, PartitionData>();
        server1.put(0, new PartitionData(1d));
        server1.put(1, new PartitionData(1d));
        Map<Integer, PartitionData> server2 = new HashMap<Integer, PartitionData>();
        server2.put(0, new PartitionData(1d));
        Map<Integer, PartitionData> server3 = new HashMap<Integer, PartitionData>();
        server3.put(1, new PartitionData(1d));
        partitionDesc.put(uri1, server1);
        partitionDesc.put(uri2, server2);
        partitionDesc.put(uri3, server3);
        prioritizedSchemes.add("http");

        int partitionMethod = tryAgain % 4;
        switch (partitionMethod) {
        case 0:
            clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null,
                    new HashMap<String, String>(), new HashSet<URI>(),
                    new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 2)));
            break;
        case 1:
            clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null,
                    new HashMap<String, String>(), new HashSet<URI>(),
                    new HashBasedPartitionProperties("id=(\\d+)", 2,
                            HashBasedPartitionProperties.HashAlgorithm.valueOf("MODULO"))));
            break;
        case 2:
            clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null,
                    new HashMap<String, String>(), new HashSet<URI>(),
                    new HashBasedPartitionProperties("id=(\\d+)", 2,
                            HashBasedPartitionProperties.HashAlgorithm.valueOf("MD5"))));
            break;
        case 3:
            // test getRings with a gap: here, no server serves partition 2
            clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null,
                    new HashMap<String, String>(), new HashSet<URI>(),
                    new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 4)));
            server3.put(3, new PartitionData(1d));
            partitionDesc.put(uri3, server3);
            break;
        default:
            break;
        }

        serviceRegistry.put("foo", new ServiceProperties("foo", "cluster-1", "/foo", Arrays.asList("degrader"),
                Collections.<String, Object>emptyMap(), null, null, prioritizedSchemes, null));
        uriRegistry.put("cluster-1", new UriProperties("cluster-1", partitionDesc));

        if (partitionMethod == 3) {
            Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
            assertEquals(ringMap.size(), 4);
            // the ring for partition 2 should be empty
            assertEquals(ringMap.get(2).toString(),
                    new ConsistentHashRing<URI>(Collections.emptyList()).toString());
            continue;
        }

        URI expectedUri1 = URI.create("http://test.qa1.com:1234/foo");
        URI expectedUri2 = URI.create("http://test.qa2.com:2345/foo");
        URI expectedUri3 = URI.create("http://test.qa3.com:6789/foo");
        Set<URI> expectedUris = new HashSet<URI>();
        expectedUris.add(expectedUri1);
        expectedUris.add(expectedUri2);
        expectedUris.add(expectedUri3);

        for (int i = 0; i < 1000; ++i) {
            int ii = i % 100;
            RewriteClient client = (RewriteClient) loadBalancer.getClient(new URIRequest("d2://foo/id=" + ii),
                    new RequestContext());
            String clientUri = client.getUri().toString();
            HashFunction<String[]> hashFunction = null;
            String[] str = new String[1];

            // test KeyMapper target host hint: the request always goes to the target host, regardless of
            // what is in the d2 URI and whether partitions are hash-based or range-based
            RequestContext requestContextWithHint = new RequestContext();
            KeyMapper.TargetHostHints.setRequestContextTargetHost(requestContextWithHint, uri1);
            RewriteClient hintedClient1 = (RewriteClient) loadBalancer
                    .getClient(new URIRequest("d2://foo/id=" + ii), requestContextWithHint);
            String hintedUri1 = hintedClient1.getUri().toString();
            Assert.assertEquals(hintedUri1, uri1.toString() + "/foo");
            RewriteClient hintedClient2 = (RewriteClient) loadBalancer
                    .getClient(new URIRequest("d2://foo/action=purge-all"), requestContextWithHint);
            String hintedUri2 = hintedClient2.getUri().toString();
            Assert.assertEquals(hintedUri2, uri1.toString() + "/foo");
            // end test KeyMapper target host hint

            if (partitionMethod == 2) {
                hashFunction = new MD5Hash();
            }
            for (URI uri : expectedUris) {
                if (clientUri.contains(uri.toString())) {
                    // check that only keys belonging to partition 0 get uri2
                    if (uri.equals(uri2)) {
                        if (partitionMethod == 0) {
                            assertTrue(ii < 50);
                        } else if (partitionMethod == 1) {
                            assertTrue(ii % 2 == 0);
                        } else {
                            str[0] = ii + "";
                            assertTrue(hashFunction.hash(str) % 2 == 0);
                        }
                    }
                    // check that only keys belonging to partition 1 get uri3
                    if (uri.equals(uri3)) {
                        if (partitionMethod == 0) {
                            assertTrue(ii >= 50);
                        } else if (partitionMethod == 1) {
                            assertTrue(ii % 2 == 1);
                        } else {
                            str[0] = ii + "";
                            assertTrue(hashFunction.hash(str) % 2 == 1);
                        }
                    }
                }
            }
        }

        // two rings for two partitions
        Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
        assertEquals(ringMap.size(), 2);

        if (partitionMethod != 2) {
            Set<String> keys = new HashSet<String>();
            for (int j = 0; j < 50; j++) {
                if (partitionMethod == 0) {
                    keys.add(j + "");
                } else {
                    keys.add(j * 2 + "");
                }
            }
            // if it is range-based partitioning, all keys from 0 to 49 belong to partition 0 according to
            // the range definition; if it is modulo-based partitioning, all even keys belong to partition 0
            // because the partition count is 2

            // only from partition 0
            MapKeyResult<Ring<URI>, String> mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            Map<Ring<URI>, Collection<String>> keyToPartition = mapKeyResult.getMapResult();
            assertEquals(keyToPartition.size(), 1);
            for (Ring<URI> ring : keyToPartition.keySet()) {
                assertEquals(ring, ringMap.get(0));
            }

            // now also from partition 1
            keys.add("51");
            mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            assertEquals(mapKeyResult.getMapResult().size(), 2);
            assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);

            // now only from partition 1
            keys.clear();
            keys.add("99");
            mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            keyToPartition = mapKeyResult.getMapResult();
            assertEquals(keyToPartition.size(), 1);
            assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);
            for (Ring<URI> ring : keyToPartition.keySet()) {
                assertEquals(ring, ringMap.get(1));
            }

            keys.add("100");
            mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            if (partitionMethod == 0) {
                // key out of range
                Collection<MapKeyResult.UnmappedKey<String>> unmappedKeys = mapKeyResult.getUnmappedKeys();
                assertEquals(unmappedKeys.size(), 1);
            }
            try {
                loadBalancer.getClient(new URIRequest("d2://foo/id=100"), new RequestContext());
                if (partitionMethod == 0) {
                    // key out of range
                    fail("Should throw ServiceUnavailableException caused by PartitionAccessException");
                }
            } catch (ServiceUnavailableException e) {
            }
        }

        final CountDownLatch latch = new CountDownLatch(1);
        PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
            @Override
            public void done() {
                latch.countDown();
            }
        };
        state.shutdown(callback);
        if (!latch.await(60, TimeUnit.SECONDS)) {
            fail("unable to shutdown state");
        }
        executorService.shutdownNow();
        assertTrue(executorService.isShutdown(), "ExecutorService should have shut down!");
    }
}
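The test tears down with shutdownNow(), which interrupts running tasks, discards queued ones, and leaves isShutdown() true immediately. When tasks should be allowed to drain first, the two-phase pattern from the ExecutorService javadoc is the usual approach. A generic sketch, not d2 code:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public final class Pools {
    // Two-phase shutdown, adapted from the ExecutorService javadoc.
    static void shutdownGracefully(ExecutorService pool) {
        pool.shutdown(); // stop accepting new tasks
        try {
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // interrupt anything still running
                if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                    System.err.println("Pool did not terminate");
                }
            }
        } catch (InterruptedException e) {
            pool.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }
}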
From source file: com.bonsai.wallet32.WalletService.java

@Override
public void onCreate() {
    mNM = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
    mLBM = LocalBroadcastManager.getInstance(this);

    mLogger.info("WalletService created");

    mApp = (WalletApplication) getApplicationContext();
    mContext = getApplicationContext();
    mRes = mContext.getResources();

    mTimeoutWorker = Executors.newSingleThreadScheduledExecutor();

    final String lockName = getPackageName() + " blockchain sync";
    final PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
    mWakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, lockName);

    mPrefs = PreferenceManager.getDefaultSharedPreferences(this);
    String fiatRateSource = mPrefs.getString(SettingsActivity.KEY_FIAT_RATE_SOURCE, "");
    setFiatRateSource(fiatRateSource);

    // Register for future preference changes.
    mPrefs.registerOnSharedPreferenceChangeListener(this);

    // Register with the WalletApplication.
    mApp.setWalletService(this);
}
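A long-lived service scheduler like mTimeoutWorker above is easier to spot in thread dumps if its worker thread is named. newSingleThreadScheduledExecutor(ThreadFactory) accepts a factory for exactly this. The sketch below is an assumed helper, not Wallet32 code, and the thread name is illustrative:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public final class NamedSchedulers {
    // Single-thread scheduler whose worker carries a recognizable name.
    static ScheduledExecutorService newNamedSingleThreadScheduler(String name) {
        return Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, name);
            t.setDaemon(true); // optional: don't let this thread alone keep the JVM alive
            return t;
        });
    }
}

Usage would then be mTimeoutWorker = NamedSchedulers.newNamedSingleThreadScheduler("wallet-timeout-worker").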
From source file: ecg.ecgshow.ECGShowUI.java

private void createBloodOxygenData(long timeZone) {
    BloodOxygendatas = new short[2];
    BloodOxygenData = new JPanel();
    BloodOxygenData.setLayout(new FlowLayout());
    BloodOxygenData.setBounds(0, 0, (int) (WIDTH * 0.14), (int) (HEIGHT * 0.15));
    BloodOxygenData.setBackground(Color.BLACK);

    JLabel jLabel1 = new JLabel("---");
    // The null check must run before the array is dereferenced.
    if (BloodOxygendatas == null || BloodOxygendatas[0] == 0x00) {
        jLabel1.setText("---");
    } else {
        jLabel1.setText(Short.toString(BloodOxygendatas[0]));
    }
    System.out.println("BloodOxygendatas" + Short.toString(BloodOxygendatas[0]));
    jLabel1.setFont(loadFont("LED.tff", (float) (HEIGHT * 0.070)));
    jLabel1.setBackground(Color.BLACK);
    jLabel1.setForeground(Color.GREEN);
    jLabel1.setBounds(0, 0, 100, 100);
    jLabel1.setOpaque(true);

    JLabel jLabelName = new JLabel(" ");
    jLabelName.setFont(new Font("SansSerif", 0, (int) (HEIGHT * 0.020)));
    jLabelName.setBackground(Color.BLACK);
    jLabelName.setForeground(new Color(237, 65, 43));
    jLabelName.setBounds(0, 0, 100, 100);
    jLabelName.setOpaque(true);

    JLabel jLabelUnit = new JLabel(" %");
    jLabelUnit.setFont(new Font("SansSerif", 0, (int) (HEIGHT * 0.020)));
    jLabelUnit.setBackground(Color.BLACK);
    jLabelUnit.setForeground(Color.GREEN);
    jLabelUnit.setBounds(0, 0, 100, 100);
    jLabelUnit.setOpaque(true);

    BloodOxygenData.add(jLabelName);
    BloodOxygenData.add(jLabel1);
    BloodOxygenData.add(jLabelUnit);

    ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
    scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            if (BloodOxygendatas[0] == 0 || HeartRatedatas[0] == -100) {
                jLabel1.setText("--");
            } else {
                jLabel1.setText(String.valueOf(BloodOxygendatas[0]));
            }
            BloodOxygenData.repaint();
        }
    }, 0, 3, TimeUnit.SECONDS);
}
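Note that the repeating task above mutates Swing components (setText, repaint) from the scheduler's worker thread, while Swing generally expects updates on the event dispatch thread. A sketch of the same refresh loop that hands the UI mutation to the EDT; the class, method, and parameter names are illustrative, not from ECGShowUI:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.swing.JLabel;
import javax.swing.SwingUtilities;

public final class OxygenRefresh {
    // Polls on the scheduler thread every 3 seconds; the Swing mutation itself runs on the EDT.
    static ScheduledExecutorService startRefresh(JLabel label, short[] readings) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(() -> {
            final String text = (readings[0] == 0) ? "--" : String.valueOf(readings[0]);
            SwingUtilities.invokeLater(() -> label.setText(text)); // touch Swing only on the EDT
        }, 0, 3, TimeUnit.SECONDS);
        return scheduler; // caller is responsible for shutdown()
    }
}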
From source file: org.elasticsearch.client.sniff.SnifferTests.java

public void testTaskCancelling() throws Exception {
    RestClient restClient = mock(RestClient.class);
    HostsSniffer hostsSniffer = mock(HostsSniffer.class);
    Scheduler noOpScheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            return null;
        }

        @Override
        public void shutdown() {
        }
    };
    Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        int numIters = randomIntBetween(50, 100);
        for (int i = 0; i < numIters; i++) {
            Sniffer.Task task = sniffer.new Task(0L);
            TaskWrapper wrapper = new TaskWrapper(task);
            Future<?> future;
            if (rarely()) {
                future = executor.schedule(wrapper, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
            } else {
                future = executor.submit(wrapper);
            }
            Sniffer.ScheduledTask scheduledTask = new Sniffer.ScheduledTask(task, future);
            boolean skip = scheduledTask.skip();
            try {
                assertNull(future.get());
            } catch (CancellationException ignore) {
                assertTrue(future.isCancelled());
            }
            if (skip) {
                // The task was either cancelled before starting, in which case it will never start
                // (thanks to Future#cancel), or skipped, in which case it will run but do nothing
                // (thanks to Task#skip). Here we want to make sure that whenever skip returns true,
                // the task either won't run or won't do anything; otherwise we may end up with
                // parallel sniffing tracks, given that each task schedules the following one. We need
                // to make sure that onFailure takes over scheduling while ordinary rounds don't go on.
                assertFalse(task.hasStarted());
                assertTrue(task.isSkipped());
                assertTrue(future.isCancelled());
                assertTrue(future.isDone());
            } else {
                // If a future is cancelled after its execution has already started, future#get throws
                // CancellationException before completion. The execution continues though, so we use a
                // latch to wait for the task to be completed. Here we want to make sure that whenever
                // skip returns false, the task will be completed; otherwise we may fail to schedule the
                // following round, which means no sniffing would ever happen again besides on-failure
                // sniffing.
                assertTrue(wrapper.await());
                // the future may or may not be cancelled, but the task has for sure started and completed
                assertTrue(task.toString(), task.hasStarted());
                assertFalse(task.isSkipped());
                assertTrue(future.isDone());
            }
            // subsequent cancel calls return false for sure
            int cancelCalls = randomIntBetween(1, 10);
            for (int j = 0; j < cancelCalls; j++) {
                assertFalse(scheduledTask.skip());
            }
        }
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }
}
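The test above exercises cancellation through Sniffer's own wrappers. Distilled to the plain API: schedule() returns a ScheduledFuture, cancel(false) prevents a not-yet-started task from ever running, and a subsequent get() throws CancellationException. A minimal standalone sketch:

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class CancelDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        ScheduledFuture<?> future = scheduler.schedule(
                () -> System.out.println("should never print"), 1, TimeUnit.HOURS);
        // The task has not started yet, so cancel(false) succeeds and it will never run.
        System.out.println("cancelled: " + future.cancel(false) + ", isDone: " + future.isDone());
        try {
            future.get();
        } catch (CancellationException expected) {
            System.out.println("get() throws CancellationException after a successful cancel()");
        } catch (ExecutionException e) {
            // not reached: a cancelled task has no execution failure to report
        }
        scheduler.shutdownNow();
    }
}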
From source file: org.dcm4che3.tool.probetc.ProbeTC.java

private static void initThreads(Device device) {
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
    device.setExecutor(executorService);
    device.setScheduledExecutor(scheduledExecutorService);
}
From source file: org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream.java

@Test(timeout = 120000)
public void testReadEventsWithTimeout() throws IOException, InterruptedException, MissingEventsException {
    Configuration conf = new HdfsConfiguration();
    MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();
    try {
        cluster.getDfsCluster().waitActive();
        cluster.getDfsCluster().transitionToActive(0);
        final DFSClient client = new DFSClient(cluster.getDfsCluster().getNameNode(0).getNameNodeAddress(),
                conf);
        DFSInotifyEventInputStream eis = client.getInotifyEventStream();
        ScheduledExecutorService ex = Executors.newSingleThreadScheduledExecutor();
        ex.schedule(new Runnable() {
            @Override
            public void run() {
                try {
                    client.mkdirs("/dir", null, false);
                } catch (IOException e) {
                    // test will fail
                    LOG.error("Unable to create /dir", e);
                }
            }
        }, 1, TimeUnit.SECONDS);
        // a very generous wait period -- the edit will definitely have been
        // processed by the time this is up
        EventBatch batch = eis.poll(5, TimeUnit.SECONDS);
        Assert.assertNotNull(batch);
        Assert.assertEquals(1, batch.getEvents().length);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
        Assert.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
    } finally {
        cluster.shutdown();
    }
}
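The test schedules a Runnable purely for its side effect. When the delayed work should produce a value, schedule() also accepts a Callable, and the returned ScheduledFuture carries the result. A minimal sketch; the clock-reading task is just a placeholder:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class ScheduledCallableDemo {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        Callable<Long> readClock = System::nanoTime; // any value-producing task works here
        // schedule(Callable) returns a ScheduledFuture that carries the task's result.
        ScheduledFuture<Long> future = scheduler.schedule(readClock, 1, TimeUnit.SECONDS);
        System.out.println("task ran at nanoTime = " + future.get()); // blocks until the task completes
        scheduler.shutdown();
    }
}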
From source file: org.zeroturnaround.exec.ProcessExecutor.java

/**
 * Start the sub process. This method does not wait until the process exits.
 * The value passed to {@link #timeout(long, TimeUnit)} is ignored.
 * Use {@link Future#get()} to wait for the process to finish.
 * Invoke <code>future.cancel(true);</code> to destroy the process.
 *
 * @return Future representing the exit value of the finished process.
 * @throws IOException an error occurred when the process was started.
 */
public StartedProcess start() throws IOException {
    WaitForProcess task = startInternal();
    ExecutorService service = Executors.newSingleThreadScheduledExecutor();
    Future<ProcessResult> future = service.submit(task);
    // Previously submitted tasks are executed but no new tasks will be accepted.
    service.shutdown();
    return new StartedProcess(task.getProcess(), future);
}