List of usage examples for java.util.concurrent ScheduledExecutorService schedule
ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit);
public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit);
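Before the real-world examples below, here is a minimal, self-contained sketch of calling the Callable overload directly; the class and variable names are illustrative only and do not come from the examples.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class ScheduleCallableDemo {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        try {
            // Schedule a Callable to run after a 2-second delay; the returned
            // ScheduledFuture carries the Callable's result (or its exception).
            ScheduledFuture<String> future = scheduler.schedule(new Callable<String>() {
                @Override
                public String call() {
                    return "ran at " + System.nanoTime();
                }
            }, 2, TimeUnit.SECONDS);
            System.out.println(future.get()); // blocks until the delayed task completes
        } finally {
            scheduler.shutdown();
        }
    }
}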
From source file:org.apache.nifi.controller.service.StandardControllerServiceNode.java
/**
 * Will atomically enable this service by invoking its @OnEnabled operation.
 * It uses a CAS operation on {@link #stateRef} to transition this service
 * from DISABLED to ENABLING state. If such transition succeeds the service
 * will be marked as 'active' (see {@link ControllerServiceNode#isActive()}).
 * If such transition doesn't succeed then no enabling logic will be
 * performed and the method will exit. In other words it is safe to invoke
 * this operation multiple times and from multiple threads.
 * <br>
 * This operation will also perform a re-try of service enabling in the event
 * of an exception being thrown by a previous invocation of @OnEnabled.
 * <br>
 * Upon successful invocation of @OnEnabled this service will be transitioned to
 * ENABLED state.
 * <br>
 * In the event where enabling took longer than expected by the user and such user
 * initiated a disable operation, this service will be automatically disabled as soon
 * as it reaches ENABLED state.
 */
@Override
public void enable(final ScheduledExecutorService scheduler, final long administrativeYieldMillis) {
    if (this.stateRef.compareAndSet(ControllerServiceState.DISABLED, ControllerServiceState.ENABLING)) {
        this.active.set(true);
        final ConfigurationContext configContext =
                new StandardConfigurationContext(this, this.serviceProvider, null, getVariableRegistry());
        scheduler.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    try (final NarCloseable nc = NarCloseable.withComponentNarLoader(
                            getControllerServiceImplementation().getClass(), getIdentifier())) {
                        ReflectionUtils.invokeMethodsWithAnnotation(OnEnabled.class,
                                getControllerServiceImplementation(), configContext);
                    }

                    boolean shouldEnable = false;
                    synchronized (active) {
                        shouldEnable = active.get() && stateRef.compareAndSet(ControllerServiceState.ENABLING,
                                ControllerServiceState.ENABLED);
                    }

                    if (!shouldEnable) {
                        LOG.debug("Disabling service " + this
                                + " after it has been enabled due to disable action being initiated.");
                        // Can only happen if user initiated DISABLE operation before service finished enabling.
                        // Its state will be set to DISABLING (see disable() operation)
                        invokeDisable(configContext);
                        stateRef.set(ControllerServiceState.DISABLED);
                    }
                } catch (Exception e) {
                    final Throwable cause = e instanceof InvocationTargetException ? e.getCause() : e;
                    final ComponentLog componentLog = new SimpleProcessLogger(getIdentifier(),
                            StandardControllerServiceNode.this);
                    componentLog.error("Failed to invoke @OnEnabled method due to {}", cause);
                    LOG.error("Failed to invoke @OnEnabled method of {} due to {}",
                            getControllerServiceImplementation(), cause.toString());
                    invokeDisable(configContext);

                    if (isActive()) {
                        scheduler.schedule(this, administrativeYieldMillis, TimeUnit.MILLISECONDS);
                    } else {
                        try (final NarCloseable nc = NarCloseable.withComponentNarLoader(
                                getControllerServiceImplementation().getClass(), getIdentifier())) {
                            ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnDisabled.class,
                                    getControllerServiceImplementation(), configContext);
                        }
                        stateRef.set(ControllerServiceState.DISABLED);
                    }
                }
            }
        });
    }
}
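The example above retries a failed @OnEnabled call by handing the same Runnable back to the scheduler with schedule(this, administrativeYieldMillis, MILLISECONDS). A stripped-down sketch of that self-rescheduling retry pattern follows; the state flag and "not ready" exception are stand-ins, not NiFi's API.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class RetryUntilEnabledDemo {
    public static void main(String[] args) {
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        final long yieldMillis = 500;
        final AtomicBoolean active = new AtomicBoolean(true);

        scheduler.execute(new Runnable() {
            int attempts = 0;

            @Override
            public void run() {
                try {
                    attempts++;
                    if (attempts < 3) { // stand-in for an @OnEnabled call that throws
                        throw new IllegalStateException("not ready yet");
                    }
                    System.out.println("enabled after " + attempts + " attempts");
                    scheduler.shutdown();
                } catch (Exception e) {
                    // Re-schedule this same Runnable after an administrative yield,
                    // as the NiFi example does, but only while the service is still active.
                    if (active.get()) {
                        scheduler.schedule(this, yieldMillis, TimeUnit.MILLISECONDS);
                    } else {
                        scheduler.shutdown();
                    }
                }
            }
        });
    }
}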
From source file:org.elasticsearch.client.sniff.SnifferTests.java
/**
 * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always
 * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the next scheduled round
 * (either afterFailure or ordinary) gets cancelled.
 */
public void testSniffOnFailure() throws Exception {
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final AtomicBoolean initializing = new AtomicBoolean(true);
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    int minNumOnFailureRounds = randomIntBetween(5, 10);
    final CountDownLatch initializingLatch = new CountDownLatch(1);
    final Set<Sniffer.ScheduledTask> ordinaryRoundsTasks = new CopyOnWriteArraySet<>();
    final AtomicReference<Future<?>> initializingFuture = new AtomicReference<>();
    final Set<Sniffer.ScheduledTask> onFailureTasks = new CopyOnWriteArraySet<>();
    final Set<Sniffer.ScheduledTask> afterFailureTasks = new CopyOnWriteArraySet<>();
    final AtomicBoolean onFailureCompleted = new AtomicBoolean(false);
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        Scheduler scheduler = new Scheduler() {
            @Override
            public Future<?> schedule(final Sniffer.Task task, long delayMillis) {
                if (initializing.compareAndSet(true, false)) {
                    assertEquals(0L, delayMillis);
                    Future<?> future = executor.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                task.run();
                            } finally {
                                //we need to make sure that the sniffer is initialized, so the sniffOnFailure
                                //call does what it needs to do. Otherwise nothing happens until initialized.
                                initializingLatch.countDown();
                            }
                        }
                    });
                    assertTrue(initializingFuture.compareAndSet(null, future));
                    return future;
                }
                if (delayMillis == 0L) {
                    Future<?> future = executor.submit(task);
                    onFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }
                if (delayMillis == sniffAfterFailureDelay) {
                    Future<?> future = scheduleOrSubmit(task);
                    afterFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }

                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);

                if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) {
                    completionLatch.countDown();
                    return mock(Future.class);
                }

                Future<?> future = scheduleOrSubmit(task);
                ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future));
                return future;
            }

            private Future<?> scheduleOrSubmit(Sniffer.Task task) {
                if (randomBoolean()) {
                    return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
                } else {
                    return executor.submit(task);
                }
            }

            @Override
            public void shutdown() {
            }
        };

        final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval,
                sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffer to get initialized",
                initializingLatch.await(1000, TimeUnit.MILLISECONDS));

        ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20));
        Set<Future<?>> onFailureFutures = new CopyOnWriteArraySet<>();
        try {
            //with tasks executing quickly one after each other, it is very likely that the onFailure round gets skipped
            //as another round is already running. We retry till enough runs get through as that's what we want to test.
            while (onFailureTasks.size() < minNumOnFailureRounds) {
                onFailureFutures.add(onFailureExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        sniffer.sniffOnFailure();
                    }
                }));
            }
            assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
            for (Future<?> onFailureFuture : onFailureFutures) {
                assertNull(onFailureFuture.get());
            }
            onFailureCompleted.set(true);
        } finally {
            onFailureExecutor.shutdown();
            onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        }

        assertFalse(initializingFuture.get().isCancelled());
        assertTrue(initializingFuture.get().isDone());
        assertNull(initializingFuture.get().get());

        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
        assertEquals(onFailureTasks.size(), afterFailureTasks.size());

        for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) {
            assertFalse(onFailureTask.future.isCancelled());
            assertTrue(onFailureTask.future.isDone());
            assertNull(onFailureTask.future.get());
            assertTrue(onFailureTask.task.hasStarted());
            assertFalse(onFailureTask.task.isSkipped());
        }

        int cancelledTasks = 0;
        int completedTasks = onFailureTasks.size() + 1;
        for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) {
            if (assertTaskCancelledOrCompleted(afterFailureTask)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }
        assertThat(ordinaryRoundsTasks.size(), greaterThan(0));
        for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) {
            if (assertTaskCancelledOrCompleted(task)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }
        assertEquals(onFailureTasks.size(), cancelledTasks);
        assertEquals(completedTasks, hostsSniffer.runs.get());
        int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
        verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
        verifyNoMoreInteractions(restClient);
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000L, TimeUnit.MILLISECONDS);
    }
}
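In the test above, scheduleOrSubmit treats executor.schedule(task, delay, MILLISECONDS) and executor.submit(task) as two interchangeable ways of handing a task to the same single-threaded scheduled executor. A small sketch of that equivalence is below; the class and variable names are illustrative, not part of the Elasticsearch test.

import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduleOrSubmitDemo {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        Runnable task = () -> System.out.println("round executed on " + Thread.currentThread().getName());
        try {
            // Both calls hand the task to the same worker thread; schedule() just adds a delay
            // and returns a ScheduledFuture instead of a plain Future.
            Future<?> delayed = executor.schedule(task, 200, TimeUnit.MILLISECONDS);
            Future<?> immediate = executor.submit(task);
            immediate.get();
            delayed.get(); // both complete normally; get() returns null for Runnable tasks
        } finally {
            executor.shutdown();
        }
    }
}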
From source file:org.apache.nifi.controller.StandardProcessorNode.java
/**
 * Will idempotently stop the processor using the following sequence: <i>
 * <ul>
 * <li>Transition (atomically) the Processor's scheduled state from RUNNING to
 * STOPPING. If the above state transition succeeds, then invoke any method
 * on the Processor with the {@link OnUnscheduled} annotation. Once those methods
 * have been called and returned (either normally or exceptionally), start checking
 * to see if all of the Processor's active threads have finished. If not, check again
 * every 100 milliseconds until they have.
 * Once all threads have completed, the processor's @OnStopped operation will be invoked
 * and its scheduled state is set to STOPPED, which completes the processor stop
 * sequence.</li>
 * </ul>
 * </i>
 *
 * <p>
 * If for some reason the processor's scheduled state can not be transitioned to
 * STOPPING (e.g., the processor didn't finish its @OnScheduled operation when
 * stop was called), an attempt will be made to transition the processor's
 * scheduled state from STARTING to STOPPING, which will allow the
 * {@link #start(ScheduledExecutorService, long, ProcessContext, Runnable)}
 * method to initiate the processor's shutdown upon exiting the @OnScheduled
 * operation; otherwise the processor's scheduled state will remain
 * unchanged, ensuring that multiple calls to this method are idempotent.
 * </p>
 */
@Override
public <T extends ProcessContext & ControllerServiceLookup> void stop(final ScheduledExecutorService scheduler,
        final T processContext, final SchedulingAgent schedulingAgent, final ScheduleState scheduleState) {
    LOG.info("Stopping processor: " + this.processor.getClass());
    if (this.scheduledState.compareAndSet(ScheduledState.RUNNING, ScheduledState.STOPPING)) {
        // will ensure that the Processor represented by this node can only be stopped once
        scheduleState.incrementActiveThreadCount();

        // will continue to monitor active threads, invoking OnStopped once there are no
        // active threads (with the exception of the thread performing shutdown operations)
        scheduler.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    if (scheduleState.isScheduled()) {
                        schedulingAgent.unschedule(StandardProcessorNode.this, scheduleState);
                        try (final NarCloseable nc = NarCloseable.withComponentNarLoader(processor.getClass(),
                                processor.getIdentifier())) {
                            ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnUnscheduled.class, processor,
                                    processContext);
                        }
                    }

                    // all threads are complete if the active thread count is 1. This is because this thread that is
                    // performing the lifecycle actions counts as 1 thread.
                    final boolean allThreadsComplete = scheduleState.getActiveThreadCount() == 1;
                    if (allThreadsComplete) {
                        try (final NarCloseable nc = NarCloseable.withComponentNarLoader(processor.getClass(),
                                processor.getIdentifier())) {
                            ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnStopped.class, processor,
                                    processContext);
                        }

                        scheduleState.decrementActiveThreadCount();
                        scheduledState.set(ScheduledState.STOPPED);
                    } else {
                        // Not all of the active threads have finished. Try again in 100 milliseconds.
                        scheduler.schedule(this, 100, TimeUnit.MILLISECONDS);
                    }
                } catch (final Exception e) {
                    LOG.warn("Failed while shutting down processor " + processor, e);
                }
            }
        });
    } else {
        /*
         * We do compareAndSet() instead of set() to ensure that Processor
         * stoppage is handled consistently including a condition where
         * Processor never got a chance to transition to RUNNING state
         * before stop() was called. If that happens the stop processor
         * routine will be initiated in start() method, otherwise the IF
         * part will handle the stop processor routine.
         */
        this.scheduledState.compareAndSet(ScheduledState.STARTING, ScheduledState.STOPPING);
    }
}
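The stop sequence above polls for outstanding work by re-scheduling itself every 100 milliseconds until the active thread count drops to one. A condensed sketch of that poll-until-done idiom follows; the counter is a stand-in for NiFi's active-thread count, and the class name is made up.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class PollUntilStoppedDemo {
    public static void main(String[] args) {
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        final AtomicInteger activeThreads = new AtomicInteger(3); // pretend 3 worker threads are still running

        scheduler.execute(new Runnable() {
            @Override
            public void run() {
                // Simulate one worker finishing between checks, then test whether any remain.
                if (activeThreads.decrementAndGet() > 0) {
                    // Threads are still active; poll again in 100 ms, mirroring the stop() example.
                    scheduler.schedule(this, 100, TimeUnit.MILLISECONDS);
                } else {
                    System.out.println("all threads finished; safe to run the @OnStopped step");
                    scheduler.shutdown();
                }
            }
        });
    }
}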
From source file:edu.umass.cs.gigapaxos.PaxosManager.java
/**
 * This test method is deprecated and will either be removed or
 * significantly revamped. Use TESTPaxosMain instead to run a single machine
 * test with multiple virtual nodes.
 *
 * @param args
 * @throws InterruptedException
 * @throws IOException
 * @throws JSONException
 */
@Deprecated
static void test(String[] args) throws InterruptedException, IOException, JSONException {
    int[] members = TESTPaxosConfig.getDefaultGroup();
    int numNodes = members.length;

    SampleNodeConfig<Integer> snc = new SampleNodeConfig<Integer>(2000);
    snc.localSetup(Util.arrayToIntSet(members));

    @SuppressWarnings("unchecked")
    PaxosManager<Integer>[] pms = new PaxosManager[numNodes];
    TESTPaxosApp[] apps = new TESTPaxosApp[numNodes];

    /* We always test with the first member crashed. This also ensures that
     * the system is fault-tolerant to the failure of the default
     * coordinator, which in our policy is the first (or lowest numbered)
     * node. */
    TESTPaxosConfig.crash(members[0]);

    /* We disable sending replies to client in PaxosManager's unit-test. To
     * test with clients, we rely on other tests in TESTPaxosMain
     * (single-machine) or on TESTPaxosNode and TESTPaxosClient for
     * distributed testing. */
    TESTPaxosConfig.setSendReplyToClient(false);

    /* This setting is "guilty until proven innocent", i.e., each node will
     * start out assuming that all other nodes are dead. This is probably
     * too pessimistic as it will cause every node to run for coordinator
     * when it starts up but is good for testing. */
    FailureDetection.setParanoid();

    // Set up paxos managers and apps with nio
    for (int i = 0; i < numNodes; i++) {
        System.out.println("Initiating PaxosManager at node " + members[i]);
        JSONNIOTransport<Integer> niot = new JSONNIOTransport<Integer>(members[i], snc,
                new PacketDemultiplexerDefault(), true);
        apps[i] = new TESTPaxosApp(niot); // app, PM reuse nio
        pms[i] = new PaxosManager<Integer>(members[i], snc, niot, apps[i]);
    }
    System.out.println("Initiated all " + numNodes + " paxos managers with failure detectors..\n");

    /* We don't rigorously test with multiple groups as they are
     * independent, but this is useful for memory testing. */
    int numPaxosGroups = 2;
    String[] names = new String[numPaxosGroups];
    for (int i = 0; i < names.length; i++)
        names[i] = "paxos" + i;

    System.out.println("Creating " + numPaxosGroups + " paxos groups each with " + numNodes
            + " members each, one each at each of the " + numNodes + " nodes");
    for (int node = 0; node < numNodes; node++) {
        int k = 1;
        for (int group = 0; group < numPaxosGroups; group++) {
            // creating a paxos instance may induce recovery from disk
            pms[node].createPaxosInstance(names[group], 0, Util.arrayToIntSet(members), apps[node], null, null,
                    false);
            if (numPaxosGroups > 1000 && ((group % k == 0 && ((k *= 2) > 0)) || group % 100000 == 0)) {
                System.out.print(group + " ");
            }
        }
        System.out.println("..node" + members[node] + " done");
    }
    Thread.sleep(1000);

    /* Wait for all paxos managers to finish recovery. Recovery is finished
     * when initiateRecovery() is complete. At this point, all the paxos
     * groups at that node would have also rolled forward. */
    int maxRecoverySlot = -1;
    int maxRecoveredNode = -1;
    for (int i = 0; i < numNodes; i++) {
        while (!TESTPaxosConfig.isCrashed(members[i]) && !TESTPaxosConfig.getRecovered(members[i], names[0])) {
            log.info("Waiting for node " + members[i] + " to recover ");
            pms[i].waitToRecover();
        }
        log.info("Node" + members[i] + " finished recovery including rollback;\n" + names[0]
                + " recovered at slot " + apps[i].getNumCommitted(names[0]));
        // need max recovery slot for names[0] below
        maxRecoverySlot = Math.max(maxRecoverySlot, apps[i].getNumCommitted(names[0]));
        maxRecoveredNode = i;
    }
    System.out.println("all nodes done creating groups.");

    /*********** Finished creating paxos instances for testing *****************/

    /************* Begin ClientRequestTask **************************/
    ScheduledExecutorService execpool = Executors.newScheduledThreadPool(5);
    class ClientRequestTask implements Runnable {
        private final RequestPacket request;
        private final PaxosManager<Integer> paxosManager;

        ClientRequestTask(RequestPacket req, PaxosManager<Integer> pm) {
            request = req;
            paxosManager = pm;
        }

        public void run() {
            try {
                JSONObject reqJson = request.toJSONObject();
                JSONPacket.putPacketType(reqJson, PaxosPacketType.PAXOS_PACKET.getInt());
                paxosManager.propose(request.getPaxosID(), request, null);
            } catch (JSONException e) {
                e.printStackTrace();
            }
        }
    }
    /************* End ClientRequestTask **************************/

    /* Create and schedule requests. All requests are scheduled immediately
     * to test concurrency */
    int numRequests = 1000;
    RequestPacket[] reqs = new RequestPacket[numRequests];
    ScheduledFuture<?>[] futures = new ScheduledFuture[numRequests];
    int numExceptions = 0;
    double scheduledDelay = 0;
    for (int i = 0; i < numRequests; i++) {
        reqs[i] = new RequestPacket(i, "[ Sample write request numbered " + i + " ]", false);
        reqs[i].putPaxosID(names[0], 0);
        JSONObject reqJson = reqs[i].toJSONObject();
        JSONPacket.putPacketType(reqJson, PaxosPacketType.PAXOS_PACKET.getInt());
        try {
            ClientRequestTask crtask = new ClientRequestTask(reqs[i], pms[1]);
            futures[i] = (ScheduledFuture<?>) execpool.schedule(crtask, (long) scheduledDelay,
                    TimeUnit.MILLISECONDS);
            scheduledDelay += 0;
        } catch (Exception e) {
            e.printStackTrace();
            continue;
        }
    }

    /* Any exceptions below could occur because of exceptions inside paxos.
     * Scheduling a request will invoke PaxosManager.propose() that will
     * cause it to send the request to the corresponding
     * PaxosInstanceStateMachine. */
    log.info("Waiting for request scheduling to complete.");
    for (int i = 0; i < numRequests; i++) {
        try {
            futures[i].get();
        } catch (Exception e) {
            e.printStackTrace();
            numExceptions++;
        }
    }
    log.info("Request scheduling complete; numExceptions=" + numExceptions);
    Thread.sleep(1000);

    /* Wait for scheduled requests to finish being processed by paxos. We
     * check for this by checking that at least one node has executed up to
     * the slot number maxRecoverySlot + numRequests. */
    while (apps[maxRecoveredNode].getNumCommitted(names[0]) < maxRecoverySlot + numRequests) {
        apps[maxRecoveredNode].waitToFinish();
    }
    log.info("Node" + maxRecoveredNode + " has executed up to slot " + (maxRecoverySlot + numRequests));

    /* The code below waits for all uncrashed replicas to finish executing
     * up to the same slot and will then assert the SMR invariant, i.e.,
     * they all made the same state transitions up to that slot. */
    int numCommitted = 0;
    for (int i = 0; i < numNodes; i++) {
        for (int j = i + 1; j < numNodes; j++) {
            if (TESTPaxosConfig.isCrashed(members[i]) || TESTPaxosConfig.isCrashed(members[j]))
                continue; // ignore crashed nodes

            int committed1 = apps[i].getNumCommitted(names[0]);
            int committed2 = apps[j].getNumCommitted(names[0]);
            // Wait for the other node to catch up
            while (committed1 != committed2) {
                if (committed1 > committed2)
                    apps[j].waitToFinish(names[0], committed1);
                else if (committed1 < committed2)
                    apps[i].waitToFinish(names[0], committed2);
                log.info("Waiting : (slot1,hash1)=(" + committed1 + "," + apps[i].getHash(names[0])
                        + "(; (slot2,hash2=" + committed2 + "," + apps[j].getHash(names[0]) + ")");
                Thread.sleep(1000);
                committed1 = apps[i].getNumCommitted(names[0]);
                committed2 = apps[j].getNumCommitted(names[0]);
            }
            // Both nodes caught up to the same slot
            assert (committed1 == committed2) : "numCommitted@" + i + "=" + committed1 + ", numCommitted@" + j
                    + "=" + committed2;
            // Assert state machine replication invariant
            numCommitted = apps[i].getNumCommitted(names[0]);
            assert (apps[i].getHash(names[0]) == apps[j].getHash(names[0])) : ("Waiting : (slot1,hash1)=("
                    + committed1 + "," + apps[i].getHash(names[0]) + "(; (slot2,hash2=" + committed2 + ","
                    + apps[j].getHash(names[0]) + ")");
            // end of SMR invariant
        }
    }

    /* Print preempted requests if any. These could happen during
     * coordinator changes. Preempted requests are converted to no-ops and
     * forwarded to the current presumed coordinator by paxos. */
    String preemptedReqs = "[ ";
    int numPreempted = 0;
    for (int i = 0; i < numRequests; i++) {
        if (!TESTPaxosConfig.isCommitted(reqs[i].requestID)) {
            preemptedReqs += (i + " ");
            numPreempted++;
        }
    }
    preemptedReqs += "]";

    System.out.println("\n\nTest completed. Executed " + numCommitted + " requests consistently including "
            + (numRequests - numPreempted) + " of " + numRequests + " received requests;\nPreempted requests = "
            + preemptedReqs + "; numExceptions=" + numExceptions + "; average message log time="
            + Util.df(DelayProfiler.get("logDelay")) + "ms.\n"
            + "\nNote that it is possible for the test to be successful even if the number of consistently\n"
            + "executed requests is less than the number of received requests as paxos only guarantees\n"
            + "consistency, i.e., that all replicas executed requests in the same order, not that all requests\n"
            + "issued will get executed. The latter property can be achieved by clients reissuing requests\n"
            + "until successfully executed. With reissuals, clients do need to worry about double execution,\n"
            + "so they should be careful. A client is not guaranteed to get a failure message if the request fails,\n"
            + "e.g., if the replica receiving a request dies immediately. If the client uses a timeout to detect\n"
            + "failure and thereupon reissue its request, it is possible that both the original and re-issued\n"
            + "requests are executed. Clients can get around this problem by using sequence numbers within\n"
            + "their app, reading the current sequence number, and then trying to commit their write provided the\n"
            + "sequence number has not changed in the meantime. There are other alternatives, but all of these\n"
            + "are application-specific; they are not paxos's problem\n");

    for (int i = 0; i < numNodes; i++) {
        System.out.println(pms[i].printLog(names[0]));
    }
    execpool.shutdownNow();
    for (PaxosManager<Integer> pm : pms)
        pm.close();
}
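The request-scheduling loop above fires every task with an (effectively) zero delay and then blocks on each returned ScheduledFuture to count failures. A small, self-contained sketch of that schedule-then-get pattern is below; the task body is a placeholder for the propose() call and the names are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class ScheduleBatchDemo {
    public static void main(String[] args) {
        ScheduledExecutorService execpool = Executors.newScheduledThreadPool(5);
        int numRequests = 10;
        ScheduledFuture<?>[] futures = new ScheduledFuture[numRequests];
        int numExceptions = 0;

        for (int i = 0; i < numRequests; i++) {
            final int requestId = i;
            // Zero delay: every task is eligible to run immediately, so they contend for the pool's 5 threads.
            futures[i] = execpool.schedule(new Runnable() {
                @Override
                public void run() {
                    System.out.println("proposing request " + requestId); // placeholder for paxosManager.propose(...)
                }
            }, 0L, TimeUnit.MILLISECONDS);
        }

        for (ScheduledFuture<?> future : futures) {
            try {
                future.get(); // rethrows (wrapped) anything the task threw
            } catch (Exception e) {
                numExceptions++;
            }
        }
        System.out.println("done; numExceptions=" + numExceptions);
        execpool.shutdownNow();
    }
}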
From source file:org.apache.nifi.controller.StandardProcessorNode.java
/**
 * Will idempotently start the processor using the following sequence: <i>
 * <ul>
 * <li>Validate Processor's state (e.g., PropertyDescriptors,
 * ControllerServices etc.)</li>
 * <li>Transition (atomically) the Processor's scheduled state from STOPPED to
 * STARTING. If the above state transition succeeds, then execute the start
 * task (asynchronously) which will be re-tried until @OnScheduled is
 * executed successfully and 'schedulingAgentCallback' is invoked, or until a
 * STOP operation is initiated on this processor. If the state transition fails,
 * it means the processor is already being started and a WARN message will be
 * logged explaining it.</li>
 * </ul>
 * </i>
 * <p>
 * Any exception thrown while invoking operations annotated with @OnScheduled
 * will be caught and logged, after which the @OnUnscheduled operation will be
 * invoked (quietly) and the start sequence will be repeated (re-tried) after the
 * delay provided by 'administrativeYieldMillis'.
 * </p>
 * <p>
 * Upon successful completion of the start sequence (@OnScheduled ->
 * 'schedulingAgentCallback') an attempt will be made to transition the
 * processor's scheduling state to RUNNING, at which point the processor is
 * considered to be fully started and functioning. If upon successful
 * invocation of the @OnScheduled operation the processor can not be
 * transitioned to RUNNING state (e.g., a STOP operation was invoked on the
 * processor while its @OnScheduled operation was executing), the
 * processor's @OnUnscheduled operation will be invoked and its scheduling
 * state will be set to STOPPED, at which point the processor is considered
 * to be fully stopped.
 * </p>
 */
@Override
public <T extends ProcessContext & ControllerServiceLookup> void start(
        final ScheduledExecutorService taskScheduler, final long administrativeYieldMillis,
        final T processContext, final SchedulingAgentCallback schedulingAgentCallback) {
    if (!this.isValid()) {
        throw new IllegalStateException("Processor " + this.getName() + " is not in a valid state due to "
                + this.getValidationErrors());
    }
    final ComponentLog procLog = new SimpleProcessLogger(StandardProcessorNode.this.getIdentifier(), processor);
    if (this.scheduledState.compareAndSet(ScheduledState.STOPPED, ScheduledState.STARTING)) {
        // will ensure that the Processor represented by this node can only be started once
        final Runnable startProcRunnable = new Runnable() {
            @Override
            public void run() {
                try {
                    invokeTaskAsCancelableFuture(schedulingAgentCallback, new Callable<Void>() {
                        @Override
                        public Void call() throws Exception {
                            try (final NarCloseable nc = NarCloseable.withComponentNarLoader(
                                    processor.getClass(), processor.getIdentifier())) {
                                ReflectionUtils.invokeMethodsWithAnnotation(OnScheduled.class, processor,
                                        processContext);
                                return null;
                            }
                        }
                    });

                    if (scheduledState.compareAndSet(ScheduledState.STARTING, ScheduledState.RUNNING)) {
                        // callback provided by StandardProcessScheduler to essentially initiate component's onTrigger() cycle
                        schedulingAgentCallback.trigger();
                    } else {
                        // can only happen if stopProcessor was called before service was transitioned to RUNNING state
                        try (final NarCloseable nc = NarCloseable.withComponentNarLoader(processor.getClass(),
                                processor.getIdentifier())) {
                            ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnUnscheduled.class, processor,
                                    processContext);
                        }
                        scheduledState.set(ScheduledState.STOPPED);
                    }
                } catch (final Exception e) {
                    final Throwable cause = e instanceof InvocationTargetException ? e.getCause() : e;
                    procLog.error(
                            "{} failed to invoke @OnScheduled method due to {}; processor will not be scheduled to run for {} seconds",
                            new Object[] { StandardProcessorNode.this.getProcessor(), cause,
                                    administrativeYieldMillis / 1000L }, cause);
                    LOG.error("Failed to invoke @OnScheduled method due to {}", cause.toString(), cause);

                    ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnUnscheduled.class, processor,
                            processContext);
                    ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnStopped.class, processor,
                            processContext);

                    if (scheduledState.get() != ScheduledState.STOPPING) {
                        // make sure we only continue retry loop if STOP action wasn't initiated
                        taskScheduler.schedule(this, administrativeYieldMillis, TimeUnit.MILLISECONDS);
                    } else {
                        scheduledState.set(ScheduledState.STOPPED);
                    }
                }
            }
        };
        taskScheduler.execute(startProcRunnable);
    } else {
        final String procName = this.processor.getClass().getSimpleName();
        LOG.warn("Can not start '" + procName
                + "' since it's already in the process of being started or it is DISABLED - "
                + scheduledState.get());
        procLog.warn("Can not start '" + procName
                + "' since it's already in the process of being started or it is DISABLED - "
                + scheduledState.get());
    }
}
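The retry branch above decides whether to call schedule() again by checking the scheduled state before rescheduling, rather than cancelling anything. As a complementary illustration (hypothetical, not how NiFi does it), the ScheduledFuture returned by schedule() can also be retained so that a stop path can cancel a retry that has not fired yet:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class CancelPendingRetryDemo {
    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Keep the handle that schedule() returns so a later "stop" can cancel the pending retry.
        ScheduledFuture<?> pendingRetry = scheduler.schedule(new Runnable() {
            @Override
            public void run() {
                System.out.println("retrying start sequence"); // never prints if cancelled in time
            }
        }, 5, TimeUnit.SECONDS);

        // Some time later, a stop request arrives before the retry fires.
        Thread.sleep(200);
        boolean cancelled = pendingRetry.cancel(false); // false: don't interrupt if it already started
        System.out.println("retry cancelled: " + cancelled);
        scheduler.shutdown();
    }
}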