List of usage examples for java.util.Queue#isEmpty()
boolean isEmpty();
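Before the real-world examples below, here is a minimal self-contained sketch of the two idioms they all rely on: an early-return guard and a drain loop that pairs isEmpty() with poll(). The queue contents here are hypothetical and exist only for illustration.

import java.util.LinkedList;
import java.util.Queue;

public class QueueIsEmptyExample {
    public static void main(String[] args) {
        Queue<String> tasks = new LinkedList<String>();
        tasks.add("first");
        tasks.add("second");

        // Early-return guard: skip all processing when nothing is queued.
        if (tasks.isEmpty()) {
            return;
        }

        // Drain loop: isEmpty() guards the loop, poll() removes and returns the head.
        while (!tasks.isEmpty()) {
            String task = tasks.poll();
            System.out.println("Processing: " + task);
        }
    }
}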
From source file:org.jasig.portal.portlet.rendering.PortletEventCoordinatationService.java
@Override
public void resolvePortletEvents(HttpServletRequest request, PortletEventQueue portletEventQueue) {
    final Queue<QueuedEvent> events = portletEventQueue.getUnresolvedEvents();

    // Skip all processing if there are no new events.
    if (events.isEmpty()) {
        return;
    }

    // Get all the portlets the user is subscribed to
    final IUserInstance userInstance = this.userInstanceManager.getUserInstance(request);
    final IUserPreferencesManager preferencesManager = userInstance.getPreferencesManager();
    final IUserLayoutManager userLayoutManager = preferencesManager.getUserLayoutManager();

    // Make a local copy so we can remove data from it
    final Set<String> allLayoutNodeIds = new LinkedHashSet<String>(userLayoutManager.getAllSubscribedChannels());

    final Map<String, IPortletEntity> portletEntityCache = new LinkedHashMap<String, IPortletEntity>();

    while (!events.isEmpty()) {
        final QueuedEvent queuedEvent = events.poll();
        if (queuedEvent == null) {
            // no more queued events, done resolving
            return;
        }

        final IPortletWindowId sourceWindowId = queuedEvent.getPortletWindowId();
        final Event event = queuedEvent.getEvent();

        final boolean globalEvent = isGlobalEvent(request, sourceWindowId, event);

        final Set<IPortletDefinition> portletDefinitions = new LinkedHashSet<IPortletDefinition>();
        if (globalEvent) {
            portletDefinitions.addAll(this.portletDefinitionRegistry.getAllPortletDefinitions());
        }

        // Check each subscription to see what events it is registered to see
        for (final Iterator<String> layoutNodeIdItr = allLayoutNodeIds.iterator(); layoutNodeIdItr.hasNext();) {
            final String layoutNodeId = layoutNodeIdItr.next();

            IPortletEntity portletEntity = portletEntityCache.get(layoutNodeId);
            if (portletEntity == null) {
                portletEntity = this.portletEntityRegistry.getOrCreatePortletEntity(request, userInstance, layoutNodeId);

                // if the portlet entity registry returned null, the portlet has been deleted - remove it (see UP-3378)
                if (portletEntity == null) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                final IPortletDefinitionId portletDefinitionId = portletEntity.getPortletDefinitionId();
                final PortletDefinition portletDescriptor = this.portletDefinitionRegistry.getParentPortletDescriptor(portletDefinitionId);
                if (portletDescriptor == null) {
                    // Misconfigured portlet; remove it from the list so we don't check it again, and ignore it
                    layoutNodeIdItr.remove();
                    continue;
                }

                final List<? extends EventDefinitionReference> supportedProcessingEvents = portletDescriptor.getSupportedProcessingEvents();
                // Skip portlets that don't handle any events and remove them from the set so they are not checked again
                if (supportedProcessingEvents == null || supportedProcessingEvents.size() == 0) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                portletEntityCache.put(layoutNodeId, portletEntity);
            }

            final IPortletDefinition portletDefinition = portletEntity.getPortletDefinition();
            final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
            if (this.supportsEvent(event, portletDefinitionId)) {
                this.logger.debug("{} supports event {}", portletDefinition, event);

                // If this is the default portlet entity remove the definition from the all defs set to avoid duplicate processing
                final IPortletEntity defaultPortletEntity = this.portletEntityRegistry.getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                if (defaultPortletEntity.equals(portletEntity)) {
                    portletDefinitions.remove(portletDefinition);
                }

                final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                final Set<IPortletWindow> portletWindows = this.portletWindowRegistry.getAllPortletWindowsForEntity(request, portletEntityId);

                for (final IPortletWindow portletWindow : portletWindows) {
                    this.logger.debug("{} resolved target {}", event, portletWindow);
                    final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                    final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                    portletEventQueue.offerEvent(portletWindowId, new QueuedEvent(sourceWindowId, unmarshalledEvent));
                }
            } else {
                portletDefinitions.remove(portletDefinition);
            }
        }

        if (!portletDefinitions.isEmpty()) {
            final IPerson user = userInstance.getPerson();
            final EntityIdentifier ei = user.getEntityIdentifier();
            final IAuthorizationPrincipal ap = AuthorizationService.instance().newPrincipal(ei.getKey(), ei.getType());

            // If the event is global there might still be portlet definitions that need targeting
            for (final IPortletDefinition portletDefinition : portletDefinitions) {
                final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
                // Check if the user can render the portlet definition before doing event tests
                if (ap.canRender(portletDefinitionId.getStringId())) {
                    if (this.supportsEvent(event, portletDefinitionId)) {
                        this.logger.debug("{} supports event {}", portletDefinition, event);

                        final IPortletEntity portletEntity = this.portletEntityRegistry.getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                        final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                        final Set<IPortletWindow> portletWindows = this.portletWindowRegistry.getAllPortletWindowsForEntity(request, portletEntityId);

                        for (final IPortletWindow portletWindow : portletWindows) {
                            this.logger.debug("{} resolved target {}", event, portletWindow);
                            final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                            final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                            portletEventQueue.offerEvent(portletWindowId, new QueuedEvent(sourceWindowId, unmarshalledEvent));
                        }
                    }
                }
            }
        }
    }
}
From source file:com.liveramp.hank.partition_server.UpdateManager.java
@Override
public void update() throws IOException {
    HankTimer timer = new HankTimer();
    try {
        // Delete unknown files
        deleteUnknownFiles();

        // Perform update
        Semaphore concurrentUpdatesSemaphore = new Semaphore(configurator.getNumConcurrentUpdates());
        List<Throwable> encounteredThrowables = new ArrayList<Throwable>();
        PartitionUpdateTaskStatisticsAggregator partitionUpdateTaskStatisticsAggregator = new PartitionUpdateTaskStatisticsAggregator();
        Map<String, Queue<PartitionUpdateTask>> dataDirectoryToUpdateTasks = new HashMap<String, Queue<PartitionUpdateTask>>();
        List<PartitionUpdateTask> allUpdateTasks = buildPartitionUpdateTasks(partitionUpdateTaskStatisticsAggregator, encounteredThrowables);

        // Build and organize update tasks per data directory
        for (PartitionUpdateTask updateTask : allUpdateTasks) {
            String dataDirectory = updateTask.getDataDirectory();
            Queue<PartitionUpdateTask> updateTasks = dataDirectoryToUpdateTasks.get(dataDirectory);
            if (updateTasks == null) {
                updateTasks = new LinkedList<PartitionUpdateTask>();
                dataDirectoryToUpdateTasks.put(dataDirectory, updateTasks);
            }
            updateTasks.add(updateTask);
        }

        // Logging
        LOG.info("Number of update tasks: " + allUpdateTasks.size());
        for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
            LOG.info("Number of update tasks scheduled in " + entry.getKey() + ": " + entry.getValue().size());
        }

        // Build executor services
        Map<String, ExecutorService> dataDirectoryToExecutorService = new HashMap<String, ExecutorService>();
        for (String dataDirectory : dataDirectoryToUpdateTasks.keySet()) {
            dataDirectoryToExecutorService.put(dataDirectory,
                    new UpdateThreadPoolExecutor(configurator.getMaxConcurrentUpdatesPerDataDirectory(),
                            new UpdaterThreadFactory(dataDirectory), concurrentUpdatesSemaphore));
        }

        LOG.info("Submitting update tasks for " + dataDirectoryToUpdateTasks.size() + " directories.");

        // Execute tasks. We execute one task for each data directory and loop around so that the tasks
        // attempt to acquire the semaphore in a reasonable order.
        boolean remaining = true;
        while (remaining) {
            remaining = false;
            for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
                // Pop next task
                Queue<PartitionUpdateTask> partitionUpdateTasks = entry.getValue();
                if (!partitionUpdateTasks.isEmpty()) {
                    PartitionUpdateTask partitionUpdateTask = partitionUpdateTasks.remove();
                    // Execute task
                    dataDirectoryToExecutorService.get(entry.getKey()).execute(partitionUpdateTask);
                }
                if (!partitionUpdateTasks.isEmpty()) {
                    remaining = true;
                }
            }
        }

        LOG.info("All update tasks submitted, shutting down executor services");

        // Shutdown executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdown();
        }

        LOG.info("Waiting for executors to finish.");

        // Wait for executors to finish
        for (Map.Entry<String, ExecutorService> entry : dataDirectoryToExecutorService.entrySet()) {
            String directory = entry.getKey();
            ExecutorService executorService = entry.getValue();
            boolean keepWaiting = true;
            while (keepWaiting) {
                try {
                    LOG.info("Waiting for updates to complete on data directory: " + directory);
                    boolean terminated = executorService.awaitTermination(
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_VALUE,
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_UNIT);
                    if (terminated) {
                        // We finished executing all tasks
                        LOG.info("Finished updates for directory: " + directory);
                        keepWaiting = false;
                    }
                    // Otherwise, timeout elapsed and current thread was not interrupted. Keep waiting.

                    // Record update ETA
                    Hosts.setUpdateETA(host, partitionUpdateTaskStatisticsAggregator.computeETA());
                } catch (InterruptedException e) {
                    // Received interruption (stop request).
                    // Swallow the interrupted state and ask the executor to shutdown immediately. Also, keep waiting.
                    LOG.info("The update manager was interrupted. Stopping the update process (stop executing new partition update tasks"
                            + " and wait for those that were running to finish).");
                    // Shutdown all executors
                    for (ExecutorService otherExecutorService : dataDirectoryToExecutorService.values()) {
                        otherExecutorService.shutdownNow();
                    }
                    // Record failed update exception (we need to keep waiting)
                    encounteredThrowables.add(new IOException("Failed to complete update: update interruption was requested."));
                }
            }
        }

        LOG.info("All executors have finished updates");

        // Shutdown all executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdownNow();
        }

        LOG.info("Finished with " + encounteredThrowables.size() + " errors.");

        // Detect failures
        if (!encounteredThrowables.isEmpty()) {
            LOG.error(String.format("%d exceptions encountered while running partition update tasks:", encounteredThrowables.size()));
            int i = 0;
            for (Throwable t : encounteredThrowables) {
                LOG.error(String.format("Exception %d/%d:", ++i, encounteredThrowables.size()), t);
            }
            throw new IOException(String.format(
                    "Failed to complete update: %d exceptions encountered while running partition update tasks.",
                    encounteredThrowables.size()));
        }

        // Garbage collect useless host domains
        garbageCollectHostDomains(host);

        // Log statistics
        partitionUpdateTaskStatisticsAggregator.logStats();
    } catch (IOException e) {
        LOG.info("Update failed and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
        throw e;
    }
    LOG.info("Update succeeded and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
}
From source file:org.aksw.simba.cetus.yago.YagoBasedTypeSearcher.java
protected void searchDolceSuperClasses(Set<Resource> types) {
    Queue<Resource> queue = new LinkedList<Resource>(types);
    Resource classResource, superClass;
    RDFNode node;
    NodeIterator nodeIterator;
    Set<Resource> yagoSuperClasses = new HashSet<Resource>();
    Set<Resource> dolceSuperClasses = new HashSet<Resource>();
    boolean dolceClassFound = false;
    while (!queue.isEmpty()) {
        classResource = queue.poll();
        // If this resource is a DOLCE resource
        if (dolceClassModel.containsResource(classResource)) {
            dolceClassFound = true;
        } else {
            nodeIterator = classesModel.listObjectsOfProperty(classResource, RDFS.subClassOf);
            yagoSuperClasses.clear();
            dolceSuperClasses.clear();
            while (nodeIterator.hasNext()) {
                node = nodeIterator.next();
                if (node.isResource()) {
                    superClass = node.asResource();
                    if (dolceClassModel.containsResource(superClass)) {
                        dolceSuperClasses.add(superClass);
                    } else {
                        yagoSuperClasses.add(superClass);
                    }
                } else {
                    LOGGER.error("Expected a resource in the statement (" + classResource
                            + ", rdfs:subClassOf, " + node + "). Ignoring this statement.");
                }
            }
            // If a DOLCE class has been found
            if (dolceSuperClasses.size() > 0) {
                // add only the DOLCE classes and discard all others
                types.addAll(dolceSuperClasses);
                dolceClassFound = true;
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Added super classes of " + classResource.getURI() + " --> "
                            + Arrays.toString(dolceSuperClasses.toArray()));
                }
            } else {
                for (Resource r : yagoSuperClasses) {
                    // If this class has not been seen before (and thus has not already been added to the queue)
                    if (!types.contains(r)) {
                        types.add(r);
                        queue.add(r);
                    }
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("Added super classes of " + classResource.getURI() + " --> "
                                + Arrays.toString(yagoSuperClasses.toArray()));
                    }
                }
            }
        }
    }
    if (!dolceClassFound) {
        LOGGER.warn("Couldn't find a DOLCE class for the following list of types: " + Arrays.toString(types.toArray()));
    }
}
From source file:org.apache.gobblin.example.wikipedia.WikipediaExtractor.java
private long createLowWatermarkForBootstrap(WorkUnitState state) throws IOException {
    String bootstrapPeriodString = state.getProp(BOOTSTRAP_PERIOD, DEFAULT_BOOTSTRAP_PERIOD);
    Period period = Period.parse(bootstrapPeriodString);
    DateTime startTime = DateTime.now().minus(period);

    try {
        Queue<JsonElement> firstRevision = retrievePageRevisions(ImmutableMap.<String, String>builder()
                .putAll(this.baseQuery).put("rvprop", "ids").put("titles", this.requestedTitle)
                .put("rvlimit", "1").put("rvstart", WIKIPEDIA_TIMESTAMP_FORMAT.print(startTime))
                .put("rvdir", "newer").build());
        if (firstRevision.isEmpty()) {
            throw new IOException("Could not retrieve oldest revision, returned empty revisions list.");
        }
        return parseRevision(firstRevision.poll());
    } catch (URISyntaxException use) {
        throw new IOException(use);
    }
}
From source file:org.dkpro.lab.engine.impl.BatchTaskEngine.java
/**
 * Locate the latest task execution compatible with the given task configuration.
 *
 * @param aContext
 *            the context of the current batch task.
 * @param aConfig
 *            the current parameter configuration.
 * @param aExecutedSubtasks
 *            already executed subtasks.
 */
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext, Map<String, Object> aConfig,
        Set<String> aExecutedSubtasks) throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<String>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<Task>(aConfiguration.getTasks());
    Set<Task> loopDetection = new HashSet<Task>();

    List<UnresolvedImportException> deferralReasons = new ArrayList<UnresolvedImportException>();
    while (!queue.isEmpty()) {
        Task task = queue.poll();

        try {
            // Check if a subtask execution compatible with the present configuration
            // already exists ...
            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task, aConfig,
                    aExecutedSubtasks);
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set the scope here so that the inherited scopes are considered
                // and so that tasks added to the scope in this loop are considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                execution = runNewExecution(aContext, task, aConfig, aExecutedSubtasks);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");
            }

            // Record new/existing execution
            aExecutedSubtasks.add(execution.getId());
            scope.add(execution.getId());
            loopDetection.clear();
            deferralReasons.clear();
        } catch (UnresolvedImportException e) {
            // Add the task back to the queue
            log.debug("Deferring execution of task [" + task.getType() + "]: " + e.getMessage());
            queue.add(task);

            // Detect endless loop
            if (loopDetection.contains(task)) {
                StringBuilder details = new StringBuilder();
                for (UnresolvedImportException r : deferralReasons) {
                    details.append("\n -");
                    details.append(r.getMessage());
                }

                // throw an UnresolvedImportException in case there is an outer BatchTask
                // which needs to be executed first
                throw new UnresolvedImportException(e, details.toString());
            }

            // Record the failed execution
            loopDetection.add(task);
            deferralReasons.add(e);
        }
    }
}
From source file:org.phenotips.data.permissions.internal.DefaultPatientAccessHelper.java
@Override
public AccessLevel getAccessLevel(Patient patient, EntityReference user) {
    AccessLevel result = this.manager.resolveAccessLevel("none");
    if (patient == null || user == null) {
        return result;
    }
    try {
        EntityReference owner = getOwner(patient).getUser();
        Collection<Collaborator> collaborators = getCollaborators(patient);
        Set<DocumentReference> processedEntities = new HashSet<DocumentReference>();
        Queue<DocumentReference> entitiesToCheck = new LinkedList<DocumentReference>();
        entitiesToCheck.add((DocumentReference) user);
        AccessLevel currentItemAccess = null;
        DocumentReference currentItem;
        XWikiContext context = getXWikiContext();
        XWikiGroupService groupService = context.getWiki().getGroupService(context);
        while (!entitiesToCheck.isEmpty()) {
            currentItem = entitiesToCheck.poll();
            currentItemAccess = getAccessLevel(currentItem, owner, collaborators);
            if (currentItemAccess.compareTo(result) > 0) {
                result = currentItemAccess;
            }
            processedEntities.add(currentItem);
            Collection<DocumentReference> groups = groupService.getAllGroupsReferencesForMember(currentItem, 0, 0, context);
            groups.removeAll(processedEntities);
            entitiesToCheck.addAll(groups);
        }
    } catch (XWikiException ex) {
        this.logger.warn("Failed to compute access level for [{}] on [{}]: {}", user, patient.getId(), ex.getMessage());
    }
    return result;
}
From source file:org.apereo.portal.portlet.rendering.PortletEventCoordinatationService.java
@Override
public void resolvePortletEvents(HttpServletRequest request, PortletEventQueue portletEventQueue) {
    final Queue<QueuedEvent> events = portletEventQueue.getUnresolvedEvents();

    // Skip all processing if there are no new events.
    if (events.isEmpty()) {
        return;
    }

    // Get all the portlets the user is subscribed to
    final IUserInstance userInstance = this.userInstanceManager.getUserInstance(request);
    final IUserPreferencesManager preferencesManager = userInstance.getPreferencesManager();
    final IUserLayoutManager userLayoutManager = preferencesManager.getUserLayoutManager();

    // Make a local copy so we can remove data from it
    final Set<String> allLayoutNodeIds = new LinkedHashSet<String>(userLayoutManager.getAllSubscribedChannels());

    final Map<String, IPortletEntity> portletEntityCache = new LinkedHashMap<String, IPortletEntity>();

    while (!events.isEmpty()) {
        final QueuedEvent queuedEvent = events.poll();
        if (queuedEvent == null) {
            // no more queued events, done resolving
            return;
        }

        final IPortletWindowId sourceWindowId = queuedEvent.getPortletWindowId();
        final Event event = queuedEvent.getEvent();

        final boolean globalEvent = isGlobalEvent(request, sourceWindowId, event);

        final Set<IPortletDefinition> portletDefinitions = new LinkedHashSet<IPortletDefinition>();
        if (globalEvent) {
            portletDefinitions.addAll(this.portletDefinitionRegistry.getAllPortletDefinitions());
        }

        // Check each subscription to see what events it is registered to see
        for (final Iterator<String> layoutNodeIdItr = allLayoutNodeIds.iterator(); layoutNodeIdItr.hasNext();) {
            final String layoutNodeId = layoutNodeIdItr.next();

            IPortletEntity portletEntity = portletEntityCache.get(layoutNodeId);
            if (portletEntity == null) {
                portletEntity = this.portletEntityRegistry.getOrCreatePortletEntity(request, userInstance, layoutNodeId);

                // if the portlet entity registry returned null, the portlet has been deleted - remove it (see UP-3378)
                if (portletEntity == null) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                final IPortletDefinitionId portletDefinitionId = portletEntity.getPortletDefinitionId();
                final PortletDefinition portletDescriptor = this.portletDefinitionRegistry.getParentPortletDescriptor(portletDefinitionId);
                if (portletDescriptor == null) {
                    // Misconfigured portlet; remove it from the list so we don't check it again, and ignore it
                    layoutNodeIdItr.remove();
                    continue;
                }

                final List<? extends EventDefinitionReference> supportedProcessingEvents = portletDescriptor.getSupportedProcessingEvents();
                // Skip portlets that don't handle any events and remove them from the set so they are not checked again
                if (supportedProcessingEvents == null || supportedProcessingEvents.size() == 0) {
                    layoutNodeIdItr.remove();
                    continue;
                }

                portletEntityCache.put(layoutNodeId, portletEntity);
            }

            final IPortletDefinition portletDefinition = portletEntity.getPortletDefinition();
            final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
            if (this.supportsEvent(event, portletDefinitionId)) {
                this.logger.debug("{} supports event {}", portletDefinition, event);

                // If this is the default portlet entity remove the definition from the all defs set to avoid duplicate processing
                final IPortletEntity defaultPortletEntity = this.portletEntityRegistry.getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                if (defaultPortletEntity.equals(portletEntity)) {
                    portletDefinitions.remove(portletDefinition);
                }

                // Is this portlet permitted to receive events? (Or is it disablePortletEvents=true?)
                IPortletDefinitionParameter disablePortletEvents = portletDefinition.getParameter(PortletExecutionManager.DISABLE_PORTLET_EVENTS_PARAMETER);
                if (disablePortletEvents != null && Boolean.parseBoolean(disablePortletEvents.getValue())) {
                    logger.info("Ignoring portlet events for portlet '{}' because they have been disabled.", portletDefinition.getFName());
                    continue;
                }

                final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                final Set<IPortletWindow> portletWindows = this.portletWindowRegistry.getAllPortletWindowsForEntity(request, portletEntityId);

                for (final IPortletWindow portletWindow : portletWindows) {
                    this.logger.debug("{} resolved target {}", event, portletWindow);
                    final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                    final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                    portletEventQueue.offerEvent(portletWindowId, new QueuedEvent(sourceWindowId, unmarshalledEvent));
                }
            } else {
                portletDefinitions.remove(portletDefinition);
            }
        }

        if (!portletDefinitions.isEmpty()) {
            final IPerson user = userInstance.getPerson();
            final EntityIdentifier ei = user.getEntityIdentifier();
            final IAuthorizationPrincipal ap = AuthorizationService.instance().newPrincipal(ei.getKey(), ei.getType());

            // If the event is global there might still be portlet definitions that need targeting
            for (final IPortletDefinition portletDefinition : portletDefinitions) {
                // Is this portlet permitted to receive events? (Or is it disablePortletEvents=true?)
                IPortletDefinitionParameter disablePortletEvents = portletDefinition.getParameter(PortletExecutionManager.DISABLE_PORTLET_EVENTS_PARAMETER);
                if (disablePortletEvents != null && Boolean.parseBoolean(disablePortletEvents.getValue())) {
                    logger.info("Ignoring portlet events for portlet '{}' because they have been disabled.", portletDefinition.getFName());
                    continue;
                }

                final IPortletDefinitionId portletDefinitionId = portletDefinition.getPortletDefinitionId();
                // Check if the user can render the portlet definition before doing event tests
                if (ap.canRender(portletDefinitionId.getStringId())) {
                    if (this.supportsEvent(event, portletDefinitionId)) {
                        this.logger.debug("{} supports event {}", portletDefinition, event);

                        final IPortletEntity portletEntity = this.portletEntityRegistry.getOrCreateDefaultPortletEntity(request, portletDefinitionId);
                        final IPortletEntityId portletEntityId = portletEntity.getPortletEntityId();
                        final Set<IPortletWindow> portletWindows = this.portletWindowRegistry.getAllPortletWindowsForEntity(request, portletEntityId);

                        for (final IPortletWindow portletWindow : portletWindows) {
                            this.logger.debug("{} resolved target {}", event, portletWindow);
                            final IPortletWindowId portletWindowId = portletWindow.getPortletWindowId();
                            final Event unmarshalledEvent = this.unmarshall(portletWindow, event);
                            portletEventQueue.offerEvent(portletWindowId, new QueuedEvent(sourceWindowId, unmarshalledEvent));
                        }
                    }
                }
            }
        }
    }
}
From source file:org.cleverbus.component.externalcall.ExternalCallComponentTest.java
private boolean sendAndVerifyBatch(Message[] messages) throws Exception {
    boolean lockFailureEncountered = false;
    HashMap<Message, Future<String>> replies = new HashMap<Message, Future<String>>();

    // send messages that have no reply, resend messages that have LockFailureException instead of a reply,
    // verify results and re-send failures - the test has a timeout set because this is potentially endless
    Queue<Message> unverifiedMessages = new LinkedList<Message>(Arrays.asList(messages));
    while (!unverifiedMessages.isEmpty()) {
        Message message = unverifiedMessages.poll();
        boolean replyAvailable = replies.containsKey(message);
        if (replyAvailable) {
            Future<String> reply = replies.get(message);
            try {
                reply.get(); // this will throw an exception if it occurred during processing
            } catch (Exception exc) {
                if (ExceptionUtils.indexOfType(exc, LockFailureException.class) != -1) {
                    // expected cause - this test verifies that this scenario happens and is handled properly
                    lockFailureEncountered = true;
                    replyAvailable = false; // mark reply unavailable to resend the original message
                } else {
                    // fail by rethrowing
                    Log.error("Unexpected failure for message {} --", message, exc);
                    throw exc;
                }
            }
        }
        if (!replyAvailable) {
            unverifiedMessages.add(message); // mark message as still unverified
            replies.put(message, requestViaExternalCallAsync(message, "mock:test", "concurrentKey",
                    "external call original body"));
        }
    }

    // check the call is now in DB as OK and with the correct LAST msg timestamp
    assertExtCallStateInDB(extCallId, ExternalCallStateEnum.OK, messages[messages.length - 1]);

    return lockFailureEncountered;
}
From source file:org.rhq.core.plugin.testutil.AbstractAgentPluginTest.java
/**
 * Test that executes all the no-arg operations for all the subresources of a provided resource.
 * Notes:
 * 1) no operations are executed on the root resource provided.
 * 2) if a resource is ignored then all subresources of that resource are ignored too.
 *
 * @param rootResource root resource
 * @param ignoredResources resources to be ignored
 * @param ignoredOperations operations to be ignored
 * @throws PluginContainerException
 */
protected void executeNoArgOperations(Resource rootResource, List<String> ignoredResources,
        List<String> ignoredOperations) throws PluginContainerException {
    ignoredResources = (ignoredResources == null) ? new ArrayList<String>() : ignoredResources;
    ignoredOperations = (ignoredOperations == null) ? new ArrayList<String>() : ignoredOperations;

    Queue<Resource> unparsedResources = new LinkedList<Resource>();
    addCommitedChildrenToCollection(unparsedResources, rootResource, ignoredResources);

    while (!unparsedResources.isEmpty()) {
        Resource resourceUnderTest = unparsedResources.poll();
        addCommitedChildrenToCollection(unparsedResources, resourceUnderTest, ignoredResources);

        for (OperationDefinition operationUnderTest : resourceUnderTest.getResourceType().getOperationDefinitions()) {
            if (!ignoredOperations.contains(operationUnderTest.getName())) {
                if (operationUnderTest.getParametersConfigurationDefinition() == null
                        || operationUnderTest.getParametersConfigurationDefinition().getPropertyDefinitions().isEmpty()) {
                    this.invokeOperationAndAssertSuccess(resourceUnderTest, operationUnderTest.getName(),
                            new Configuration());
                }
            }
        }
    }
}
From source file:org.openhab.binding.networkhealth.discovery.NetworkHealthDiscoveryService.java
/**
 * Starts a DiscoveryThread for each IP on the networks.
 *
 * @param networkIPs the queue of IP addresses to probe
 */
private void startDiscovery(final Queue<String> networkIPs) {
    Runnable runnable = new Runnable() {
        public void run() {
            DiscoveryThread discoveryThread = null;
            DiscoveryThreadResult discoveryThreadResult = new DiscoveryThreadResult() {
                @Override
                public void newDevice(String ip) {
                    submitDiscoveryResults(ip);
                }
            };

            // ensures that only one thread at a time accesses the queue
            synchronized (lockObject) {
                if (networkIPs.isEmpty()) {
                    discoveryJob.cancel(false);
                } else {
                    discoveryThread = new DiscoveryThread(networkIPs.remove(), discoveryThreadResult);
                }
            }

            if (discoveryThread != null)
                discoveryThread.start();
        }
    };

    /* Every millisecond a new thread will be created. Because the ping has a timeout of 1 second,
     * only about 1000 threads will be created at most. */
    discoveryJob = scheduler.scheduleAtFixedRate(runnable, 0, TASK_CREATING_TIME_IN_MS, TimeUnit.MILLISECONDS);
}