List of usage examples for java.util.Queue.add
boolean add(E e);
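As context for the examples below, here is a minimal sketch of the add contract (standard JDK classes only; the values are illustrative). On an unbounded queue, add always succeeds and returns true; on a capacity-restricted queue it throws IllegalStateException where offer would instead return false.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddDemo {
    public static void main(String[] args) {
        // Unbounded queue: add always succeeds and returns true
        Queue<String> unbounded = new ArrayDeque<>();
        unbounded.add("first");
        unbounded.add("second");
        System.out.println(unbounded.peek()); // prints "first" (FIFO head)

        // Capacity-restricted queue: offer reports failure, add throws
        Queue<String> bounded = new ArrayBlockingQueue<>(1);
        bounded.add("only");
        System.out.println(bounded.offer("overflow")); // prints "false"
        try {
            bounded.add("overflow");
        } catch (IllegalStateException expected) {
            System.out.println("add on a full queue throws: " + expected.getMessage());
        }
    }
}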
From source file:org.codice.ddf.spatial.ogc.csw.catalog.endpoint.CswEndpointTest.java
private List<QueryResponse> getQueryResponseBatch(int batchSize, int total) {
    Queue<Result> results = new ArrayDeque<>();
    for (int i = 1; i <= total; i++) {
        MetacardImpl metacard = new MetacardImpl();
        metacard.setId(i + "");
        results.add(new ResultImpl(metacard));
    }

    List<QueryResponse> queryResponses = new ArrayList<>();
    while (!results.isEmpty()) {
        List<Result> batchList = new ArrayList<>();
        for (int i = 0; i < batchSize; i++) {
            Result result = results.poll();
            if (result == null) {
                break;
            }
            batchList.add(result);
        }
        queryResponses.add(new QueryResponseImpl(null, batchList, total));
    }

    // Add one empty response list to the end
    queryResponses.add(new QueryResponseImpl(null, Collections.emptyList(), 0));
    return queryResponses;
}
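The example above fills a queue and then drains it in fixed-size batches with poll. Stripped of the Metacard types, the same pattern reduces to a generic sketch (names here are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

public final class BatchDrain {
    // Drain a queue into batches of at most batchSize elements each.
    // Using poll()'s null return as the exhaustion signal is safe because
    // ArrayDeque (like most Queue implementations) rejects null elements.
    static <T> List<List<T>> drainInBatches(Queue<T> source, int batchSize) {
        List<List<T>> batches = new ArrayList<>();
        while (!source.isEmpty()) {
            List<T> batch = new ArrayList<>(batchSize);
            for (int i = 0; i < batchSize; i++) {
                T item = source.poll();
                if (item == null) {
                    break; // queue exhausted mid-batch
                }
                batch.add(item);
            }
            batches.add(batch);
        }
        return batches;
    }
}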
From source file:com.epam.reportportal.apache.http.impl.client.AuthenticationStrategyImpl.java
public Queue<AuthOption> select(final Map<String, Header> challenges, final HttpHost authhost,
        final HttpResponse response, final HttpContext context) throws MalformedChallengeException {
    Args.notNull(challenges, "Map of auth challenges");
    Args.notNull(authhost, "Host");
    Args.notNull(response, "HTTP response");
    Args.notNull(context, "HTTP context");
    final HttpClientContext clientContext = HttpClientContext.adapt(context);

    final Queue<AuthOption> options = new LinkedList<AuthOption>();
    final Lookup<AuthSchemeProvider> registry = clientContext.getAuthSchemeRegistry();
    if (registry == null) {
        this.log.debug("Auth scheme registry not set in the context");
        return options;
    }
    final CredentialsProvider credsProvider = clientContext.getCredentialsProvider();
    if (credsProvider == null) {
        this.log.debug("Credentials provider not set in the context");
        return options;
    }
    final RequestConfig config = clientContext.getRequestConfig();
    Collection<String> authPrefs = getPreferredAuthSchemes(config);
    if (authPrefs == null) {
        authPrefs = DEFAULT_SCHEME_PRIORITY;
    }
    if (this.log.isDebugEnabled()) {
        this.log.debug("Authentication schemes in the order of preference: " + authPrefs);
    }

    for (final String id : authPrefs) {
        final Header challenge = challenges.get(id.toLowerCase(Locale.US));
        if (challenge != null) {
            final AuthSchemeProvider authSchemeProvider = registry.lookup(id);
            if (authSchemeProvider == null) {
                if (this.log.isWarnEnabled()) {
                    this.log.warn("Authentication scheme " + id + " not supported");
                    // Try again
                }
                continue;
            }
            final AuthScheme authScheme = authSchemeProvider.create(context);
            authScheme.processChallenge(challenge);
            final AuthScope authScope = new AuthScope(authhost.getHostName(), authhost.getPort(),
                    authScheme.getRealm(), authScheme.getSchemeName());
            final Credentials credentials = credsProvider.getCredentials(authScope);
            if (credentials != null) {
                options.add(new AuthOption(authScheme, credentials));
            }
        } else {
            if (this.log.isDebugEnabled()) {
                this.log.debug("Challenge for " + id + " authentication scheme not available");
                // Try again
            }
        }
    }
    return options;
}
From source file:org.apache.http.HC4.impl.client.AuthenticationStrategyImpl.java
@Override
public Queue<AuthOption> select(final Map<String, Header> challenges, final HttpHost authhost,
        final HttpResponse response, final HttpContext context) throws MalformedChallengeException {
    Args.notNull(challenges, "Map of auth challenges");
    Args.notNull(authhost, "Host");
    Args.notNull(response, "HTTP response");
    Args.notNull(context, "HTTP context");
    final HttpClientContext clientContext = HttpClientContext.adapt(context);

    final Queue<AuthOption> options = new LinkedList<AuthOption>();
    final Lookup<AuthSchemeProvider> registry = clientContext.getAuthSchemeRegistry();
    if (registry == null) {
        this.log.debug("Auth scheme registry not set in the context");
        return options;
    }
    final CredentialsProvider credsProvider = clientContext.getCredentialsProvider();
    if (credsProvider == null) {
        this.log.debug("Credentials provider not set in the context");
        return options;
    }
    final RequestConfig config = clientContext.getRequestConfig();
    Collection<String> authPrefs = getPreferredAuthSchemes(config);
    if (authPrefs == null) {
        authPrefs = DEFAULT_SCHEME_PRIORITY;
    }
    if (this.log.isDebugEnabled()) {
        this.log.debug("Authentication schemes in the order of preference: " + authPrefs);
    }

    for (final String id : authPrefs) {
        final Header challenge = challenges.get(id.toLowerCase(Locale.ROOT));
        if (challenge != null) {
            final AuthSchemeProvider authSchemeProvider = registry.lookup(id);
            if (authSchemeProvider == null) {
                if (this.log.isWarnEnabled()) {
                    this.log.warn("Authentication scheme " + id + " not supported");
                    // Try again
                }
                continue;
            }
            final AuthScheme authScheme = authSchemeProvider.create(context);
            authScheme.processChallenge(challenge);
            final AuthScope authScope = new AuthScope(authhost.getHostName(), authhost.getPort(),
                    authScheme.getRealm(), authScheme.getSchemeName());
            final Credentials credentials = credsProvider.getCredentials(authScope);
            if (credentials != null) {
                options.add(new AuthOption(authScheme, credentials));
            }
        } else {
            if (this.log.isDebugEnabled()) {
                this.log.debug("Challenge for " + id + " authentication scheme not available");
                // Try again
            }
        }
    }
    return options;
}
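Both variants above (they differ only in the Locale used to lower-case the scheme id) use the queue to preserve preference order: options are added in the order of authPrefs and the caller consumes them FIFO, so the first option tried is always the most preferred one that resolved. The core shape, detached from the HttpClient types (a sketch; the map and names are illustrative, not HttpClient API):

import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public final class PreferenceSelect {
    // Collect available candidates in preference order; callers poll them FIFO.
    static Queue<String> selectInPreferenceOrder(List<String> preferences,
            Map<String, String> available) {
        Queue<String> options = new LinkedList<>();
        for (String id : preferences) {
            String candidate = available.get(id);
            if (candidate != null) {
                options.add(candidate);
            }
        }
        return options;
    }
}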
From source file:io.cloudslang.lang.tools.build.verifier.SlangContentVerifier.java
public PreCompileResult createModelsAndValidate(String directoryPath, boolean shouldValidateDescription,
        boolean shouldValidateCheckstyle) {
    Validate.notEmpty(directoryPath, "You must specify a path");
    Validate.isTrue(new File(directoryPath).isDirectory(),
            "Directory path argument '" + directoryPath + "' does not lead to a directory");
    Map<String, Executable> slangModels = new HashMap<>();
    Collection<File> slangFiles = slangCompilationService.listSlangFiles(new File(directoryPath), true);
    loggingService.logEvent(Level.INFO, "Start compiling all slang files under: " + directoryPath);
    loggingService.logEvent(Level.INFO, slangFiles.size() + " .sl files were found");
    loggingService.logEvent(Level.INFO, "");
    Queue<RuntimeException> exceptions = new ArrayDeque<>();
    String errorMessagePrefixMetadata = "";
    for (File slangFile : slangFiles) {
        Executable sourceModel = null;
        try {
            errorMessagePrefixMetadata = "Failed to extract metadata for file: '"
                    + slangFile.getAbsoluteFile() + "'.\n";
            String errorMessagePrefixCompilation = "Failed to compile file: '"
                    + slangFile.getAbsoluteFile() + "'.\n";
            Validate.isTrue(slangFile.isFile(),
                    "file path '" + slangFile.getAbsolutePath() + "' must lead to a file");

            SlangSource slangSource = SlangSource.fromFile(slangFile);
            ExecutableModellingResult preCompileResult = slangCompiler.preCompileSource(slangSource);
            sourceModel = preCompileResult.getExecutable();
            exceptions.addAll(prependPrefix(preCompileResult.getErrors(), errorMessagePrefixCompilation));

            MetadataModellingResult metadataResult =
                    metadataExtractor.extractMetadataModellingResult(slangSource, shouldValidateCheckstyle);
            Metadata sourceMetadata = metadataResult.getMetadata();
            exceptions.addAll(prependPrefix(metadataResult.getErrors(), errorMessagePrefixMetadata));

            if (sourceModel != null) {
                int size = exceptions.size();
                staticValidator.validateSlangFile(slangFile, sourceModel, sourceMetadata,
                        shouldValidateDescription, exceptions);
                if (size == exceptions.size()) {
                    slangModels.put(getUniqueName(sourceModel), sourceModel);
                }
            }
        } catch (Exception e) {
            String errorMessage = errorMessagePrefixMetadata + e.getMessage();
            loggingService.logEvent(Level.ERROR, errorMessage);
            exceptions.add(new RuntimeException(errorMessage, e));
            if (e instanceof MetadataMissingException && sourceModel != null) {
                slangModels.put(getUniqueName(sourceModel), sourceModel);
            }
        }
    }
    if (slangFiles.size() != slangModels.size()) {
        exceptions.add(new RuntimeException("Some Slang files were not pre-compiled.\nFound: "
                + slangFiles.size() + " executable files in path: '" + directoryPath
                + "' But managed to create slang models for only: " + slangModels.size()));
    }
    PreCompileResult preCompileResult = new PreCompileResult();
    preCompileResult.addExceptions(exceptions);
    preCompileResult.addResults(slangModels);
    return preCompileResult;
}
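Here the queue acts as an insertion-ordered error accumulator: errors from the compile, metadata, and validation phases are merged with addAll and add, then reported together. A minimal sketch of that usage (names are illustrative):

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public final class ErrorAccumulator {
    // Accumulate errors from multiple phases in the order they occurred;
    // Queue.addAll delegates to add for each element.
    static Queue<RuntimeException> collectErrors(List<List<RuntimeException>> phases) {
        Queue<RuntimeException> errors = new ArrayDeque<>();
        for (List<RuntimeException> phaseErrors : phases) {
            errors.addAll(phaseErrors);
        }
        return errors;
    }
}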
From source file:com.mirth.connect.plugins.datapruner.DataPruner.java
private Queue<PrunerTask> buildTaskQueue() throws Exception {
    List<Channel> channels =
            com.mirth.connect.server.controllers.ChannelController.getInstance().getChannels(null);
    Queue<PrunerTask> queue = new LinkedList<PrunerTask>();

    for (Channel channel : channels) {
        if (!(channel instanceof InvalidChannel)) {
            ChannelProperties properties = channel.getProperties();
            Integer pruneMetaDataDays = properties.getPruneMetaDataDays();
            Integer pruneContentDays = properties.getPruneContentDays();
            Calendar contentDateThreshold = null;
            Calendar messageDateThreshold = null;

            switch (channel.getProperties().getMessageStorageMode()) {
            case DEVELOPMENT:
            case PRODUCTION:
            case RAW:
                if (pruneContentDays != null) {
                    contentDateThreshold = Calendar.getInstance();
                    contentDateThreshold.set(Calendar.DAY_OF_MONTH,
                            contentDateThreshold.get(Calendar.DAY_OF_MONTH) - pruneContentDays);
                }
                // intentional fall-through: metadata pruning also applies to these modes

            case METADATA:
                if (pruneMetaDataDays != null) {
                    messageDateThreshold = Calendar.getInstance();
                    messageDateThreshold.set(Calendar.DAY_OF_MONTH,
                            messageDateThreshold.get(Calendar.DAY_OF_MONTH) - pruneMetaDataDays);
                }

                if (messageDateThreshold != null || contentDateThreshold != null) {
                    queue.add(new PrunerTask(channel.getId(), channel.getName(), messageDateThreshold,
                            contentDateThreshold, channel.getProperties().isArchiveEnabled()));
                    status.getPendingChannelIds().add(channel.getId());
                }
                break;

            case DISABLED:
                break;

            default:
                String errorMessage = "Unrecognized message storage mode: "
                        + properties.getMessageStorageMode().toString();
                logger.error(errorMessage);

                Map<String, String> attributes = new HashMap<String, String>();
                attributes.put("Channel", channel.getName());
                attributes.put("Error", errorMessage);

                eventController.dispatchEvent(new ServerEvent(serverId, DataPrunerService.PLUGINPOINT,
                        Level.ERROR, Outcome.FAILURE, attributes));
                break;
            }
        }
    }

    return queue;
}
From source file:org.dkpro.lab.engine.impl.BatchTaskEngine.java
/**
 * Execute the subtasks of the given batch task with the given parameter configuration.
 *
 * @param aConfiguration
 *            the batch task whose subtasks are executed.
 * @param aContext
 *            the context of the current batch task.
 * @param aConfig
 *            the current parameter configuration.
 * @param aExecutedSubtasks
 *            already executed subtasks.
 */
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext,
        Map<String, Object> aConfig, Set<String> aExecutedSubtasks)
        throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<String>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<Task>(aConfiguration.getTasks());
    Set<Task> loopDetection = new HashSet<Task>();
    List<UnresolvedImportException> deferralReasons = new ArrayList<UnresolvedImportException>();
    while (!queue.isEmpty()) {
        Task task = queue.poll();

        try {
            // Check if a subtask execution compatible with the present configuration
            // already exists ...
            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task,
                    aConfig, aExecutedSubtasks);
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set scope here so that the inherited scopes are considered
                // set scope here so that tasks added to scope in this loop are considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                execution = runNewExecution(aContext, task, aConfig, aExecutedSubtasks);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");
            }

            // Record new/existing execution
            aExecutedSubtasks.add(execution.getId());
            scope.add(execution.getId());
            loopDetection.clear();
            deferralReasons.clear();
        } catch (UnresolvedImportException e) {
            // Add task back to queue
            log.debug("Deferring execution of task [" + task.getType() + "]: " + e.getMessage());
            queue.add(task);

            // Detect endless loop
            if (loopDetection.contains(task)) {
                StringBuilder details = new StringBuilder();
                for (UnresolvedImportException r : deferralReasons) {
                    details.append("\n -");
                    details.append(r.getMessage());
                }

                // throw an UnresolvedImportException in case there is an outer BatchTask
                // which needs to be executed first
                throw new UnresolvedImportException(e, details.toString());
            }

            // Record failed execution
            loopDetection.add(task);
            deferralReasons.add(e);
        }
    }
}
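The catch block above implements a reusable pattern: a task whose imports cannot be resolved yet goes back on the queue, and the loopDetection set catches the case where a task comes around again without any intervening progress. A minimal sketch of that pattern, with the task type and readiness test reduced to hypothetical stand-ins:

import java.util.HashSet;
import java.util.Queue;
import java.util.Set;
import java.util.function.Predicate;

public final class DeferralLoop {
    // Process tasks FIFO; a task that cannot run yet is re-added to the queue.
    // If the same task is deferred twice with no successful task in between,
    // every remaining task is blocked, so we fail instead of looping forever.
    static <T> void runWithDeferral(Queue<T> queue, Predicate<T> canRun) {
        Set<T> deferredSinceLastProgress = new HashSet<>();
        while (!queue.isEmpty()) {
            T task = queue.poll();
            if (canRun.test(task)) {
                deferredSinceLastProgress.clear(); // progress made; reset detection
            } else {
                if (!deferredSinceLastProgress.add(task)) {
                    throw new IllegalStateException("Unresolvable dependency cycle at: " + task);
                }
                queue.add(task); // defer: retry after the remaining tasks
            }
        }
    }
}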
From source file:de.hybris.platform.test.ThreadPoolTest.java
/**
 * CORE-66PLA-10816 Potential chance to fetch a PoolableThread with a pending transaction from a
 * previous run.
 *
 * Together with setting the logger level log4j.logger.de.hybris.platform.util.threadpool=DEBUG,
 * this prints out information on who/where started the stale transaction.
 */
@Test
public void testTransactionCleanUp() throws Exception {
    final Queue<Transaction> recordedTransactions = new ConcurrentLinkedQueue<Transaction>();
    final boolean flagBefore = Config.getBoolean("transaction.monitor.begin", false);
    Config.setParameter("transaction.monitor.begin", "true");
    ThreadPool pool = null;

    try {
        // create own pool since we don't want to mess up the system
        pool = new ThreadPool(Registry.getCurrentTenantNoFallback().getTenantID(), MAX_THREADS);

        final GenericObjectPool.Config config = new GenericObjectPool.Config();
        config.maxActive = MAX_THREADS;
        config.maxIdle = 1;
        config.maxWait = -1;
        config.whenExhaustedAction = GenericObjectPool.WHEN_EXHAUSTED_BLOCK;
        config.testOnBorrow = true;
        config.testOnReturn = true;
        config.timeBetweenEvictionRunsMillis = 30 * 1000; // keep idle threads for at most 30 sec
        pool.setConfig(config);

        final int maxSize = pool.getMaxActive();
        final int activeBefore = pool.getNumActive();
        final List<NoClosingTransactionProcess> started = new ArrayList<NoClosingTransactionProcess>(maxSize);
        for (int i = activeBefore; i < maxSize; i++) {
            final PoolableThread poolableThread = pool.borrowThread();
            final NoClosingTransactionProcess noClosingTransactionProcess = new NoClosingTransactionProcess();
            started.add(noClosingTransactionProcess);
            poolableThread.execute(noClosingTransactionProcess);
        }
        Thread.sleep(1000);

        transacationStartingBarrier.await(); // await all transactions to start

        // record all started transactions
        for (final NoClosingTransactionProcess singleStarted : started) {
            recordedTransactions.add(singleStarted.getStartedTransaction());
        }

        finishedStaleTransactionLatch.await(180, TimeUnit.SECONDS);
        Thread.sleep(1000); // give them 1 second to finish

        final List<HasNoCurrentRunningTransactionProcess> ranAfter =
                new ArrayList<HasNoCurrentRunningTransactionProcess>(maxSize);
        Transaction recordedTransaction = recordedTransactions.poll();
        do {
            final PoolableThread poolableThread = pool.borrowThread();
            final HasNoCurrentRunningTransactionProcess hasNoCurrentRunningTransactionProcess =
                    new HasNoCurrentRunningTransactionProcess(recordedTransaction);
            ranAfter.add(hasNoCurrentRunningTransactionProcess);
            poolableThread.execute(hasNoCurrentRunningTransactionProcess);
            recordedTransaction = recordedTransactions.poll();
        } while (recordedTransaction != null);

        // still can borrow
        Assert.assertNotNull(pool.borrowThread());
        Thread.sleep(1000);

        // verify that no thread entered the pool with an unfinished transaction
        for (final HasNoCurrentRunningTransactionProcess singleRanAfter : ranAfter) {
            if (singleRanAfter.getException() != null) {
                singleRanAfter.getException().printException();
                Assert.fail("Some thread(s) captured an unfinished transaction in the pool");
            }
        }
    } finally {
        if (pool != null) {
            try {
                pool.close();
            } catch (final Exception e) {
                // can't help it
            }
        }
        Config.setParameter("transaction.monitor.begin", BooleanUtils.toStringTrueFalse(flagBefore));
    }
}
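The test relies on two properties of ConcurrentLinkedQueue: add is safe to call from many threads without external locking, and poll returns null once the queue is drained (the do/while above). A compact illustration of that contract (sizes are illustrative):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class ConcurrentAddDemo {
    public static void main(String[] args) throws InterruptedException {
        Queue<Integer> recorded = new ConcurrentLinkedQueue<>();
        Thread[] workers = new Thread[4];
        for (int t = 0; t < workers.length; t++) {
            final int base = t * 100;
            workers[t] = new Thread(() -> {
                for (int i = 0; i < 100; i++) {
                    recorded.add(base + i); // lock-free; never blocks or fails on capacity
                }
            });
            workers[t].start();
        }
        for (Thread w : workers) {
            w.join();
        }
        // Drain with poll(): returns null when empty
        int count = 0;
        for (Integer item = recorded.poll(); item != null; item = recorded.poll()) {
            count++;
        }
        System.out.println(count); // prints 400
    }
}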
From source file:org.apache.zeppelin.socket.NotebookServer.java
private void addUserConnection(String user, NotebookSocket conn) {
    conn.setUser(user);
    if (userConnectedSockets.containsKey(user)) {
        userConnectedSockets.get(user).add(conn);
    } else {
        Queue<NotebookSocket> socketQueue = new ConcurrentLinkedQueue<>();
        socketQueue.add(conn);
        userConnectedSockets.put(user, socketQueue);
    }
}
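The containsKey/put sequence above is the pre-Java-8 idiom and is racy if two connections for the same user arrive concurrently. On Java 8+ the registration collapses to one atomic call, assuming userConnectedSockets is a ConcurrentMap (a sketch, not the actual Zeppelin field declaration):

// Creates the per-user queue at most once, even under concurrent calls,
// then appends the connection; ConcurrentLinkedQueue.add never blocks.
userConnectedSockets
        .computeIfAbsent(user, u -> new ConcurrentLinkedQueue<>())
        .add(conn);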
From source file:org.lockss.repository.RepositoryNodeImpl.java
private List enumerateEncodedChildren(File[] children, CachedUrlSetSpec filter, boolean includeInactive) {
    // holds fully decoded immediate children
    List<File> expandedDirectories = new ArrayList<File>();

    // holds immediate children that still need to be decoded, and may
    // yield more than one expanded child
    Queue<File> unexpandedDirectories = new LinkedList<File>();

    // add initial set of unexpanded directories
    for (File file : children) {
        if (file.getName().endsWith("\\")) {
            unexpandedDirectories.add(file);
        } else {
            expandedDirectories.add(file);
        }
    }

    // keep expanding directories until no more unexpanded directories exist
    // core algorithm: BFS
    while (!unexpandedDirectories.isEmpty()) {
        File child = unexpandedDirectories.poll();
        if (child.getName().endsWith("\\")) {
            File[] newChildren = child.listFiles();
            for (File newChild : newChildren) {
                unexpandedDirectories.add(newChild);
            }
        } else {
            expandedDirectories.add(child);
        }
    }

    // using iterator to traverse safely
    Iterator<File> iter = expandedDirectories.iterator();
    while (iter.hasNext()) {
        File child = iter.next();
        if ((child.getName().equals(CONTENT_DIR)) || (!child.isDirectory())) {
            // iter remove instead of list.remove
            iter.remove();
        }
    }

    // normalization needed?
    CheckUnnormalizedMode unnormMode = RepositoryManager.getCheckUnnormalizedMode();

    // hold strings representing the url of each expanded child
    List<String> subUrls = new ArrayList<String>();
    for (File child : expandedDirectories) {
        try {
            // http://root/child -> /child
            String location = child.getCanonicalPath()
                    .substring(nodeRootFile.getCanonicalFile().toString().length());
            location = decodeUrl(location);
            String oldLocation = location;
            switch (unnormMode) {
            case Log:
            case Fix:
                // Normalization done here against the url string, instead of
                // against the file in the repository. This alleviates us from
                // dealing with edge conditions where the file split occurs
                // around an encoding. e.g. %/5C is special in file, but decoded
                // URL string is %5C and we handle it correctly.
                location = normalizeTrailingQuestion(location);
                location = normalizeUrlEncodingCase(location);
                if (!oldLocation.equals(location)) {
                    switch (unnormMode) {
                    case Fix:
                        // most dangerous part done here, where we copy and
                        // delete. Maybe we should move to a lost+found instead? :)
                        String newRepoLocation = LockssRepositoryImpl
                                .mapUrlToFileLocation(repository.getRootLocation(), url + location);
                        logger.debug("Fixing unnormalized " + oldLocation + " => " + location);
                        FileUtils.copyDirectory(child, new File(newRepoLocation));
                        FileUtils.deleteDirectory(child);
                        break;
                    case Log:
                        logger.debug("Detected unnormalized " + oldLocation + ", s.b. " + location);
                        break;
                    }
                }
                break;
            }
            location = url + location;
            subUrls.add(location);
        } catch (IOException e) {
            logger.error("Normalizing (" + unnormMode + ") " + child, e);
        } catch (NullPointerException ex) {
            logger.error("Normalizing (" + unnormMode + ") " + child, ex);
        }
    }

    int listSize;
    if (filter == null) {
        listSize = subUrls.size();
    } else {
        // give a reasonable minimum since, if it's filtered, the array size
        // may be much smaller than the total children, particularly in very
        // flat trees
        listSize = Math.min(40, subUrls.size());
    }

    // generate the arraylist with urls and return
    ArrayList childL = new ArrayList(listSize);
    for (String childUrl : subUrls) {
        if ((filter == null) || (filter.matches(childUrl))) {
            try {
                RepositoryNode node = repository.getNode(childUrl);
                if (node == null)
                    continue;
                // add all nodes which are internal or active leaves;
                // deleted nodes are never included
                // boolean activeInternal = !node.isLeaf() && !node.isDeleted();
                // boolean activeLeaf = node.isLeaf() && !node.isDeleted() &&
                //     (!node.isContentInactive() || includeInactive);
                // if (activeInternal || activeLeaf) {
                if (!node.isDeleted() && (!node.isContentInactive() || (includeInactive || !node.isLeaf()))) {
                    childL.add(node);
                }
            } catch (MalformedURLException ignore) {
                // this can safely skip bad files because they will
                // eventually be trimmed by the repository integrity checker
                // and the content will be replaced by a poll repair
                logger.error("Malformed child url: " + childUrl);
            }
        }
    }
    return childL;
}
From source file:org.apache.hadoop.hdfs.notifier.server.ServerHistory.java
/**
 * Checks what notifications are saved in history for the given event and
 * adds those notifications to the given queue. Only the notifications
 * which happened strictly after the edit log operation with the given
 * transaction id are put in the queue.
 * The notifications are put in the queue in the order of their
 * transaction id.
 *
 * @param event the event for which the notifications should be stored
 *        in the queue.
 * @param txId the given transaction id
 * @param notifications the queue in which the notifications will be placed.
 *
 * @throws TransactionIdTooOldException raised when we can't guarantee that
 *         we got all notifications that happened after the given
 *         transaction id.
 */
@Override
public void addNotificationsToQueue(NamespaceEvent event, long txId,
        Queue<NamespaceNotification> notifications) throws TransactionIdTooOldException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Got addNotificationsToQueue for: " + NotifierUtils.asString(event) + " and txId: " + txId);
    }

    historyLock.readLock().lock();
    try {
        if (orderedHistoryList == null || orderedHistoryList.size() == 0) {
            throw new TransactionIdTooOldException("No data in history.");
        }
        if (orderedHistoryList.get(0).txnId > txId
                || orderedHistoryList.get(orderedHistoryList.size() - 1).txnId < txId) {
            throw new TransactionIdTooOldException("No data in history for txId " + txId);
        }

        int index = Collections.binarySearch(orderedHistoryList,
                new HistoryTreeEntry(0, txId, event.type), comparatorByID);
        if (index < 0) {
            // If we got here, there are 2 possibilities:
            // * The client gave us a bad transaction id.
            // * We missed one (or more) transaction(s)
            LOG.error("Potential corrupt history. Got request for: " + NotifierUtils.asString(event)
                    + " and txId: " + txId);
            throw new TransactionIdTooOldException("Potentially corrupt server history");
        }

        String dirFormatPath = event.path;
        if (!dirFormatPath.endsWith(Path.SEPARATOR)) {
            dirFormatPath += Path.SEPARATOR;
        }

        for (int i = index + 1; i < orderedHistoryList.size(); i++) {
            HistoryTreeEntry entry = orderedHistoryList.get(i);
            if (event.type != entry.type) {
                continue;
            }
            String entryPath = entry.getFullPath();
            if (entryPath.startsWith(dirFormatPath)) {
                notifications.add(new NamespaceNotification(entryPath, entry.type, entry.txnId));
            }
        }
    } finally {
        historyLock.readLock().unlock();
    }
}
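Worth noting in this last example is the contract: the method appends into a caller-supplied Queue rather than returning a new collection, so the caller chooses the implementation and its capacity semantics (with a bounded queue, add may throw IllegalStateException). A minimal sketch of the same shape (names are illustrative):

import java.util.List;
import java.util.Queue;

public final class RangeScan {
    // Append every id strictly greater than afterId to the caller's queue,
    // preserving the input order. The caller picks the Queue implementation;
    // on a capacity-bounded queue, add may throw IllegalStateException.
    static void addIdsAfter(List<Long> orderedIds, long afterId, Queue<Long> out) {
        for (long id : orderedIds) {
            if (id > afterId) {
                out.add(id);
            }
        }
    }
}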