List of usage examples for java.util.LinkedList.descendingIterator()
public Iterator<E> descendingIterator()
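Before the project examples below, here is a minimal sketch (with hypothetical sample values) illustrating what the examples rely on: descendingIterator() walks a LinkedList from tail to head, and remove() on the returned iterator also removes the element from the list.

import java.util.Iterator;
import java.util.LinkedList;

public class DescendingIteratorDemo {
    public static void main(String[] args) {
        LinkedList<String> tasks = new LinkedList<>();
        tasks.add("first");
        tasks.add("second");
        tasks.add("third");

        // Iterates from tail to head: third, second, first
        Iterator<String> it = tasks.descendingIterator();
        while (it.hasNext()) {
            String task = it.next();
            System.out.println(task);
            if (task.equals("second")) {
                it.remove(); // removing through the iterator also removes from the list
            }
        }

        System.out.println(tasks); // prints [first, third]
    }
}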
From source file:org.exoplatform.web.application.javascript.JavascriptConfigService.java
private String encode(LinkedList<String> params, String alias) {
    alias = alias.replace("/", "_");
    int idx = -1;
    Iterator<String> iterator = params.descendingIterator();
    while (iterator.hasNext()) {
        String param = iterator.next();
        Matcher matcher = INDEX_PATTERN.matcher(param);
        if (matcher.matches()) {
            idx = Integer.parseInt(matcher.group(2));
            break;
        } else if (alias.equals(param)) {
            idx = 0;
            break;
        }
    }
    if (idx != -1) {
        StringBuilder tmp = new StringBuilder(alias);
        tmp.append("_").append(idx + 1);
        return tmp.toString();
    } else {
        return alias;
    }
}
From source file:net.dv8tion.jda.core.entities.MessageHistory.java
/**
 * Retrieves messages from Discord that were sent more recently than the most recently sent message in
 * MessageHistory's history cache ({@link #getRetrievedHistory()}).
 * Use case for this method is for getting more recent messages after jumping to a specific point in history
 * using something like {@link MessageChannel#getHistoryAround(String, int)}.
 * <br>This method works in the same way as {@link #retrievePast(int)}'s Additional Retrieval mode.
 * <p>
 * <b>Note:</b> This method can only be used after {@link net.dv8tion.jda.core.entities.Message Messages} have already
 * been retrieved from Discord.
 * <p>
 * Possible {@link net.dv8tion.jda.core.requests.ErrorResponse ErrorResponses} include:
 * <ul>
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_MESSAGE UNKNOWN_MESSAGE}
 *     <br>Can occur if retrieving in Additional Mode and the Message being used as the marker for the last retrieved
 *         Message was deleted. Currently, to fix this, you need to create a new
 *         {@link net.dv8tion.jda.core.entities.MessageHistory MessageHistory} instance.</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_ACCESS MISSING_ACCESS}
 *     <br>Can occur if the request for history retrieval was executed <i>after</i> JDA lost access to the Channel,
 *         typically due to the account being removed from the {@link net.dv8tion.jda.core.entities.Guild Guild} or
 *         {@link net.dv8tion.jda.client.entities.Group Group}.</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#MISSING_PERMISSIONS MISSING_PERMISSIONS}
 *     <br>Can occur if the request for history retrieval was executed <i>after</i> JDA lost the
 *         {@link net.dv8tion.jda.core.Permission#MESSAGE_HISTORY} permission.</li>
 *
 *     <li>{@link net.dv8tion.jda.core.requests.ErrorResponse#UNKNOWN_CHANNEL UNKNOWN_CHANNEL}
 *     <br>The send request was attempted after the channel was deleted.</li>
 * </ul>
 *
 * @param amount
 *        The amount of {@link net.dv8tion.jda.core.entities.Message Messages} to retrieve.
 *
 * @throws java.lang.IllegalArgumentException
 *         If the {@code amount} is less than {@code 1} or greater than {@code 100}.
 * @throws java.lang.IllegalStateException
 *         If no messages have been retrieved by this MessageHistory.
 *
 * @return {@link net.dv8tion.jda.core.requests.RestAction RestAction} -
 *         Type: {@link java.util.List List}{@literal <}{@link net.dv8tion.jda.core.entities.Message Message}{@literal >}
 *         <br>Retrieved Messages are placed in a List and provided in order of most recent to oldest with most recent
 *         starting at index 0. If the list is empty, there were no more messages left to retrieve.
 */
@CheckReturnValue
public RestAction<List<Message>> retrieveFuture(int amount) {
    if (amount > 100 || amount < 1)
        throw new IllegalArgumentException(
                "Message retrieval limit is between 1 and 100 messages. No more, no less. Limit provided: " + amount);

    if (history.isEmpty())
        throw new IllegalStateException(
                "No messages have been retrieved yet, so there is no message to act as a marker to retrieve more recent messages based on.");

    Route.CompiledRoute route = Route.Messages.GET_MESSAGE_HISTORY.compile(channel.getId())
            .withQueryParams("limit", Integer.toString(amount), "after", String.valueOf(history.firstKey()));

    return new RestAction<List<Message>>(getJDA(), route) {
        @Override
        protected void handleResponse(Response response, Request<List<Message>> request) {
            if (!response.isOk()) {
                request.onFailure(response);
                return;
            }

            EntityBuilder builder = api.getEntityBuilder();
            LinkedList<Message> msgs = new LinkedList<>();
            JSONArray historyJson = response.getArray();
            for (int i = 0; i < historyJson.length(); i++)
                msgs.add(builder.createMessage(historyJson.getJSONObject(i)));

            for (Iterator<Message> it = msgs.descendingIterator(); it.hasNext();) {
                Message m = it.next();
                history.put(0, m.getIdLong(), m);
            }

            request.onSuccess(msgs);
        }
    };
}
From source file:org.codice.ddf.security.filter.delegate.DelegateServletFilter.java
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
        throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) servletRequest;

    if (contextPolicyManager != null) {
        String contextPath = !StringUtils.isBlank(httpRequest.getContextPath()) ? httpRequest.getContextPath()
                : httpRequest.getServletPath() + httpRequest.getPathInfo();
        if (contextPolicyManager.isWhiteListed(contextPath)) {
            LOGGER.debug(
                    "Current Context path {} has been white listed by the local policy, no authentication or authorization filters will be applied.",
                    contextPath);
            filterChain.doFilter(servletRequest, servletResponse);
            return;
        }
    }

    LinkedList<ServiceReference<Filter>> serviceRefs = new LinkedList<ServiceReference<Filter>>();
    try {
        serviceRefs.addAll(ctx.getServiceReferences(Filter.class, null));
    } catch (InvalidSyntaxException e) {
        LOGGER.warn("Could not lookup service references.", e);
    }

    if (!serviceRefs.isEmpty()) {
        LOGGER.debug("Found {} filter, now filtering...", serviceRefs.size());
        ProxyFilterChain chain = new ProxyFilterChain(filterChain);
        Iterator<ServiceReference<Filter>> reverseIterator = serviceRefs.descendingIterator();
        while (reverseIterator.hasNext()) {
            ServiceReference<Filter> curReference = reverseIterator.next();
            Filter curFilter = ctx.getService(curReference);
            if (!curFilter.getClass().toString().equals(this.getClass().toString())) {
                chain.addFilter(curFilter);
            }
            reverseIterator.remove();
        }
        chain.doFilter(servletRequest, servletResponse);
    } else {
        LOGGER.debug("Did not find any filters");
        filterChain.doFilter(servletRequest, servletResponse);
    }
}
From source file:org.nuclos.server.dblayer.impl.oracle.OracleDBAccess.java
private List<Constraint> disableConstraints() throws SQLException {
    LinkedList<Constraint> constraints = getEnabledConstraints();
    if (constraints.size() == 0) {
        return constraints;
    }
    String cmd = getAlterConstraintsCmd(constraints.descendingIterator(), false);
    executor.executeUpdate(cmd);
    return constraints;
}
From source file:org.marketcetera.marketdata.yahoo.YahooFeedEventTranslator.java
/**
 * Gets all the events it can find from the given data collection.
 *
 * @param inData a <code>Map&lt;YahooField,String&gt;</code> value
 * @param inHandle
 * @return a <code>List&lt;Event&gt;</code> value
 */
private List<Event> getEventsFrom(Map<YahooField, String> inData, String inHandle) {
    SLF4JLoggerProxy.debug(YahooFeedEventTranslator.class, "Getting events from {}", //$NON-NLS-1$
            inData);
    String errorIndication = inData.get(YahooField.ERROR_INDICATION);
    if (!errorIndication.equals(NO_ERROR)) {
        SLF4JLoggerProxy.warn(org.marketcetera.core.Messages.USER_MSG_CATEGORY, errorIndication);
        return EMPTY_EVENT_LIST;
    }
    // no error found, continue
    LinkedList<Event> events = new LinkedList<Event>();
    // look for specific event types
    lookForBidEvent(inData, events, inHandle);
    lookForAskEvent(inData, events, inHandle);
    lookForTradeEvent(inData, events);
    lookForDividendEvent(inData, events);
    // iterate over the event candidates in reverse order to accomplish two things:
    //  1) Mark events as part or final (this is the EVENT_BOUNDARY capability contract)
    //  2) compare events to the event cache to make sure we're not sending the same event over and over - this is
    //     necessary because the data source is poll-based rather than push-based.
    Iterator<Event> marker = events.descendingIterator();
    boolean markedFinal = false;
    while (marker.hasNext()) {
        Event event = marker.next();
        // compare event candidate to cache to make sure we're not just repeating ourselves
        if (shouldSendEvent(event)) {
            if (event instanceof HasEventType) {
                if (!markedFinal) {
                    ((HasEventType) event).setEventType(EventType.UPDATE_FINAL);
                    markedFinal = true;
                } else {
                    ((HasEventType) event).setEventType(EventType.UPDATE_PART);
                }
            }
        } else {
            // this event matches the cache, so don't return it
            marker.remove();
        }
    }
    return events;
}
From source file:net.itransformers.topologyviewer.gui.GraphViewerPanel.java
public Set<String> findShortest(String aFrom, String aTo, Graph<String, String> aGraph) {
    final Set<String> mOrderedPred = new LinkedHashSet<String>();
    final LinkedList<String> mPred = new LinkedList<String>();
    if (aFrom == null || aTo == null) {
        return mOrderedPred;
    }
    BFSDistanceLabeler<String, String> bdl = new BFSDistanceLabeler<String, String>();
    bdl.labelDistances(aGraph, aFrom);
    // grab a predecessor
    String v = aTo;
    Set<String> prd = bdl.getPredecessors(v);
    mPred.add(aTo);
    while (prd != null && prd.size() > 0) {
        v = prd.iterator().next();
        System.out.println("V: " + v);
        mPred.add(v);
        if (v.equals(aFrom)) {
            final Iterator<String> stringIterator = mPred.descendingIterator();
            while (stringIterator.hasNext()) {
                String next = stringIterator.next();
                mOrderedPred.add(next);
            }
            return mOrderedPred;
        }
        prd = bdl.getPredecessors(v);
    }
    // Reorder the set of nodes
    final Iterator<String> stringIterator = mPred.descendingIterator();
    while (stringIterator.hasNext()) {
        String next = stringIterator.next();
        mOrderedPred.add(next);
    }
    return mOrderedPred;
}
From source file:org.matonto.catalog.impl.SimpleCatalogManager.java
/**
 * Gets an iterator which contains all of the Resources (commits) leading up to the provided Resource identifying a
 * commit.
 *
 * @param commitId The Resource identifying the commit that you want to get the chain for.
 * @param conn The RepositoryConnection which will be queried for the Commits.
 * @return Iterator of Values containing the requested commits.
 */
private Iterator<Value> getCommitChainIterator(Resource commitId, RepositoryConnection conn) {
    TupleQuery query = conn.prepareTupleQuery(GET_COMMIT_CHAIN);
    query.setBinding(COMMIT_BINDING, commitId);
    TupleQueryResult result = query.evaluate();
    LinkedList<Value> commits = new LinkedList<>();
    result.forEach(bindingSet -> bindingSet.getBinding(PARENT_BINDING)
            .ifPresent(binding -> commits.add(binding.getValue())));
    commits.addFirst(commitId);
    return commits.descendingIterator();
}
From source file:enumj.Enumerator.java
/**
 * Returns an enumerator that reverses the order of the enumerated elements.
 *
 * @return the reversed enumerator.
 */
public default Enumerator<E> reverse() {
    final LinkedList<E> elements = new LinkedList();
    while (hasNext()) {
        elements.add(next());
    }
    return Enumerator.of(elements.descendingIterator());
}
From source file:com.streamsets.pipeline.stage.origin.jdbc.cdc.oracle.OracleCDCSource.java
private int addRecordsToQueue(LocalDateTime commitTimestamp, String commitScn, String xid)
        throws InterruptedException {
    TransactionIdKey key = new TransactionIdKey(xid);
    int seq = 0;
    bufferedRecordsLock.lock();
    HashQueue<RecordSequence> records;
    try {
        records = bufferedRecords.getOrDefault(key, EMPTY_LINKED_HASHSET);
        records.completeInserts();
        bufferedRecords.remove(key);
    } finally {
        bufferedRecordsLock.unlock();
    }

    final List<FutureWrapper> parseFutures = new ArrayList<>();
    while (!records.isEmpty()) {
        RecordSequence r = records.remove();
        if (configBean.keepOriginalQuery) {
            r.headers.put(QUERY_KEY, r.sqlString);
        }
        final Future<Record> recordFuture = parsingExecutor
                .submit(() -> generateRecord(r.sqlString, r.headers, r.opCode));
        parseFutures.add(new FutureWrapper(recordFuture, r.sqlString, r.seq));
    }
    records.close();

    LinkedList<RecordOffset> recordOffsets = new LinkedList<>();
    for (FutureWrapper recordFuture : parseFutures) {
        try {
            Record record = recordFuture.future.get();
            if (record != null) {
                final RecordOffset recordOffset = new RecordOffset(record,
                        new Offset(VERSION_UNCOMMITTED, commitTimestamp, commitScn, recordFuture.seq, xid));
                // Is this a record generated by a rollback? If it is, find the previous record that matches this
                // row id and remove it from the queue.
                if (recordOffset.record.getHeader().getAttribute(ROLLBACK).equals(ONE)) {
                    String rowId = recordOffset.record.getHeader().getAttribute(ROWID_KEY);
                    Iterator<RecordOffset> reverseIter = recordOffsets.descendingIterator();
                    while (reverseIter.hasNext()) {
                        if (reverseIter.next().record.getHeader().getAttribute(ROWID_KEY).equals(rowId)) {
                            reverseIter.remove();
                            break;
                        }
                    }
                } else {
                    recordOffsets.add(recordOffset);
                }
            }
        } catch (ExecutionException e) {
            LOG.error("{}:{}", JDBC_405.getMessage(), e.getMessage(), e);
            final Throwable cause = e.getCause();
            if (cause instanceof UnparseableSQLException) {
                unparseable.offer(recordFuture.sql);
            } else {
                otherErrors.offer(new ErrorAndCause(JDBC_405, cause));
            }
        }
    }

    for (RecordOffset ro : recordOffsets) {
        try {
            seq = ro.offset.sequence;
            while (!recordQueue.offer(ro, 1, TimeUnit.SECONDS)) {
                if (getContext().isStopped()) {
                    return seq;
                }
            }
            LOG.debug(GENERATED_RECORD, ro.record, ro.record.getHeader().getAttribute(XID));
        } catch (InterruptedException ex) {
            try {
                errorRecordHandler.onError(JDBC_405, ex);
            } catch (StageException stageException) {
                addToStageExceptionsQueue(stageException);
            }
        }
    }
    return seq;
}
From source file:com.streamsets.pipeline.stage.processor.mapper.FieldMapperProcessor.java
private void transformFieldPaths(Record record) throws StageException {
    final Map<String, List<Field>> newPathsToFields = new LinkedHashMap<>();
    final LinkedList<String> pathsToDelete = new LinkedList<>();
    final Map<Field, String> fieldsToPreviousPaths = new HashMap<>();
    record.forEachField(fv -> {
        final String fieldPath = fv.getFieldPath();
        final String fieldName = fv.getFieldName();
        final Field field = fv.getField();
        if (checkSkipFieldAndSetContextVar(fieldPath, fieldName, field, true)) {
            return;
        }
        try {
            final String newPath = mapperExpressionEval.eval(expressionVars,
                    fieldMapperConfig.mappingExpression, String.class);
            newPathsToFields.computeIfAbsent(newPath, k -> new LinkedList<>());
            newPathsToFields.get(newPath).add(field);
        } catch (ELEvalException e) {
            throw new RuntimeException(String.format("Failed to evaluate mapper expression %s: %s",
                    fieldMapperConfig.mappingExpression, e.getMessage()), e);
        }
        if (!fieldMapperConfig.maintainOriginalPaths) {
            pathsToDelete.add(fieldPath);
        }
        fieldsToPreviousPaths.put(field, fieldPath);
    });
    for (String newPath : newPathsToFields.keySet()) {
        final List<Field> mappedFields = new LinkedList<>(newPathsToFields.get(newPath));
        if (aggregationEval != null) {
            expressionVars.addVariable("fields", mappedFields);
            AggregationEL.setFieldsToPreviousPathsInContext(expressionVars, fieldsToPreviousPaths);
            final Object aggregationResult = aggregationEval.eval(expressionVars,
                    fieldMapperConfig.aggregationExpression, Object.class);
            expressionVars.addVariable("fields", null);
            if (aggregationResult instanceof Field) {
                record.set(newPath, (Field) aggregationResult);
            } else {
                final Field.Type aggregationResultType = FieldUtils.getTypeFromObject(aggregationResult);
                record.set(newPath, Field.create(aggregationResultType, aggregationResult));
            }
        } else {
            boolean replaceValues = false;
            if (record.has(newPath)) {
                final Field existingField = record.get(newPath);
                if (existingField.getType() == Field.Type.LIST) {
                    final List<Field> valueAsList = existingField.getValueAsList();
                    if (!fieldMapperConfig.appendListValues) {
                        valueAsList.clear();
                    }
                    valueAsList.addAll(mappedFields);
                } else if (fieldMapperConfig.structureChangeAllowed) {
                    replaceValues = true;
                }
            } else if (fieldMapperConfig.structureChangeAllowed) {
                replaceValues = true;
            }
            if (replaceValues) {
                if (mappedFields.size() > 1) {
                    record.set(newPath, Field.create(new LinkedList<>(mappedFields)));
                } else {
                    record.set(newPath, mappedFields.iterator().next());
                }
            }
        }
    }
    pathsToDelete.descendingIterator().forEachRemaining(path -> record.delete(path));
}