List of usage examples for java.util.ArrayDeque.toArray
@SuppressWarnings("unchecked") public <T> T[] toArray(T[] a)
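For reference, ArrayDeque inherits the contract of Collection.toArray(T[]): elements are returned in order from first (head) to last (tail); if the supplied array is large enough it is filled and returned as-is, with the slot immediately after the last element set to null; otherwise a new array of the same runtime type is allocated and returned. Below is a minimal sketch of that contract; the class and variable names are illustrative, not from any of the source files listed here.

import java.util.ArrayDeque;

public class ToArrayContractSketch {
    public static void main(String[] args) {
        ArrayDeque<String> deque = new ArrayDeque<String>();
        deque.add("a");
        deque.add("b");

        // Array large enough: the same array instance is returned,
        // and the slot right after the last element is set to null.
        String[] big = new String[4];
        String[] result = deque.toArray(big);
        System.out.println(result == big); // true
        System.out.println(result[2]);     // null

        // Array too small: a new array of the same runtime type
        // (here String[]) is allocated and returned.
        String[] fresh = deque.toArray(new String[0]);
        System.out.println(fresh.length);  // 2
    }
}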
From source file:Main.java
public static void main(String[] args) {
    ArrayDeque<Integer> deque = new ArrayDeque<Integer>(8);
    deque.add(3);
    deque.add(1);
    deque.add(25);
    deque.add(18);
    System.out.println(deque);

    // Convert the deque to a typed array and print the array's length.
    Integer[] ob = deque.toArray(new Integer[deque.size()]);
    System.out.println("\n Array Size : " + ob.length);
}
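A note on the new Integer[deque.size()] idiom used above and throughout the examples below: presizing the array lets toArray fill it directly instead of allocating a second array. The zero-length variant, deque.toArray(new Integer[0]), is equally correct and often preferred for brevity; on modern JVMs it is generally no slower, though that is a general performance observation rather than anything these examples depend on.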
From source file:com.espertech.esper.epl.property.PropertyEvaluatorNested.java
public EventBean[] getProperty(EventBean theEvent, ExprEvaluatorContext exprEvaluatorContext) {
    ArrayDeque<EventBean> resultEvents = new ArrayDeque<EventBean>();
    eventsPerStream[0] = theEvent;
    populateEvents(theEvent, 0, resultEvents, exprEvaluatorContext);
    if (resultEvents.isEmpty()) {
        return null;
    }
    return resultEvents.toArray(new EventBean[resultEvents.size()]);
}
From source file:com.espertech.esper.view.window.TimeWindowView.java
/**
 * This method removes (expires) objects from the window and schedules a new callback for the
 * time when the next oldest message would expire from the window.
 */
protected final void expire() {
    long expireBeforeTimestamp = agentInstanceContext.getStatementContext().getSchedulingService().getTime()
            - millisecondsBeforeExpiry + 1;

    // Remove from the timeWindow any events that have an older or equal timestamp than the given timestamp.
    // The window extends from X to (X - millisecondsBeforeExpiry + 1).
    ArrayDeque<EventBean> expired = timeWindow.expireEvents(expireBeforeTimestamp);

    // If there are child views, fire the update method.
    if (this.hasViews()) {
        if ((expired != null) && (!expired.isEmpty())) {
            EventBean[] oldEvents = expired.toArray(new EventBean[expired.size()]);
            if (viewUpdatedCollection != null) {
                viewUpdatedCollection.update(null, oldEvents);
            }
            updateChildren(null, oldEvents);
        }
    }

    scheduleExpiryCallback();
}
From source file:com.espertech.esper.core.start.EPPreparedExecuteSingleStream.java
/**
 * Executes the prepared query.
 * @return query results
 */
public EPPreparedQueryResult execute(ContextPartitionSelector[] contextPartitionSelectors) {
    if (contextPartitionSelectors != null && contextPartitionSelectors.length != 1) {
        throw new IllegalArgumentException("Number of context partition selectors must be one");
    }
    ContextPartitionSelector optionalSingleSelector =
            contextPartitionSelectors != null && contextPartitionSelectors.length > 0
                    ? contextPartitionSelectors[0] : null;

    // validate context
    if (processor.getContextName() != null && statementSpec.getOptionalContextName() != null
            && !processor.getContextName().equals(statementSpec.getOptionalContextName())) {
        throw new EPException("Context for named window is '" + processor.getContextName()
                + "' and query specifies context '" + statementSpec.getOptionalContextName() + "'");
    }

    // handle non-specified context
    if (statementSpec.getOptionalContextName() == null) {
        NamedWindowProcessorInstance processorInstance = processor.getProcessorInstanceNoContext();
        if (processorInstance != null) {
            EventBean[] rows = executor.execute(processorInstance);
            if (rows.length > 0) {
                dispatch();
            }
            return new EPPreparedQueryResult(processor.getNamedWindowType(), rows);
        }
    }

    // context partition runtime query
    Collection<Integer> agentInstanceIds = EPPreparedExecuteMethodHelper.getAgentInstanceIds(processor,
            optionalSingleSelector, services.getContextManagementService(), processor.getContextName());

    // collect events and agent instances
    if (agentInstanceIds.isEmpty()) {
        return new EPPreparedQueryResult(processor.getNamedWindowType(),
                CollectionUtil.EVENT_PER_STREAM_EMPTY);
    }

    if (agentInstanceIds.size() == 1) {
        int agentInstanceId = agentInstanceIds.iterator().next();
        NamedWindowProcessorInstance processorInstance = processor.getProcessorInstance(agentInstanceId);
        EventBean[] rows = executor.execute(processorInstance);
        if (rows.length > 0) {
            dispatch();
        }
        return new EPPreparedQueryResult(processor.getNamedWindowType(), rows);
    }

    ArrayDeque<EventBean> allRows = new ArrayDeque<EventBean>();
    for (int agentInstanceId : agentInstanceIds) {
        NamedWindowProcessorInstance processorInstance = processor.getProcessorInstance(agentInstanceId);
        if (processorInstance != null) {
            EventBean[] rows = executor.execute(processorInstance);
            allRows.addAll(Arrays.asList(rows));
        }
    }
    if (allRows.size() > 0) {
        dispatch();
    }
    return new EPPreparedQueryResult(processor.getNamedWindowType(),
            allRows.toArray(new EventBean[allRows.size()]));
}
From source file:com.espertech.esper.core.start.EPPreparedExecuteIUDSingleStream.java
/**
 * Executes the prepared query.
 * @return query results
 */
public EPPreparedQueryResult execute(ContextPartitionSelector[] contextPartitionSelectors) {
    try {
        if (contextPartitionSelectors != null && contextPartitionSelectors.length != 1) {
            throw new IllegalArgumentException("Number of context partition selectors must be one");
        }
        ContextPartitionSelector optionalSingleSelector =
                contextPartitionSelectors != null && contextPartitionSelectors.length > 0
                        ? contextPartitionSelectors[0] : null;

        // validate context
        if (processor.getContextName() != null && statementSpec.getOptionalContextName() != null
                && !processor.getContextName().equals(statementSpec.getOptionalContextName())) {
            throw new EPException("Context for named window is '" + processor.getContextName()
                    + "' and query specifies context '" + statementSpec.getOptionalContextName() + "'");
        }

        // handle non-specified context
        if (statementSpec.getOptionalContextName() == null) {
            FireAndForgetInstance processorInstance = processor.getProcessorInstanceNoContext();
            if (processorInstance != null) {
                EventBean[] rows = executor.execute(processorInstance);
                if (rows != null && rows.length > 0) {
                    dispatch();
                }
                return new EPPreparedQueryResult(processor.getEventTypePublic(), rows);
            }
        }

        // context partition runtime query
        Collection<Integer> agentInstanceIds = EPPreparedExecuteMethodHelper.getAgentInstanceIds(processor,
                optionalSingleSelector, services.getContextManagementService(), processor.getContextName());

        // collect events and agent instances
        if (agentInstanceIds.isEmpty()) {
            return new EPPreparedQueryResult(processor.getEventTypeResultSetProcessor(),
                    CollectionUtil.EVENTBEANARRAY_EMPTY);
        }

        if (agentInstanceIds.size() == 1) {
            int agentInstanceId = agentInstanceIds.iterator().next();
            FireAndForgetInstance processorInstance = processor.getProcessorInstanceContextById(agentInstanceId);
            EventBean[] rows = executor.execute(processorInstance);
            if (rows.length > 0) {
                dispatch();
            }
            return new EPPreparedQueryResult(processor.getEventTypeResultSetProcessor(), rows);
        }

        ArrayDeque<EventBean> allRows = new ArrayDeque<EventBean>();
        for (int agentInstanceId : agentInstanceIds) {
            FireAndForgetInstance processorInstance = processor.getProcessorInstanceContextById(agentInstanceId);
            if (processorInstance != null) {
                EventBean[] rows = executor.execute(processorInstance);
                allRows.addAll(Arrays.asList(rows));
            }
        }
        if (allRows.size() > 0) {
            dispatch();
        }
        return new EPPreparedQueryResult(processor.getEventTypeResultSetProcessor(),
                allRows.toArray(new EventBean[allRows.size()]));
    } finally {
        if (hasTableAccess) {
            services.getTableService().getTableExprEvaluatorContext().releaseAcquiredLocks();
        }
    }
}
From source file:oracle.kv.hadoop.hive.table.TableStorageHandlerBase.java
/**
 * Method required by the HiveStoragePredicateHandler interface.
 * <p>
 * This method validates the components of the given predicate and
 * ultimately produces the following artifacts:
 *
 * <ul>
 * <li>a Hive object representing the predicate that will be pushed to
 *     the backend for server side filtering
 * <li>the String form of the computed predicate to push, which can be
 *     passed to the server via the ONSQL query mechanism
 * <li>a Hive object consisting of the remaining components of the
 *     original predicate input to this method -- referred to as the
 *     'residual' predicate -- which represents the criteria the Hive
 *     infrastructure will apply (on the client side) to the results
 *     returned after server side filtering has been performed
 * </ul>
 *
 * The predicate analysis model that Hive employs is basically a two-step
 * process. First, an instance of the Hive IndexPredicateAnalyzer class is
 * created and its analyzePredicate method is invoked, which returns a Hive
 * class representing the residual predicate, and also populates a
 * Collection whose contents are dependent on the particular implementation
 * of IndexPredicateAnalyzer that is used. After analyzePredicate is
 * invoked, the analyzer's translateSearchConditions method is invoked to
 * convert the contents of the populated Collection to a Hive object
 * representing the predicate that can be pushed to the server side.
 * Finally, the object that is returned is an instance of the Hive
 * DecomposedPredicate class, which contains the computed predicate to push
 * and the residual predicate.
 * <p>
 * Note that because the Hive built-in IndexPredicateAnalyzer produces only
 * predicates that consist of 'AND' statements, and which correspond to
 * PrimaryKey based or IndexKey based predicates, if the Hive built-in
 * analyzer does not produce a predicate to push, then a custom analyzer
 * that extends the capabilities of the Hive built-in analyzer is employed.
 * This extended analyzer handles statements that the built-in analyzer
 * does not handle. Additionally, whereas the built-in analyzer populates a
 * List of Hive IndexSearchConditions corresponding to the filtering
 * criteria of the predicate to push, the extended analyzer populates an
 * ArrayDeque in which the top (first element) of the Deque is a Hive
 * object consisting of all the components of the original input predicate,
 * but with 'invalid' operators replaced with 'valid' operators; for
 * example, with 'IN <list>' replaced with 'OR' statements.
 * <p>
 * In each case, translateSearchConditions constructs the appropriate Hive
 * predicate to push from the contents of the given Collection; either a
 * List of IndexSearchCondition, or an ArrayDeque.
 */
@Override
@SuppressWarnings("deprecation")
public DecomposedPredicate decomposePredicate(JobConf jobConfig,
        org.apache.hadoop.hive.serde2.Deserializer deserializer, ExprNodeDesc predicate) {

    /* Reset query state to default values. */
    TableHiveInputFormat.resetQueryInfo();
    DecomposedPredicate decomposedPredicate = null;

    /*
     * Try the Hive built-in analyzer first, which will validate the
     * components of the given predicate and separate them into two
     * disjoint sets: a set of search conditions that correspond to
     * either a valid PrimaryKey or IndexKey (and optional FieldRange)
     * that can be scanned (in the backend KVStore server) using one of
     * the TableIterators; and a set containing the remaining components
     * of the predicate (the 'residual' predicate), which Hive will
     * apply to the results returned after the search conditions have
     * first been applied on (pushed to) the backend.
     */
    final IndexPredicateAnalyzer analyzer =
            TableHiveInputFormat.sargablePredicateAnalyzer(predicate, (TableSerDe) deserializer);

    if (analyzer != null) { /* Use TableScan or IndexScan */

        /* Decompose predicate into search conditions and residual. */
        final List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
        final ExprNodeGenericFuncDesc residualPredicate =
                (ExprNodeGenericFuncDesc) analyzer.analyzePredicate(predicate, searchConditions);

        decomposedPredicate = new DecomposedPredicate();
        decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(searchConditions);
        decomposedPredicate.residualPredicate = residualPredicate;

        /*
         * Valid search conditions and residual have been obtained.
         * Determine whether the search conditions are index based or
         * based on the table's primary key. If index based, then tell
         * the InputFormat to build splits and scan (iterate) based on
         * shards; otherwise, tell the InputFormat to base the iterator
         * on partition sets.
         */
        final StringBuilder whereBuf = new StringBuilder();
        TableHiveInputFormat.buildPushPredicate(decomposedPredicate.pushedPredicate, whereBuf);
        final String whereStr = whereBuf.toString();

        TableHiveInputFormat.setQueryInfo(searchConditions, (TableSerDe) deserializer, whereStr);

        if (LOG.isDebugEnabled()) {
            LOG.debug("-----------------------------");
            LOG.debug("residual = " + decomposedPredicate.residualPredicate);
            LOG.debug("predicate = " + decomposedPredicate.pushedPredicate);
            LOG.debug("search conditions = " + searchConditions);

            switch (TableHiveInputFormat.getQueryBy()) {
            case TableInputSplit.QUERY_BY_INDEX:
                LOG.debug("push predicate to secondary index [" + "WHERE " + whereStr + "]");
                break;
            case TableInputSplit.QUERY_BY_PRIMARY_ALL_PARTITIONS:
            case TableInputSplit.QUERY_BY_PRIMARY_SINGLE_PARTITION:
                LOG.debug("push predicate to primary index [" + "WHERE " + whereStr + "]");
                break;
            default:
                break;
            }
            LOG.debug("-----------------------------");
        }

    } else { /* IndexPredicateAnalyzer == null ==> Use native query */

        /*
         * The given predicate does not consist of search conditions that
         * correspond to either a valid PrimaryKey or IndexKey (or
         * FieldRange). Thus, employ the extended analyzer to handle
         * statements the built-in analyzer cannot handle.
         */
        final TableHiveInputFormat.ExtendedPredicateAnalyzer extendedAnalyzer =
                TableHiveInputFormat.createPredicateAnalyzerForOnql((TableSerDe) deserializer);

        if (extendedAnalyzer == null) {
            LOG.debug("extended predicate analyzer = null ... " + "NO PREDICATE PUSHDOWN");
            return null;
        }

        final ArrayDeque<ExprNodeDesc> pushPredicateDeque = new ArrayDeque<ExprNodeDesc>();
        final ExprNodeGenericFuncDesc residualPredicate =
                (ExprNodeGenericFuncDesc) extendedAnalyzer.analyzePredicate(predicate, pushPredicateDeque);

        if (LOG.isTraceEnabled()) {
            final ExprNodeDesc[] qElements =
                    pushPredicateDeque.toArray(new ExprNodeDesc[pushPredicateDeque.size()]);
            LOG.trace("-----------------------------");
            LOG.trace("push predicate queue elements:");
            for (int i = 0; i < qElements.length; i++) {
                LOG.trace("element[" + i + "] = " + qElements[i]);
            }
            LOG.trace("-----------------------------");
        }

        decomposedPredicate = new DecomposedPredicate();
        final StringBuilder whereBuf = new StringBuilder();

        decomposedPredicate.residualPredicate = residualPredicate;
        decomposedPredicate.pushedPredicate =
                extendedAnalyzer.translateSearchConditions(pushPredicateDeque, whereBuf);

        if (decomposedPredicate.pushedPredicate != null) {
            if (LOG.isTraceEnabled()) {
                TableHiveInputFormat.ExtendedPredicateAnalyzer
                        .displayNodeTree(decomposedPredicate.pushedPredicate);
            }

            final String whereStr = whereBuf.toString();

            if (LOG.isDebugEnabled()) {
                LOG.debug("-----------------------------");
                LOG.debug("residual = " + decomposedPredicate.residualPredicate);
                LOG.debug("predicate = " + decomposedPredicate.pushedPredicate);
                LOG.debug("push predicate via native query [" + "WHERE " + whereStr + "]");
                LOG.debug("-----------------------------");
            }
            TableHiveInputFormat.setQueryInfo((TableSerDe) deserializer, whereStr);

        } else {
            LOG.debug("Extended predicate analyzer found no predicate "
                    + "to push. Will use all of residual for filtering.");
        }
    } /* endif: IndexPredicateAnalyzer != null or == null */

    return decomposedPredicate;
}
From source file:com.espertech.esper.epl.expression.ExprTimePeriodImpl.java
public void validate(ExprValidationContext validationContext) throws ExprValidationException {
    evaluators = ExprNodeUtility.getEvaluators(this.getChildNodes());
    for (ExprNode childNode : this.getChildNodes()) {
        validate(childNode);
    }

    ArrayDeque<TimePeriodAdder> list = new ArrayDeque<TimePeriodAdder>();
    if (hasYear) {
        list.add(new TimePeriodAdderYear());
    }
    if (hasMonth) {
        list.add(new TimePeriodAdderMonth());
    }
    if (hasWeek) {
        list.add(new TimePeriodAdderWeek());
    }
    if (hasDay) {
        list.add(new TimePeriodAdderDay());
    }
    if (hasHour) {
        list.add(new TimePeriodAdderHour());
    }
    if (hasMinute) {
        list.add(new TimePeriodAdderMinute());
    }
    if (hasSecond) {
        list.add(new TimePeriodAdderSecond());
    }
    if (hasMillisecond) {
        list.add(new TimePeriodAdderMSec());
    }
    adders = list.toArray(new TimePeriodAdder[list.size()]);
}
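Taken together, the examples above all follow the same shape: accumulate an unknown number of elements in an ArrayDeque, then convert them to a typed array in a single toArray call. A self-contained sketch of that pattern, with illustrative names:

import java.util.ArrayDeque;

public class CollectThenConvert {
    // Collects the non-null elements of the input into a typed array.
    public static String[] nonNulls(String[] input) {
        ArrayDeque<String> collected = new ArrayDeque<String>();
        for (String s : input) {
            if (s != null) {
                collected.add(s);
            }
        }
        // Presized-array form used throughout the examples above;
        // collected.toArray(new String[0]) would work equally well.
        return collected.toArray(new String[collected.size()]);
    }

    public static void main(String[] args) {
        String[] result = nonNulls(new String[] { "x", null, "y" });
        System.out.println(result.length); // 2
    }
}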