List of usage examples for the java.util.ArrayDeque constructor ArrayDeque()
public ArrayDeque()
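Before the real-world examples, a minimal sketch of the no-argument constructor. Per the class Javadoc it creates an empty deque with an initial capacity sufficient for 16 elements, growing as needed; the same instance works as a FIFO queue via add/poll and as a LIFO stack via push/pop:

import java.util.ArrayDeque;

public class ArrayDequeConstructorExample {
    public static void main(String[] args) {
        ArrayDeque<String> deque = new ArrayDeque<>(); // empty; capacity grows as needed

        deque.add("a");                   // FIFO use: enqueue at the tail ...
        deque.add("b");
        System.out.println(deque.poll()); // ... dequeue from the head -> a

        deque.push("x");                  // LIFO use: push onto the head ...
        System.out.println(deque.pop());  // ... pop from the head -> x
    }
}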
From source file: com.espertech.esper.core.EPRuntimeIsolatedImpl.java

private void processMatches(EventBean event) {
    // get matching filters
    ArrayBackedCollection<FilterHandle> matches = matchesArrayThreadLocal.get();
    services.getFilterService().evaluate(event, matches, isolatedTimeEvalContext);

    if (ThreadLogUtil.ENABLED_TRACE) {
        ThreadLogUtil.trace("Found matches for underlying ", matches.size(), event.getUnderlying());
    }

    if (matches.size() == 0) {
        return;
    }

    Map<EPStatementHandle, ArrayDeque<FilterHandleCallback>> stmtCallbacks = matchesPerStmtThreadLocal.get();
    Object[] matchArray = matches.getArray();
    int entryCount = matches.size();

    for (int i = 0; i < entryCount; i++) {
        EPStatementHandleCallback handleCallback = (EPStatementHandleCallback) matchArray[i];
        EPStatementHandle handle = handleCallback.getEpStatementHandle();

        // Self-joins require that the internal dispatch happens after all streams are evaluated.
        // Priority or preemptive settings also require special ordering.
        if (handle.isCanSelfJoin() || isPrioritized) {
            ArrayDeque<FilterHandleCallback> callbacks = stmtCallbacks.get(handle);
            if (callbacks == null) {
                callbacks = new ArrayDeque<FilterHandleCallback>();
                stmtCallbacks.put(handle, callbacks);
            }
            callbacks.add(handleCallback.getFilterCallback());
            continue;
        }

        processStatementFilterSingle(handle, handleCallback, event);
    }
    matches.clear();

    if (stmtCallbacks.isEmpty()) {
        return;
    }

    for (Map.Entry<EPStatementHandle, ArrayDeque<FilterHandleCallback>> entry : stmtCallbacks.entrySet()) {
        EPStatementHandle handle = entry.getKey();
        ArrayDeque<FilterHandleCallback> callbackList = entry.getValue();
        processStatementFilterMultiple(handle, callbackList, event);

        if ((isPrioritized) && (handle.isPreemptive())) {
            break;
        }
    }
    stmtCallbacks.clear();
}
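The example above groups callbacks per statement handle with a get-or-create lookup into a Map of ArrayDeques. A minimal standalone sketch of the same idiom; the string keys and integer "callbacks" here are hypothetical stand-ins, not Esper types:

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;

public class GroupingSketch {
    public static void main(String[] args) {
        Map<String, ArrayDeque<Integer>> perKey = new HashMap<>();
        int[][] events = { {1, 10}, {2, 20}, {1, 11} };
        for (int[] e : events) {
            String key = "stmt-" + e[0];
            // get-or-create the per-key deque, as in processMatches above
            ArrayDeque<Integer> callbacks = perKey.get(key);
            if (callbacks == null) {
                callbacks = new ArrayDeque<>();
                perKey.put(key, callbacks);
            }
            callbacks.add(e[1]);
        }
        System.out.println(perKey); // stmt-1 -> [10, 11], stmt-2 -> [20] (key order unspecified)
    }
}

On Java 8 and later the same lookup can be written in one line as perKey.computeIfAbsent(key, k -> new ArrayDeque<>()).add(value).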
From source file: org.apache.kylin.metadata.model.DataModelDesc.java

private void reorderJoins(Map<String, TableDesc> tables) {
    if (joinTables.length == 0) {
        return;
    }

    Map<String, List<JoinTableDesc>> fkMap = Maps.newHashMap();
    for (JoinTableDesc joinTable : joinTables) {
        JoinDesc join = joinTable.getJoin();
        String fkSideName = join.getFKSide().getAlias();
        if (fkMap.containsKey(fkSideName)) {
            fkMap.get(fkSideName).add(joinTable);
        } else {
            List<JoinTableDesc> joinTableList = Lists.newArrayList();
            joinTableList.add(joinTable);
            fkMap.put(fkSideName, joinTableList);
        }
    }

    JoinTableDesc[] orderedJoinTables = new JoinTableDesc[joinTables.length];
    int orderedIndex = 0;

    Queue<JoinTableDesc> joinTableBuff = new ArrayDeque<JoinTableDesc>();
    TableDesc rootDesc = tables.get(rootFactTable);
    joinTableBuff.addAll(fkMap.get(rootDesc.getName()));
    while (!joinTableBuff.isEmpty()) {
        JoinTableDesc head = joinTableBuff.poll();
        orderedJoinTables[orderedIndex++] = head;
        String headAlias = head.getJoin().getPKSide().getAlias();
        if (fkMap.containsKey(headAlias)) {
            joinTableBuff.addAll(fkMap.get(headAlias));
        }
    }

    joinTables = orderedJoinTables;
}
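Here ArrayDeque serves as a plain FIFO Queue driving a breadth-first walk of the join tree, so foreign-key parents always precede their primary-key children. A minimal sketch of the same traversal over a hypothetical parent-to-children map:

import java.util.*;

public class BfsOrderSketch {
    public static void main(String[] args) {
        // hypothetical join tree: parent alias -> child aliases
        Map<String, List<String>> children = Map.of(
                "FACT", List.of("DIM_A", "DIM_B"),
                "DIM_A", List.of("DIM_C"));

        List<String> order = new ArrayList<>();
        Queue<String> buff = new ArrayDeque<>(); // FIFO via add()/poll()
        buff.addAll(children.getOrDefault("FACT", List.of()));
        while (!buff.isEmpty()) {
            String head = buff.poll();
            order.add(head);
            buff.addAll(children.getOrDefault(head, List.of()));
        }
        System.out.println(order); // [DIM_A, DIM_B, DIM_C]
    }
}

ArrayDeque is the usual Queue choice for this pattern: per its Javadoc it is likely faster than LinkedList when used as a queue, though unlike LinkedList it rejects null elements.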
From source file: com.google.gwt.emultest.java.util.ArrayDequeTest.java

public void testRemoveLastOccurrence() {
    Object o1 = new Object();
    Object o2 = new Object();
    Object o3 = new Object();

    ArrayDeque<Object> deque = new ArrayDeque<>();
    assertFalse(deque.removeLastOccurrence(o1));
    deque.add(o1);
    assertTrue(deque.removeLastOccurrence(o1));
    assertTrue(deque.isEmpty());

    deque = new ArrayDeque<>();
    deque.add(o1);
    deque.add(o2);
    deque.add(o3);
    assertTrue(deque.removeLastOccurrence(o2));
    checkDequeSizeAndContent(deque, o1, o3);

    deque = new ArrayDeque<>();
    deque.add(o1);
    deque.add(o2);
    deque.add(o3);
    deque.add(o1);
    deque.add(o2);
    deque.add(o3);
    assertTrue(deque.removeLastOccurrence(o2));
    checkDequeSizeAndContent(deque, o1, o2, o3, o1, o3);
    assertTrue(deque.removeLastOccurrence(o2));
    checkDequeSizeAndContent(deque, o1, o3, o1, o3);
    assertTrue(deque.removeLastOccurrence(o3));
    checkDequeSizeAndContent(deque, o1, o3, o1);
    assertTrue(deque.removeLastOccurrence(o3));
    checkDequeSizeAndContent(deque, o1, o1);
    assertFalse(deque.removeLastOccurrence(o3));

    assertFalse(deque.removeLastOccurrence(null));
}
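The behavior under test: removeLastOccurrence scans from tail to head, removes the last element equal to its argument, and returns false rather than throwing when nothing matches (including for a null argument, since ArrayDeque cannot contain nulls). A compact illustration:

import java.util.ArrayDeque;
import java.util.Arrays;

public class RemoveLastOccurrenceExample {
    public static void main(String[] args) {
        ArrayDeque<String> deque = new ArrayDeque<>(Arrays.asList("a", "b", "a"));
        System.out.println(deque.removeLastOccurrence("a")); // true: drops the trailing "a"
        System.out.println(deque);                           // [a, b]
        System.out.println(deque.removeLastOccurrence("z")); // false: absent element, no exception
    }
}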
From source file: com.blm.orc.OrcRawRecordMerger.java

@Override
public ObjectInspector getObjectInspector() {
    // Read the configuration parameters
    String columnNameProperty = conf.get(serdeConstants.LIST_COLUMNS);
    // NOTE: if "columns.types" is missing, all columns will be of String type
    String columnTypeProperty = conf.get(serdeConstants.LIST_COLUMN_TYPES);

    // Parse the configuration parameters
    ArrayList<String> columnNames = new ArrayList<String>();
    Deque<Integer> virtualColumns = new ArrayDeque<Integer>();
    if (columnNameProperty != null && columnNameProperty.length() > 0) {
        String[] colNames = columnNameProperty.split(",");
        for (int i = 0; i < colNames.length; i++) {
            if (VirtualColumn.VIRTUAL_COLUMN_NAMES.contains(colNames[i])) {
                virtualColumns.addLast(i);
            } else {
                columnNames.add(colNames[i]);
            }
        }
    }
    if (columnTypeProperty == null) {
        // Default type: all string
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < columnNames.size(); i++) {
            if (i > 0) {
                sb.append(":");
            }
            sb.append("string");
        }
        columnTypeProperty = sb.toString();
    }

    ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
    while (virtualColumns.size() > 0) {
        fieldTypes.remove(virtualColumns.removeLast());
    }
    StructTypeInfo rowType = new StructTypeInfo();
    rowType.setAllStructFieldNames(columnNames);
    rowType.setAllStructFieldTypeInfos(fieldTypes);
    return OrcRecordUpdater.createEventSchema(OrcStruct.createObjectInspector(rowType));
}
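The interesting detail above is the order of removal: indices are collected with addLast and drained with removeLast, so deletions run from the highest index down and no pending index is invalidated by an earlier removal. A standalone sketch of that idea with hypothetical type names; note the cast to int, which selects List.remove(int index) rather than the remove(Object) overload an unboxed Integer would otherwise match:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class RemoveByIndexSketch {
    public static void main(String[] args) {
        List<String> types = new ArrayList<>(List.of("int", "string", "rowid", "double", "rowid"));
        Deque<Integer> toDrop = new ArrayDeque<>();
        toDrop.addLast(2);
        toDrop.addLast(4);
        // removeLast() yields indices in descending order, so removing an
        // element never shifts an index still waiting in the deque
        while (!toDrop.isEmpty()) {
            types.remove((int) toDrop.removeLast());
        }
        System.out.println(types); // [int, string, double]
    }
}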
From source file: org.shaman.terrain.polygonal.PolygonalMapGenerator.java

private void createBiomes() {
    if (graph == null) {
        return;
    }

    //assign temperatures
    for (Graph.Corner c : graph.corners) {
        c.temperature = c.elevation;
        c.temperature *= c.temperature;
        c.temperature = 1 - c.temperature;
    }
    assignCenterTemperature();

    //create random rivers
    Random rand = new Random(seed * 3);
    for (Graph.Corner c : graph.corners) {
        c.river = 0;
    }
    float riverProb = 0.2f;
    float riverStartHeight = 0.7f;
    int riverCounter = 0;
    corner: for (Graph.Corner c : graph.corners) {
        if (c.water || c.elevation < riverStartHeight) {
            continue;
        }
        if (rand.nextFloat() > riverProb) {
            continue;
        }
        if (c.river > 0) {
            continue;
        }
        for (Graph.Corner c2 : c.adjacent) {
            if (c2.river > 0) {
                continue corner;
            }
            for (Graph.Corner c3 : c2.adjacent) {
                if (c3.river > 0) {
                    continue corner;
                }
            }
        }
        //start new river from here
        Graph.Corner current = c;
        current.river = Math.max(current.river, 1);
        while (!current.ocean && !current.coast) {
            float minH = current.elevation;
            Graph.Corner minC = null;
            for (Graph.Corner c2 : current.adjacent) {
                if (c2.river > 0 && c2.elevation < current.elevation) {
                    minC = c2; //force closing of rivers
                    break;
                }
                if (c2.elevation < minH) {
                    minC = c2;
                    minH = c2.elevation;
                }
            }
            if (minC == null) {
                LOG.warning("river stuck in a local minima without reaching the ocean");
                break;
            }
            minC.river = Math.max(minC.river, current.river + 1);
            current = minC;
        }
        riverCounter++;
    }
    LOG.info("count of created rivers: " + riverCounter);
    showRivers = true;

    //assign moisture
    Queue<Graph.Corner> queue = new ArrayDeque<>();
    for (Graph.Corner q : graph.corners) {
        if ((q.water || q.river > 0) && !q.ocean) {
            q.moisture = q.river > 0 ? Math.min(3.0f, (0.4f * q.river)) : 1;
            queue.add(q);
        } else {
            q.moisture = 0;
        }
    }
    while (!queue.isEmpty()) {
        Graph.Corner q = queue.poll();
        for (Graph.Corner r : q.adjacent) {
            float newMoisture = q.moisture * 0.8f;
            if (newMoisture > r.moisture) {
                r.moisture = newMoisture;
                queue.add(r);
            }
        }
    }
    for (Graph.Corner q : graph.corners) {
        if (q.ocean || q.coast) {
            q.moisture = 1;
        }
    }

    //redistribute moisture
    ArrayList<Graph.Corner> corners = new ArrayList<>();
    for (Graph.Corner q : graph.corners) {
        if (!q.ocean && !q.coast) {
            corners.add(q);
        }
    }
    Collections.sort(corners, new Comparator<Graph.Corner>() {
        @Override
        public int compare(Graph.Corner o1, Graph.Corner o2) {
            return Float.compare(o1.moisture, o2.moisture);
        }
    });
    for (int i = 0; i < corners.size(); i++) {
        corners.get(i).moisture = i / (float) (corners.size() - 1);
    }
    assignCenterMoisture();
    assignBiomes();

    //update mesh
    updateTemperatureGeometry();
    updateMoistureGeometry();
    updateBiomesGeometry();
}
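The moisture pass above is a breadth-first relaxation: seed corners go into an ArrayDeque, and a node is re-queued whenever it improves a neighbor's value, so moisture decays outward from water until no update helps. A standalone sketch over a hypothetical adjacency list (a simple chain of four nodes):

import java.util.*;

public class MoistureSpreadSketch {
    public static void main(String[] args) {
        // hypothetical graph: node -> neighbors (chain 0-1-2-3)
        List<List<Integer>> adj = List.of(List.of(1), List.of(0, 2), List.of(1, 3), List.of(2));
        float[] moisture = new float[4];
        Queue<Integer> queue = new ArrayDeque<>();
        moisture[0] = 1f;  // seed, e.g. a river corner
        queue.add(0);
        while (!queue.isEmpty()) {
            int q = queue.poll();
            for (int r : adj.get(q)) {
                float spread = moisture[q] * 0.8f; // decay per hop, as above
                if (spread > moisture[r]) {
                    moisture[r] = spread;
                    queue.add(r); // re-queue: its neighbors may improve too
                }
            }
        }
        System.out.println(Arrays.toString(moisture)); // [1.0, 0.8, ~0.64, ~0.51]
    }
}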
From source file: com.google.gwt.emultest.java.util.ArrayDequeTest.java

public void testRolloverInvariants() {
    ArrayDeque<Integer> deque = new ArrayDeque<>();
    assertTrue(deque.add(1));
    assertEquals(1, (int) deque.removeFirst());
    for (int i = 0; i < 100; i++) {
        assertTrue(deque.add(i));
    }
    assertNotNull(deque.peek());
    assertFalse(deque.isEmpty());

    Iterator<Integer> it = deque.iterator();
    for (int i = 0; i < 100; i++) {
        assertTrue(it.hasNext());
        assertEquals(i, (int) it.next());
        it.remove();
    }
    assertFalse(it.hasNext());
    assertNull(deque.peek());
    assertTrue(deque.isEmpty());
}
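The test name refers to the backing store: ArrayDeque is typically implemented as a circular array, and the initial add/removeFirst pushes the head index forward so later growth and iteration cross the wrap point. A small sketch that exercises the same rollover (the behavior shown is plain JDK semantics; the wrap point itself is an implementation detail):

import java.util.ArrayDeque;

public class RolloverSketch {
    public static void main(String[] args) {
        ArrayDeque<Integer> deque = new ArrayDeque<>(8); // small initial capacity
        // advance the head index, then grow across the wrap point;
        // the deque must still behave like a plain FIFO
        for (int i = 0; i < 5; i++) { deque.add(i); deque.removeFirst(); }
        for (int i = 0; i < 20; i++) { deque.add(i); }
        System.out.println(deque.peekFirst() + " .. " + deque.peekLast()); // 0 .. 19
    }
}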
From source file: com.espertech.esper.core.EPRuntimeImpl.java

private void processScheduleHandles(ArrayBackedCollection<ScheduleHandle> handles) {
    if (ThreadLogUtil.ENABLED_TRACE) {
        ThreadLogUtil.trace("Found schedules for", handles.size());
    }

    if (handles.size() == 0) {
        return;
    }

    // handle 1 result separately for performance reasons
    if (handles.size() == 1) {
        Object[] handleArray = handles.getArray();
        EPStatementHandleCallback handle = (EPStatementHandleCallback) handleArray[0];

        if ((MetricReportingPath.isMetricsEnabled)
                && (handle.getEpStatementHandle().getMetricsHandle().isEnabled())) {
            long cpuTimeBefore = MetricUtil.getCPUCurrentThread();
            long wallTimeBefore = MetricUtil.getWall();

            processStatementScheduleSingle(handle, services, engineFilterAndDispatchTimeContext);

            long wallTimeAfter = MetricUtil.getWall();
            long cpuTimeAfter = MetricUtil.getCPUCurrentThread();
            long deltaCPU = cpuTimeAfter - cpuTimeBefore;
            long deltaWall = wallTimeAfter - wallTimeBefore;
            services.getMetricsReportingService().accountTime(handle.getEpStatementHandle().getMetricsHandle(),
                    deltaCPU, deltaWall, 1);
        } else {
            if ((ThreadingOption.isThreadingEnabled) && (services.getThreadingService().isTimerThreading())) {
                services.getThreadingService().submitTimerWork(
                        new TimerUnitSingle(services, this, handle, this.engineFilterAndDispatchTimeContext));
            } else {
                processStatementScheduleSingle(handle, services, engineFilterAndDispatchTimeContext);
            }
        }

        handles.clear();
        return;
    }

    Object[] matchArray = handles.getArray();
    int entryCount = handles.size();

    // sort multiple matches for the event into statements
    Map<EPStatementHandle, Object> stmtCallbacks = schedulePerStmtThreadLocal.get();
    stmtCallbacks.clear();
    for (int i = 0; i < entryCount; i++) { // need to use the size of the collection
        EPStatementHandleCallback handleCallback = (EPStatementHandleCallback) matchArray[i];
        EPStatementHandle handle = handleCallback.getEpStatementHandle();
        ScheduleHandleCallback callback = handleCallback.getScheduleCallback();

        Object entry = stmtCallbacks.get(handle);

        // This statement has not been encountered before
        if (entry == null) {
            stmtCallbacks.put(handle, callback);
            continue;
        }

        // This statement has been encountered once before
        if (entry instanceof ScheduleHandleCallback) {
            ScheduleHandleCallback existingCallback = (ScheduleHandleCallback) entry;
            ArrayDeque<ScheduleHandleCallback> entries = new ArrayDeque<ScheduleHandleCallback>();
            entries.add(existingCallback);
            entries.add(callback);
            stmtCallbacks.put(handle, entries);
            continue;
        }

        // This statement has been encountered more than once before
        ArrayDeque<ScheduleHandleCallback> entries = (ArrayDeque<ScheduleHandleCallback>) entry;
        entries.add(callback);
    }
    handles.clear();

    for (Map.Entry<EPStatementHandle, Object> entry : stmtCallbacks.entrySet()) {
        EPStatementHandle handle = entry.getKey();
        Object callbackObject = entry.getValue();

        if ((MetricReportingPath.isMetricsEnabled) && (handle.getMetricsHandle().isEnabled())) {
            long cpuTimeBefore = MetricUtil.getCPUCurrentThread();
            long wallTimeBefore = MetricUtil.getWall();

            processStatementScheduleMultiple(handle, callbackObject, services,
                    this.engineFilterAndDispatchTimeContext);

            long wallTimeAfter = MetricUtil.getWall();
            long cpuTimeAfter = MetricUtil.getCPUCurrentThread();
            long deltaCPU = cpuTimeAfter - cpuTimeBefore;
            long deltaWall = wallTimeAfter - wallTimeBefore;
            int numInput = (callbackObject instanceof Collection) ? ((Collection) callbackObject).size() : 1;
            services.getMetricsReportingService().accountTime(handle.getMetricsHandle(), deltaCPU, deltaWall,
                    numInput);
        } else {
            if ((ThreadingOption.isThreadingEnabled) && (services.getThreadingService().isTimerThreading())) {
                services.getThreadingService().submitTimerWork(new TimerUnitMultiple(services, this, handle,
                        callbackObject, this.engineFilterAndDispatchTimeContext));
            } else {
                processStatementScheduleMultiple(handle, callbackObject, services,
                        this.engineFilterAndDispatchTimeContext);
            }
        }

        if ((isPrioritized) && (handle.isPreemptive())) {
            break;
        }
    }
}
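The map above deliberately holds either a bare callback or an ArrayDeque of callbacks: the common single-callback case pays for no deque, and the entry is inflated to a deque only on the second hit. A minimal sketch of that single-or-deque idiom, using hypothetical String callbacks in place of ScheduleHandleCallback:

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;

public class SingleOrDequeSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Map<String, Object> perStatement = new HashMap<>();
        for (String[] hit : new String[][] { {"s1", "cb1"}, {"s2", "cb2"}, {"s1", "cb3"} }) {
            String handle = hit[0];
            String callback = hit[1];
            Object entry = perStatement.get(handle);
            if (entry == null) {
                perStatement.put(handle, callback);          // first hit: store the bare callback
            } else if (!(entry instanceof ArrayDeque)) {
                ArrayDeque<String> entries = new ArrayDeque<>();
                entries.add((String) entry);                 // second hit: inflate to a deque
                entries.add(callback);
                perStatement.put(handle, entries);
            } else {
                ((ArrayDeque<String>) entry).add(callback);  // later hits: append
            }
        }
        System.out.println(perStatement); // s1 -> [cb1, cb3], s2 -> cb2 (key order unspecified)
    }
}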
From source file: com.google.gwt.emultest.java.util.ArrayDequeTest.java

@Override
protected Collection makeCollection() {
    return new ArrayDeque<>();
}
From source file: com.espertech.esper.core.service.EPRuntimeImpl.java

private void processScheduleHandles(ArrayBackedCollection<ScheduleHandle> handles) {
    if (ThreadLogUtil.ENABLED_TRACE) {
        ThreadLogUtil.trace("Found schedules for", handles.size());
    }

    if (handles.size() == 0) {
        return;
    }

    // handle 1 result separately for performance reasons
    if (handles.size() == 1) {
        Object[] handleArray = handles.getArray();
        EPStatementHandleCallback handle = (EPStatementHandleCallback) handleArray[0];

        if ((MetricReportingPath.isMetricsEnabled)
                && (handle.getAgentInstanceHandle().getStatementHandle().getMetricsHandle().isEnabled())) {
            long cpuTimeBefore = MetricUtil.getCPUCurrentThread();
            long wallTimeBefore = MetricUtil.getWall();

            processStatementScheduleSingle(handle, services, engineFilterAndDispatchTimeContext);

            long wallTimeAfter = MetricUtil.getWall();
            long cpuTimeAfter = MetricUtil.getCPUCurrentThread();
            long deltaCPU = cpuTimeAfter - cpuTimeBefore;
            long deltaWall = wallTimeAfter - wallTimeBefore;
            services.getMetricsReportingService().accountTime(
                    handle.getAgentInstanceHandle().getStatementHandle().getMetricsHandle(), deltaCPU,
                    deltaWall, 1);
        } else {
            if ((ThreadingOption.isThreadingEnabled) && (services.getThreadingService().isTimerThreading())) {
                services.getThreadingService().submitTimerWork(
                        new TimerUnitSingle(services, this, handle, this.engineFilterAndDispatchTimeContext));
            } else {
                processStatementScheduleSingle(handle, services, engineFilterAndDispatchTimeContext);
            }
        }

        handles.clear();
        return;
    }

    Object[] matchArray = handles.getArray();
    int entryCount = handles.size();

    // sort multiple matches for the event into statements
    Map<EPStatementAgentInstanceHandle, Object> stmtCallbacks = schedulePerStmtThreadLocal.get();
    stmtCallbacks.clear();
    for (int i = 0; i < entryCount; i++) { // need to use the size of the collection
        EPStatementHandleCallback handleCallback = (EPStatementHandleCallback) matchArray[i];
        EPStatementAgentInstanceHandle handle = handleCallback.getAgentInstanceHandle();
        ScheduleHandleCallback callback = handleCallback.getScheduleCallback();

        Object entry = stmtCallbacks.get(handle);

        // This statement has not been encountered before
        if (entry == null) {
            stmtCallbacks.put(handle, callback);
            continue;
        }

        // This statement has been encountered once before
        if (entry instanceof ScheduleHandleCallback) {
            ScheduleHandleCallback existingCallback = (ScheduleHandleCallback) entry;
            ArrayDeque<ScheduleHandleCallback> entries = new ArrayDeque<ScheduleHandleCallback>();
            entries.add(existingCallback);
            entries.add(callback);
            stmtCallbacks.put(handle, entries);
            continue;
        }

        // This statement has been encountered more than once before
        ArrayDeque<ScheduleHandleCallback> entries = (ArrayDeque<ScheduleHandleCallback>) entry;
        entries.add(callback);
    }
    handles.clear();

    for (Map.Entry<EPStatementAgentInstanceHandle, Object> entry : stmtCallbacks.entrySet()) {
        EPStatementAgentInstanceHandle handle = entry.getKey();
        Object callbackObject = entry.getValue();

        if ((MetricReportingPath.isMetricsEnabled)
                && (handle.getStatementHandle().getMetricsHandle().isEnabled())) {
            long cpuTimeBefore = MetricUtil.getCPUCurrentThread();
            long wallTimeBefore = MetricUtil.getWall();

            processStatementScheduleMultiple(handle, callbackObject, services,
                    this.engineFilterAndDispatchTimeContext);

            long wallTimeAfter = MetricUtil.getWall();
            long cpuTimeAfter = MetricUtil.getCPUCurrentThread();
            long deltaCPU = cpuTimeAfter - cpuTimeBefore;
            long deltaWall = wallTimeAfter - wallTimeBefore;
            int numInput = (callbackObject instanceof Collection) ? ((Collection) callbackObject).size() : 1;
            services.getMetricsReportingService().accountTime(handle.getStatementHandle().getMetricsHandle(),
                    deltaCPU, deltaWall, numInput);
        } else {
            if ((ThreadingOption.isThreadingEnabled) && (services.getThreadingService().isTimerThreading())) {
                services.getThreadingService().submitTimerWork(new TimerUnitMultiple(services, this, handle,
                        callbackObject, this.engineFilterAndDispatchTimeContext));
            } else {
                processStatementScheduleMultiple(handle, callbackObject, services,
                        this.engineFilterAndDispatchTimeContext);
            }
        }

        if ((isPrioritized) && (handle.isPreemptive())) {
            break;
        }
    }
}
From source file: oracle.kv.hadoop.hive.table.TableStorageHandlerBase.java

/**
 * Method required by the HiveStoragePredicateHandler interface.
 * <p>
 * This method validates the components of the given predicate and
 * ultimately produces the following artifacts:
 *
 * <ul>
 * <li>a Hive object representing the predicate that will be pushed to
 *     the backend for server side filtering
 * <li>the String form of the computed predicate to push; which can be
 *     passed to the server via the ONSQL query mechanism
 * <li>a Hive object consisting of the remaining components of the
 *     original predicate input to this method -- referred to as the
 *     'residual' predicate; which represents the criteria the Hive
 *     infrastructure will apply (on the client side) to the results
 *     returned after server side filtering has been performed
 * </ul>
 *
 * The predicate analysis model that Hive employs is basically a two
 * step process. First, an instance of the Hive IndexPredicateAnalyzer
 * class is created and its analyzePredicate method is invoked, which
 * returns a Hive class representing the residual predicate, and also
 * populates a Collection whose contents is dependent on the particular
 * implementation of IndexPredicateAnalyzer that is used. After
 * analyzePredicate is invoked, the analyzer's translateSearchConditions
 * method is invoked to convert the contents of the populated Collection
 * to a Hive object representing the predicate that can be pushed to
 * the server side. Finally, the object that is returned is an instance
 * of the Hive DecomposedPredicate class; which contains the computed
 * predicate to push and the residual predicate.
 * <p>
 * Note that because the Hive built-in IndexPredicateAnalyzer produces
 * only predicates that consist of 'AND' statements, and which correspond
 * to PrimaryKey based or IndexKey based predicates, if the Hive built-in
 * analyzer does not produce a predicate to push, then a custom analyzer
 * that extends the capabilities of the Hive built-in analyzer is
 * employed. This extended analyzer handles statements that the built-in
 * analyzer does not handle. Additionally, whereas the built-in analyzer
 * populates a List of Hive IndexSearchConditions corresponding to the
 * filtering criteria of the predicate to push, the extended analyzer
 * populates an ArrayDeque in which the top (first element) of the
 * Deque is a Hive object consisting of all the components of the original
 * input predicate, but with 'invalid' operators replaced with 'valid'
 * operators; for example, with 'IN <list>' replaced with 'OR' statements.
 * <p>
 * In each case, translateSearchConditions constructs the appropriate
 * Hive predicate to push from the contents of the given Collection;
 * either List of IndexSearchCondition, or ArrayDeque.
 */
@Override
@SuppressWarnings("deprecation")
public DecomposedPredicate decomposePredicate(JobConf jobConfig,
        org.apache.hadoop.hive.serde2.Deserializer deserializer, ExprNodeDesc predicate) {

    /* Reset query state to default values. */
    TableHiveInputFormat.resetQueryInfo();
    DecomposedPredicate decomposedPredicate = null;

    /*
     * Try the Hive built-in analyzer first; which will validate the
     * components of the given predicate and separate them into two
     * disjoint sets: a set of search conditions that correspond to
     * either a valid PrimaryKey or IndexKey (and optional FieldRange)
     * that can be scanned (in the backend KVStore server) using one
     * of the TableIterators; and a set containing the remaining components
     * of the predicate (the 'residual' predicate), which Hive will
     * apply to the results returned after the search conditions have
     * first been applied on (pushed to) the backend.
     */
    final IndexPredicateAnalyzer analyzer = TableHiveInputFormat.sargablePredicateAnalyzer(predicate,
            (TableSerDe) deserializer);

    if (analyzer != null) { /* Use TableScan or IndexScan */

        /* Decompose predicate into search conditions and residual. */
        final List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
        final ExprNodeGenericFuncDesc residualPredicate = (ExprNodeGenericFuncDesc) analyzer
                .analyzePredicate(predicate, searchConditions);

        decomposedPredicate = new DecomposedPredicate();
        decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(searchConditions);
        decomposedPredicate.residualPredicate = residualPredicate;

        /*
         * Valid search conditions and residual have been obtained.
         * Determine whether the search conditions are index based or
         * based on the table's primary key. If index based, then tell
         * the InputFormat to build splits and scan (iterate) based on
         * shards; otherwise, tell the InputFormat to base the iterator
         * on partition sets.
         */
        final StringBuilder whereBuf = new StringBuilder();
        TableHiveInputFormat.buildPushPredicate(decomposedPredicate.pushedPredicate, whereBuf);
        final String whereStr = whereBuf.toString();

        TableHiveInputFormat.setQueryInfo(searchConditions, (TableSerDe) deserializer, whereStr);

        if (LOG.isDebugEnabled()) {
            LOG.debug("-----------------------------");
            LOG.debug("residual = " + decomposedPredicate.residualPredicate);
            LOG.debug("predicate = " + decomposedPredicate.pushedPredicate);
            LOG.debug("search conditions = " + searchConditions);

            switch (TableHiveInputFormat.getQueryBy()) {
            case TableInputSplit.QUERY_BY_INDEX:
                LOG.debug("push predicate to secondary index [" + "WHERE " + whereStr + "]");
                break;
            case TableInputSplit.QUERY_BY_PRIMARY_ALL_PARTITIONS:
            case TableInputSplit.QUERY_BY_PRIMARY_SINGLE_PARTITION:
                LOG.debug("push predicate to primary index [" + "WHERE " + whereStr + "]");
                break;
            default:
                break;
            }
            LOG.debug("-----------------------------");
        }

    } else { /* IndexPredicateAnalyzer == null ==> Use native query */

        /*
         * The given predicate does not consist of search conditions that
         * correspond to either a valid PrimaryKey or IndexKey (or
         * FieldRange). Thus, employ the extended analyzer to handle
         * statements the built-in analyzer cannot handle.
         */
        final TableHiveInputFormat.ExtendedPredicateAnalyzer extendedAnalyzer = TableHiveInputFormat
                .createPredicateAnalyzerForOnql((TableSerDe) deserializer);

        if (extendedAnalyzer == null) {
            LOG.debug("extended predicate analyzer = null ... " + "NO PREDICATE PUSHDOWN");
            return null;
        }

        final ArrayDeque<ExprNodeDesc> pushPredicateDeque = new ArrayDeque<ExprNodeDesc>();
        final ExprNodeGenericFuncDesc residualPredicate = (ExprNodeGenericFuncDesc) extendedAnalyzer
                .analyzePredicate(predicate, pushPredicateDeque);

        if (LOG.isTraceEnabled()) {
            final ExprNodeDesc[] qElements = pushPredicateDeque
                    .toArray(new ExprNodeDesc[pushPredicateDeque.size()]);
            LOG.trace("-----------------------------");
            LOG.trace("push predicate queue elements:");
            for (int i = 0; i < qElements.length; i++) {
                LOG.trace("element[" + i + "] = " + qElements[i]);
            }
            LOG.trace("-----------------------------");
        }

        decomposedPredicate = new DecomposedPredicate();
        final StringBuilder whereBuf = new StringBuilder();

        decomposedPredicate.residualPredicate = residualPredicate;
        decomposedPredicate.pushedPredicate = extendedAnalyzer.translateSearchConditions(pushPredicateDeque,
                whereBuf);

        if (decomposedPredicate.pushedPredicate != null) {
            if (LOG.isTraceEnabled()) {
                TableHiveInputFormat.ExtendedPredicateAnalyzer
                        .displayNodeTree(decomposedPredicate.pushedPredicate);
            }

            final String whereStr = whereBuf.toString();

            if (LOG.isDebugEnabled()) {
                LOG.debug("-----------------------------");
                LOG.debug("residual = " + decomposedPredicate.residualPredicate);
                LOG.debug("predicate = " + decomposedPredicate.pushedPredicate);
                LOG.debug("push predicate via native query [" + "WHERE " + whereStr + "]");
                LOG.debug("-----------------------------");
            }

            TableHiveInputFormat.setQueryInfo((TableSerDe) deserializer, whereStr);

        } else {
            LOG.debug("Extended predicate analyzer found no predicate "
                    + "to push. Will use all of residual for filtering.");
        }
    } /* endif: IndexPredicateAnalyzer != null or == null */

    return decomposedPredicate;
}