Usage examples for java.util.ListIterator.remove()
void remove();
From source file:org.gatein.integration.jboss.as7.deployment.PortletBridgeDependencyProcessor.java
private void addPortletBridgeListener(DeploymentUnit deploymentUnit) { WarMetaData warMetaData = deploymentUnit.getAttachment(WarMetaData.ATTACHMENT_KEY); if (warMetaData == null) { log.debug("Not installing Portlet Bridge web tier integration as no war metadata found"); return;// w w w. ja va2 s . co m } JBossWebMetaData webMetaData = warMetaData.getMergedJBossWebMetaData(); if (webMetaData == null) { log.debug("Not installing Portlet Bridge web tier integration as no merged web metadata found"); return; } List<ListenerMetaData> listeners = webMetaData.getListeners(); if (listeners == null) { listeners = new ArrayList<ListenerMetaData>(); webMetaData.setListeners(listeners); } else { //if the portlet bridge listener is present remove it //this should allow wars to be portable between AS7 and servlet containers final ListIterator<ListenerMetaData> iterator = listeners.listIterator(); while (iterator.hasNext()) { final ListenerMetaData listener = iterator.next(); if (listener.getListenerClass().trim().equals(PBR_LISTENER)) { log.debugf( "Removing portlet bridge listener %s from web config, as it is not needed in EE6 environments", PBR_LISTENER); iterator.remove(); break; } } } listeners.add(0, cdiListener); }
From source file:org.apache.fop.render.rtf.rtflib.rtfdoc.RtfTextrun.java
/**
 * Inserts paragraph break before all close group marks.
 *
 * <p>Implementation: trailing {@link RtfCloseGroupMark} children are popped off the
 * end of the child list (their break types saved on a stack), the paragraph break is
 * created, and the saved close group marks are re-appended afterwards so the break
 * ends up textually before them.</p>
 *
 * @throws IOException for I/O problems
 * @return The paragraph break element, or null when no break was inserted
 */
public RtfParagraphBreak addParagraphBreak() throws IOException {
    // get copy of children list
    List children = getChildren();
    Stack tmp = new Stack();
    RtfParagraphBreak par = null;

    // delete all previous CloseGroupMark
    int deletedCloseGroupCount = 0;

    // Iterate backwards from the end of the child list.
    ListIterator lit = children.listIterator(children.size());
    while (lit.hasPrevious() && (lit.previous() instanceof RtfCloseGroupMark)) {
        // previous() moved the cursor back one element; the next() call re-reads that
        // same element, positioning the iterator so remove() deletes exactly it.
        tmp.push(Integer.valueOf(((RtfCloseGroupMark) lit.next()).getBreakType()));
        lit.remove();
        deletedCloseGroupCount++;
    }

    if (children.size() != 0) {
        // add paragraph break and restore all deleted close group marks
        setChildren(children);
        par = new RtfParagraphBreak(this, writer);
        for (int i = 0; i < deletedCloseGroupCount; i++) {
            addCloseGroupMark(((Integer) tmp.pop()).intValue());
        }
    }
    // NOTE(review): if every child was a close group mark, the marks removed above are
    // NOT restored and no break is created — confirm this is the intended behavior.
    return par;
}
From source file:com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask.java
/** * Scans a list of records to filter out records up to and including the most recent checkpoint value and to get * the greatest extended sequence number from the retained records. Also emits metrics about the records. * * @param scope metrics scope to emit metrics into * @param records list of records to scan and change in-place as needed * @param lastCheckpointValue the most recent checkpoint value * @param lastLargestPermittedCheckpointValue previous largest permitted checkpoint value * @return the largest extended sequence number among the retained records *///from w w w . j ava 2 s . c o m private ExtendedSequenceNumber filterAndGetMaxExtendedSequenceNumber(IMetricsScope scope, List<Record> records, final ExtendedSequenceNumber lastCheckpointValue, final ExtendedSequenceNumber lastLargestPermittedCheckpointValue) { ExtendedSequenceNumber largestExtendedSequenceNumber = lastLargestPermittedCheckpointValue; ListIterator<Record> recordIterator = records.listIterator(); while (recordIterator.hasNext()) { Record record = recordIterator.next(); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(record.getSequenceNumber(), record instanceof UserRecord ? ((UserRecord) record).getSubSequenceNumber() : null); if (extendedSequenceNumber.compareTo(lastCheckpointValue) <= 0) { recordIterator.remove(); LOG.debug("removing record with ESN " + extendedSequenceNumber + " because the ESN is <= checkpoint (" + lastCheckpointValue + ")"); continue; } if (largestExtendedSequenceNumber == null || largestExtendedSequenceNumber.compareTo(extendedSequenceNumber) < 0) { largestExtendedSequenceNumber = extendedSequenceNumber; } scope.addData(DATA_BYTES_PROCESSED_METRIC, record.getData().limit(), StandardUnit.Bytes, MetricsLevel.SUMMARY); } return largestExtendedSequenceNumber; }
From source file:org.openhab.binding.canopen.internal.CANOpenBinding.java
/**
 * Reacts to a binding change for a single item: when the item's binding was removed,
 * unregisters it from the PDO/NMT/SDO bookkeeping maps; when a binding (still) exists,
 * (re-)registers the item's configuration and opens its CAN socket connection.
 *
 * @param provider the binding provider reporting the change
 * @param itemName the name of the affected item
 */
public void bindingChanged(BindingProvider provider, String itemName) {
    super.bindingChanged(provider, itemName); // register as listener!
    if (!((CANOpenBindingProvider) provider).providesBindingFor(itemName)) {
        // Item was removed
        // logger.debug("removing item " + itemName);
        // TODO provide for removal of unused sockets
        // remove PDO
        Integer pdoId = itemPDOMap.get(itemName);
        if (pdoId != null) {
            LinkedList<CANOpenItemConfig> pdoList = pdoConfigMap.get(pdoId);
            if (pdoList != null) {
                // Drop every config in this PDO's list that belongs to the item.
                ListIterator<CANOpenItemConfig> iterator = pdoList.listIterator();
                while (iterator.hasNext()) {
                    if (itemName.equals(iterator.next().getItemName()))
                        iterator.remove();
                }
            }
            itemPDOMap.remove(itemName);
        }
        // remove NMT
        Iterator<CANOpenItemConfig> configsIterator = nmtConfigMap.values().iterator();
        while (configsIterator.hasNext()) {
            if (itemName.equals(configsIterator.next().getItemName()))
                configsIterator.remove();
        }
        // remove SDOs — each item belongs to at most one manager, so stop at first hit
        for (SDODeviceManager manager : sdoDeviceManagerMap.values()) {
            if (manager.removeItemName(itemName))
                break;
        }
    } else {
        CANOpenItemConfig itemConfig = ((CANOpenBindingProvider) provider).getItemConfig(itemName);
        // Open (or reuse) the socket for the item's CAN interface and subscribe for messages.
        ISocketConnection conn = null;
        try {
            conn = CANOpenActivator.getConnection(itemConfig.getCanInterfaceId());
            conn.addMessageReceivedListener(this);
            conn.open();
        } catch (Exception e) {
            logger.error("Error adding listener to or opening socket " + itemConfig.getCanInterfaceId()
                    + ": " + e);
        }
        // conn may be non-null even if open() failed above; initializeItem is still attempted.
        if (conn != null) {
            initializeItem(conn, itemConfig);
        }
        // add PDO
        if (itemConfig.providesTxPDO()) {
            LinkedList<CANOpenItemConfig> pdoList = pdoConfigMap.get(itemConfig.getPDOId());
            if (pdoList == null) {
                pdoList = new LinkedList<CANOpenItemConfig>();
                pdoConfigMap.put(itemConfig.getPDOId(), pdoList);
            }
            pdoList.add(itemConfig);
            itemPDOMap.put(itemName, itemConfig.getPDOId());
        }
        // add NMT
        if (itemConfig.providesNMT()) {
            nmtConfigMap.put(itemConfig.getDeviceID(), itemConfig);
        }
        // add SDO — lazily create the per-device manager on first use
        if (itemConfig.providesSDO()) {
            SDODeviceManager manager = sdoDeviceManagerMap.get(itemConfig.getDeviceID());
            if (manager == null) {
                manager = new SDODeviceManager(this, sdoResponseTimeout);
                sdoDeviceManagerMap.put(itemConfig.getDeviceID(), manager);
            }
            manager.add(itemConfig);
        }
        logger.debug("added item config " + itemConfig);
    }
}
From source file:chat.viska.commons.pipelines.Pipeline.java
/** * Removes a {@link Pipe}. Fails silently if the specified {@link Pipe} does * not exist in the pipeline.//w w w .j ava 2 s . c om */ @SchedulerSupport(SchedulerSupport.IO) public Maybe<Pipe> remove(final Pipe pipe) { return Maybe.fromCallable((Callable<@Nullable Pipe>) () -> { pipeLock.writeLock().lockInterruptibly(); try { final ListIterator iterator = getIteratorOf(pipe); if (iterator == null) { return null; } iterator.next(); // For the remove() to work iterator.remove(); pipe.onRemovedFromPipeline(this); } finally { pipeLock.writeLock().unlock(); } return pipe; }).subscribeOn(Schedulers.io()); }
From source file:org.broadleafcommerce.core.order.strategy.FulfillmentGroupItemStrategyImpl.java
@Override public CartOperationRequest verify(CartOperationRequest request) throws PricingException { Order order = request.getOrder();/* w ww . j a va 2s.co m*/ if (isRemoveEmptyFulfillmentGroups() && order.getFulfillmentGroups() != null) { ListIterator<FulfillmentGroup> fgIter = order.getFulfillmentGroups().listIterator(); while (fgIter.hasNext()) { FulfillmentGroup fg = fgIter.next(); if (fg.getFulfillmentGroupItems() == null || fg.getFulfillmentGroupItems().size() == 0) { fgIter.remove(); fulfillmentGroupService.delete(fg); } } } Map<Long, Integer> oiQuantityMap = new HashMap<Long, Integer>(); List<OrderItem> expandedOrderItems = new ArrayList<OrderItem>(); Map<Long, FulfillmentGroupItem> fgItemMap = new HashMap<Long, FulfillmentGroupItem>(); for (OrderItem oi : order.getOrderItems()) { if (oi instanceof BundleOrderItem) { for (DiscreteOrderItem doi : ((BundleOrderItem) oi).getDiscreteOrderItems()) { expandedOrderItems.add(doi); } } else if (oi instanceof DiscreteOrderItem) { expandedOrderItems.add(oi); } else { expandedOrderItems.add(oi); } } for (OrderItem oi : expandedOrderItems) { Integer oiQuantity = oiQuantityMap.get(oi.getId()); if (oiQuantity == null) { oiQuantity = 0; } if (oi instanceof DiscreteOrderItem && ((DiscreteOrderItem) oi).getBundleOrderItem() != null) { oiQuantity += ((DiscreteOrderItem) oi).getBundleOrderItem().getQuantity() * oi.getQuantity(); } else { oiQuantity += oi.getQuantity(); } oiQuantityMap.put(oi.getId(), oiQuantity); } for (FulfillmentGroup fg : order.getFulfillmentGroups()) { for (FulfillmentGroupItem fgi : fg.getFulfillmentGroupItems()) { Long oiId = fgi.getOrderItem().getId(); Integer oiQuantity = oiQuantityMap.get(oiId); if (oiQuantity == null) { throw new IllegalStateException( "Fulfillment group items and discrete order items are not in sync. 
DiscreteOrderItem id: " + oiId); } oiQuantity -= fgi.getQuantity(); oiQuantityMap.put(oiId, oiQuantity); fgItemMap.put(fgi.getId(), fgi); } } for (Entry<Long, Integer> entry : oiQuantityMap.entrySet()) { if (!entry.getValue().equals(0)) { if (useSingleFulfillmentGroupQtySync(order.getFulfillmentGroups())) { LOG.warn("Not enough fulfillment group items found for DiscreteOrderItem id:" + entry.getKey()); // There are edge cases where the OrderItem and FulfillmentGroupItem quantities can fall out of sync. If this happens // we set the FGItem to the correct quantity from the OrderItem and save/reprice the order to synchronize them. FulfillmentGroupItem fgItem = fgItemMap.get(entry.getKey()); for (OrderItem oi : expandedOrderItems) { if (oi.getId().equals(fgItem.getOrderItem().getId())) { LOG.warn("Synchronizing FulfillmentGroupItem to match OrderItem [" + entry.getKey() + "] quantity of : " + oi.getQuantity()); fgItem.setQuantity(oi.getQuantity()); } } // We price the order in order to get the right amount after the qty change order = orderService.save(order, true); request.setOrder(order); } else { throw new IllegalStateException( "Not enough fulfillment group items found for DiscreteOrderItem id: " + entry.getKey()); } } } return request; }
From source file:chat.viska.commons.pipelines.Pipeline.java
/**
 * Removes the {@link Pipe} registered under the given name, completing empty when no
 * pipe with that name exists.
 *
 * <p>The removal runs on the IO scheduler and holds the pipeline write lock while
 * mutating the pipe list.</p>
 */
@SchedulerSupport(SchedulerSupport.IO)
public Maybe<Pipe> remove(final String name) {
    Validate.notBlank(name);
    return Maybe.fromCallable((Callable<@Nullable Pipe>) () -> {
        final Pipe removed;
        pipeLock.writeLock().lockInterruptibly();
        try {
            final ListIterator<Map.Entry<String, Pipe>> it = getIteratorOf(name);
            if (it == null) {
                // No pipe registered under this name.
                return null;
            }
            // Advance past the entry so remove() targets it, keeping the pipe itself.
            removed = it.next().getValue();
            it.remove();
            removed.onRemovedFromPipeline(this);
        } finally {
            pipeLock.writeLock().unlock();
        }
        return removed;
    }).subscribeOn(Schedulers.io());
}
From source file:org.apache.pig.backend.hadoop.hbase.HBaseTableInputFormat.java
@Override public List<InputSplit> getSplits(org.apache.hadoop.mapreduce.JobContext context) throws IOException { List<InputSplit> splits = super.getSplits(context); ListIterator<InputSplit> splitIter = splits.listIterator(); while (splitIter.hasNext()) { TableSplit split = (TableSplit) splitIter.next(); byte[] startKey = split.getStartRow(); byte[] endKey = split.getEndRow(); // Skip if the region doesn't satisfy configured options. if ((skipRegion(CompareOp.LESS, startKey, lt_)) || (skipRegion(CompareOp.GREATER, endKey, gt_)) || (skipRegion(CompareOp.GREATER, endKey, gte_)) || (skipRegion(CompareOp.LESS_OR_EQUAL, startKey, lte_))) { splitIter.remove(); }/*from ww w .j av a 2s.c om*/ } return splits; }
From source file:com.starup.traven.travelkorea.ImageGridFragment.java
private String loadXmlFromNetwork(String urlString) throws XmlPullParserException, IOException { InputStream stream = null;/*from w ww .ja va 2 s . co m*/ VisitKoreaXmlParser stackOverflowXmlParser = new VisitKoreaXmlParser(); try { stream = downloadUrl(urlString); mGridData = null; mGridData = stackOverflowXmlParser.parse(stream); ListIterator<Entry> iter = mGridData.listIterator(); while (iter.hasNext()) { Entry entry = iter.next(); if (entry.firstimage == null) { iter.remove(); } } // Makes sure that the InputStream is closed after the app is // finished using it. } finally { if (stream != null) { stream.close(); } } return "OK"; }
From source file:org.xwiki.refactoring.internal.splitter.DefaultDocumentSplitter.java
/**
 * A recursive method for traversing the xdom of the root document and splitting it into sub documents.
 *
 * @param parentDoc the parent {@link WikiDocument} under which the given list of children reside.
 * @param children current list of blocks being traversed.
 * @param depth the depth from the root xdom to current list of children.
 * @param result space for storing the resulting documents.
 * @param splittingCriterion the {@link SplittingCriterion}.
 * @param namingCriterion the {@link NamingCriterion}.
 */
private void split(WikiDocument parentDoc, List<Block> children, int depth, List<WikiDocument> result,
        SplittingCriterion splittingCriterion, NamingCriterion namingCriterion) {
    ListIterator<Block> it = children.listIterator();
    while (it.hasNext()) {
        Block block = it.next();
        if (splittingCriterion.shouldSplit(block, depth)) {
            // Split a new document and add it to the results list.
            XDOM xdom = new XDOM(block.getChildren());
            String newDocumentName = namingCriterion.getDocumentName(xdom);
            WikiDocument newDoc = new WikiDocument(newDocumentName, xdom, parentDoc);
            result.add(newDoc);
            // Remove the original block from the parent document.
            it.remove();
            // Place a link from the parent to child. ListIterator.add() inserts before
            // the cursor, so these two inserted blocks are NOT revisited by the loop's
            // subsequent next() calls.
            it.add(new NewLineBlock());
            it.add(createLink(block, newDocumentName));
            // Check whether this node should be further traversed.
            if (splittingCriterion.shouldIterate(block, depth)) {
                // Recurse into the freshly split-off document's own children.
                split(newDoc, newDoc.getXdom().getChildren(), depth + 1, result, splittingCriterion,
                        namingCriterion);
            }
        } else if (splittingCriterion.shouldIterate(block, depth)) {
            // Not a split point, but its subtree may still contain split points.
            split(parentDoc, block.getChildren(), depth + 1, result, splittingCriterion, namingCriterion);
        }
    }
}