List of usage examples for java.util.ListIterator.remove()
void remove();
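remove() deletes from the underlying list the last element returned by next() or previous(). It can be called only once per call to next() or previous(), and not after an intervening add(); otherwise it throws IllegalStateException. Before the project examples below, here is a minimal, self-contained sketch (the class name and data are illustrative, not taken from any of the sources):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

public class ListIteratorRemoveDemo {
    public static void main(String[] args) {
        List<String> names = new ArrayList<>(Arrays.asList("alice", "bob", "carol"));
        ListIterator<String> it = names.listIterator();
        while (it.hasNext()) {
            // remove() deletes the element most recently returned by next(),
            // avoiding the ConcurrentModificationException that calling
            // names.remove(...) mid-iteration would risk.
            if (it.next().startsWith("b")) {
                it.remove();
            }
        }
        System.out.println(names); // [alice, carol]
    }
}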
From source file:org.kuali.kra.proposaldevelopment.hierarchy.service.impl.ProposalHierarchyServiceImpl.java
protected void syncProposalPersons(DevelopmentProposal childProposal, DevelopmentProposal hierarchyProposal,
        ProposalPerson pi, List<ProposalPerson> removedPersons) {
    if (proposalPersonExtendedAttributesToDelete == null) {
        proposalPersonExtendedAttributesToDelete = new ArrayList<ProposalPersonExtendedAttributes>();
    }
    // now remove any other attachments for the persons we removed
    for (ProposalPerson removedPerson : removedPersons) {
        List<ProposalPersonBiography> currentBiographies = hierarchyProposal.getPropPersonBios();
        ListIterator<ProposalPersonBiography> iter = currentBiographies.listIterator();
        while (iter.hasNext()) {
            ProposalPersonBiography bio = iter.next();
            if (StringUtils.equalsIgnoreCase(bio.getPersonId(), removedPerson.getPersonId())
                    && removedPerson.getProposalPersonNumber().equals(bio.getProposalPersonNumber())) {
                iter.remove();
            }
        }
        if (removedPerson.getProposalPersonExtendedAttributes() != null) {
            proposalPersonExtendedAttributesToDelete.add(removedPerson.getProposalPersonExtendedAttributes());
        }
    }
}
From source file:com.ppp.prm.portal.server.service.gwt.HibernateDetachUtility.java
/**
 * @param value the object needing to be detached/scrubbed.
 * @param checkedObjectMap This maps identityHashCodes to Objects we've already detached. In that way we can
 * quickly determine if we've already done the work for the incoming value and avoid traversing it again. This
 * works well almost all of the time, but it is possible that two different objects can have the same identity hash
 * (conflicts are always possible with a hash). In that case we utilize the checkedObjectCollisionMap (see below).
 * @param checkedObjectCollisionMap checkedObjectMap maps the identity hash to the *first* object with that hash. In
 * most cases there will only be a mapping for one hash, but it is possible to encounter the same hash for multiple
 * objects, especially on 32bit or IBM JVMs. It is important to know if an object has already been detached
 * because if it is somehow self-referencing, we have to stop the recursion. This map holds the 2nd..Nth mapping
 * for a single hash and is used to ensure we never try to detach an object already processed.
 * @param depth used to stop infinite recursion, defaults to a depth we don't expect to see, but it is configurable.
 * @param serializationType
 * @throws Exception if a problem occurs
 * @throws IllegalStateException if the recursion depth limit is reached
 */
private static void nullOutUninitializedFields(Object value, Map<Integer, Object> checkedObjectMap,
        Map<Integer, List<Object>> checkedObjectCollisionMap, int depth, SerializationType serializationType)
        throws Exception {
    if (depth > depthAllowed) {
        String warningMessage = "Recursed too deep [" + depth + " > " + depthAllowed
                + "], will not attempt to detach object of type ["
                + ((value != null) ? value.getClass().getName() : "N/A")
                + "]. This may cause serialization errors later. "
                + "You can try to work around this by setting the system property [" + DEPTH_ALLOWED_SYSPROP
                + "] to a value higher than [" + depth + "] or you can set the system property ["
                + THROW_EXCEPTION_ON_DEPTH_LIMIT_SYSPROP + "] to 'false'";
        LOG.warn(warningMessage);
        if (throwExceptionOnDepthLimit) {
            throw new IllegalStateException(warningMessage);
        }
        return;
    }

    if (null == value) {
        return;
    }

    // System.identityHashCode is a hash code, and therefore not guaranteed to be unique. And we've seen this
    // be the case. So, we use it to try and avoid duplicating work, but handle the case when two objects may
    // have an identity crisis.
    Integer valueIdentity = hashCodeGenerator.getHashCode(value);
    Object checkedObject = checkedObjectMap.get(valueIdentity);

    if (null == checkedObject) {
        // if we have not yet encountered an object with this hash, store it in our map and start scrubbing
        checkedObjectMap.put(valueIdentity, value);
    } else if (value == checkedObject) {
        // if we have scrubbed this already, no more work to be done
        return;
    } else {
        // we have a situation where multiple objects have the same identity hashcode, work with our
        // collision map to decide whether it needs to be scrubbed and add if necessary.
        // Note that this code block is infrequently hit, it is by design that we've pushed the extra
        // work, map, etc, involved for this infrequent case into its own block. The standard cases must
        // be as fast and lean as possible.
        boolean alreadyDetached = false;
        List<Object> collisionObjects = checkedObjectCollisionMap.get(valueIdentity);

        if (null == collisionObjects) {
            // if this is the 2nd occurrence for this hash, create a new map entry
            collisionObjects = new ArrayList<Object>(1);
            checkedObjectCollisionMap.put(valueIdentity, collisionObjects);
        } else {
            // if we have scrubbed this already, no more work to be done
            for (Object collisionObject : collisionObjects) {
                if (value == collisionObject) {
                    alreadyDetached = true;
                    break;
                }
            }
        }

        if (LOG.isDebugEnabled()) {
            StringBuilder message = new StringBuilder("\n\tIDENTITY HASHCODE COLLISION [hash=");
            message.append(valueIdentity);
            message.append(", alreadyDetached=");
            message.append(alreadyDetached);
            message.append("]");
            message.append("\n\tCurrent : ");
            message.append(value.getClass().getName());
            message.append("\n\t ");
            message.append(value);
            message.append("\n\tPrevious : ");
            message.append(checkedObject.getClass().getName());
            message.append("\n\t ");
            message.append(checkedObject);
            for (Object collisionObject : collisionObjects) {
                message.append("\n\tPrevious : ");
                message.append(collisionObject.getClass().getName());
                message.append("\n\t ");
                message.append(collisionObject);
            }
            LOG.debug(message);
        }

        // now that we've done our logging, if already detached we're done. Otherwise add to the list of
        // collision objects for this hash, and start scrubbing
        if (alreadyDetached) {
            return;
        }
        collisionObjects.add(value);
    }

    // Perform the detaching
    if (value instanceof Object[]) {
        Object[] objArray = (Object[]) value;
        for (int i = 0; i < objArray.length; i++) {
            Object listEntry = objArray[i];
            Object replaceEntry = replaceObject(listEntry);
            if (replaceEntry != null) {
                objArray[i] = replaceEntry;
            }
            nullOutUninitializedFields(objArray[i], checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
    } else if (value instanceof List) {
        // Null out any entries in initialized collections
        ListIterator i = ((List) value).listIterator();
        while (i.hasNext()) {
            Object val = i.next();
            Object replace = replaceObject(val);
            if (replace != null) {
                val = replace;
                i.set(replace);
            }
            nullOutUninitializedFields(val, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
    } else if (value instanceof Collection) {
        Collection collection = (Collection) value;
        Collection itemsToBeReplaced = new ArrayList();
        Collection replacementItems = new ArrayList();
        for (Object item : collection) {
            Object replacementItem = replaceObject(item);
            if (replacementItem != null) {
                itemsToBeReplaced.add(item);
                replacementItems.add(replacementItem);
                item = replacementItem;
            }
            nullOutUninitializedFields(item, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
        collection.removeAll(itemsToBeReplaced);
        // watch out! if this collection is a Set, HashMap$MapSet doesn't support addAll. See BZ 688000
        collection.addAll(replacementItems);
    } else if (value instanceof Map) {
        Map originalMap = (Map) value;
        HashMap<Object, Object> replaceMap = new HashMap<Object, Object>();
        for (Iterator i = originalMap.keySet().iterator(); i.hasNext();) {
            // get original key and value - these might be hibernate proxies
            Object originalKey = i.next();
            Object originalKeyValue = originalMap.get(originalKey);

            // replace with non-hibernate classes, if appropriate (will be null otherwise)
            Object replaceKey = replaceObject(originalKey);
            Object replaceValue = replaceObject(originalKeyValue);

            // if either original key or original value was a hibernate proxy object, we have to
            // remove it from the original map, and remember the replacement objects for later
            if (replaceKey != null || replaceValue != null) {
                Object newKey = (replaceKey != null) ? replaceKey : originalKey;
                Object newValue = (replaceValue != null) ? replaceValue : originalKeyValue;
                replaceMap.put(newKey, newValue);
                i.remove();
            }
        }

        // all hibernate proxies have been removed, we need to replace them with their
        // non-proxy object representations that we got from replaceObject() calls
        originalMap.putAll(replaceMap);

        // now go through each item in the map and null out their internal fields
        for (Object key : originalMap.keySet()) {
            nullOutUninitializedFields(originalMap.get(key), checkedObjectMap, checkedObjectCollisionMap,
                    depth + 1, serializationType);
            nullOutUninitializedFields(key, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
    } else if (value instanceof Enum) {
        // don't need to detach enums, treat them as special objects
        return;
    }

    if (serializationType == SerializationType.JAXB) {
        XmlAccessorType at = value.getClass().getAnnotation(XmlAccessorType.class);
        if (at != null && at.value() == XmlAccessType.FIELD) {
            nullOutFieldsByFieldAccess(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                    serializationType);
        } else {
            nullOutFieldsByAccessors(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                    serializationType);
        }
    } else if (serializationType == SerializationType.SERIALIZATION) {
        nullOutFieldsByFieldAccess(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                serializationType);
    }
}
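The javadoc above describes guarding System.identityHashCode lookups with a secondary collision map, since identity hashes are not guaranteed unique. A stripped-down sketch of just that bookkeeping, with the Hibernate specifics removed (class and method names are illustrative, not from the source):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class VisitedSet {
    private final Map<Integer, Object> first = new HashMap<>();
    private final Map<Integer, List<Object>> collisions = new HashMap<>();

    /** Returns true if value was seen before; otherwise records it and returns false. */
    boolean checkAndAdd(Object value) {
        int hash = System.identityHashCode(value);
        Object prior = first.get(hash);
        if (prior == null) {
            first.put(hash, value);
            return false;
        }
        if (prior == value) {
            return true;
        }
        // Distinct object with the same identity hash: consult the collision bucket.
        List<Object> bucket = collisions.computeIfAbsent(hash, h -> new ArrayList<>(1));
        for (Object o : bucket) {
            if (o == value) {
                return true;
            }
        }
        bucket.add(value);
        return false;
    }

    public static void main(String[] args) {
        VisitedSet seen = new VisitedSet();
        Object a = new Object();
        System.out.println(seen.checkAndAdd(a)); // false: first visit
        System.out.println(seen.checkAndAdd(a)); // true: already recorded
    }
}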
From source file:ch.flashcard.HibernateDetachUtility.java
/**
 * @param value the object needing to be detached/scrubbed.
 * @param checkedObjectMap This maps identityHashCodes to Objects we've already detached. In that way we can
 * quickly determine if we've already done the work for the incoming value and avoid traversing it again. This
 * works well almost all of the time, but it is possible that two different objects can have the same identity hash
 * (conflicts are always possible with a hash). In that case we utilize the checkedObjectCollisionMap (see below).
 * @param checkedObjectCollisionMap checkedObjectMap maps the identity hash to the *first* object with that hash. In
 * most cases there will only be a mapping for one hash, but it is possible to encounter the same hash for multiple
 * objects, especially on 32bit or IBM JVMs. It is important to know if an object has already been detached
 * because if it is somehow self-referencing, we have to stop the recursion. This map holds the 2nd..Nth mapping
 * for a single hash and is used to ensure we never try to detach an object already processed.
 * @param depth used to stop infinite recursion, defaults to a depth we don't expect to see, but it is configurable.
 * @param serializationType
 * @throws Exception if a problem occurs
 * @throws IllegalStateException if the recursion depth limit is reached
 */
private static void nullOutUninitializedFields(Object value, Map<Integer, Object> checkedObjectMap,
        Map<Integer, List<Object>> checkedObjectCollisionMap, int depth, SerializationType serializationType)
        throws Exception {
    if (depth > depthAllowed) {
        String warningMessage = "Recursed too deep [" + depth + " > " + depthAllowed
                + "], will not attempt to detach object of type ["
                + ((value != null) ? value.getClass().getName() : "N/A")
                + "]. This may cause serialization errors later. "
                + "You can try to work around this by setting the system property [" + DEPTH_ALLOWED_SYSPROP
                + "] to a value higher than [" + depth + "] or you can set the system property ["
                + THROW_EXCEPTION_ON_DEPTH_LIMIT_SYSPROP + "] to 'false'";
        LOG.warn(warningMessage);
        if (throwExceptionOnDepthLimit) {
            throw new IllegalStateException(warningMessage);
        }
        return;
    }

    if (null == value) {
        return;
    }

    // System.identityHashCode is a hash code, and therefore not guaranteed to be unique. And we've seen this
    // be the case. So, we use it to try and avoid duplicating work, but handle the case when two objects may
    // have an identity crisis.
    Integer valueIdentity = hashCodeGenerator.getHashCode(value);
    Object checkedObject = checkedObjectMap.get(valueIdentity);

    if (null == checkedObject) {
        // if we have not yet encountered an object with this hash, store it in our map and start scrubbing
        checkedObjectMap.put(valueIdentity, value);
    } else if (value == checkedObject) {
        // if we have scrubbed this already, no more work to be done
        return;
    } else {
        // we have a situation where multiple objects have the same identity hashcode, work with our
        // collision map to decide whether it needs to be scrubbed and add if necessary.
        // Note that this code block is infrequently hit, it is by design that we've pushed the extra
        // work, map, etc, involved for this infrequent case into its own block. The standard cases must
        // be as fast and lean as possible.
        boolean alreadyDetached = false;
        List<Object> collisionObjects = checkedObjectCollisionMap.get(valueIdentity);

        if (null == collisionObjects) {
            // if this is the 2nd occurrence for this hash, create a new map entry
            collisionObjects = new ArrayList<Object>(1);
            checkedObjectCollisionMap.put(valueIdentity, collisionObjects);
        } else {
            // if we have scrubbed this already, no more work to be done
            for (Object collisionObject : collisionObjects) {
                if (value == collisionObject) {
                    alreadyDetached = true;
                    break;
                }
            }
        }

        if (LOG.isDebugEnabled()) {
            StringBuilder message = new StringBuilder("\n\tIDENTITY HASHCODE COLLISION [hash=");
            message.append(valueIdentity);
            message.append(", alreadyDetached=");
            message.append(alreadyDetached);
            message.append("]");
            message.append("\n\tCurrent : ");
            message.append(value.getClass().getName());
            message.append("\n\t ");
            message.append(value);
            message.append("\n\tPrevious : ");
            message.append(checkedObject.getClass().getName());
            message.append("\n\t ");
            message.append(checkedObject);
            for (Object collisionObject : collisionObjects) {
                message.append("\n\tPrevious : ");
                message.append(collisionObject.getClass().getName());
                message.append("\n\t ");
                message.append(collisionObject);
            }
            LOG.debug(message);
        }

        // now that we've done our logging, if already detached we're done. Otherwise add to the list of
        // collision objects for this hash, and start scrubbing
        if (alreadyDetached) {
            return;
        }
        collisionObjects.add(value);
    }

    // Perform the detaching
    if (value instanceof Object[]) {
        Object[] objArray = (Object[]) value;
        for (int i = 0; i < objArray.length; i++) {
            Object listEntry = objArray[i];
            Object replaceEntry = replaceObject(listEntry);
            if (replaceEntry != null) {
                objArray[i] = replaceEntry;
            }
            nullOutUninitializedFields(objArray[i], checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
    } else if (value instanceof List) {
        // Null out any entries in initialized collections
        ListIterator i = ((List) value).listIterator();
        while (i.hasNext()) {
            Object val = i.next();
            Object replace = replaceObject(val);
            if (replace != null) {
                val = replace;
                i.set(replace);
            }
            nullOutUninitializedFields(val, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
    } else if (value instanceof Collection) {
        Collection collection = (Collection) value;
        Collection itemsToBeReplaced = new ArrayList();
        Collection replacementItems = new ArrayList();
        for (Object item : collection) {
            Object replacementItem = replaceObject(item);
            if (replacementItem != null) {
                itemsToBeReplaced.add(item);
                replacementItems.add(replacementItem);
                item = replacementItem;
            }
            nullOutUninitializedFields(item, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
        collection.removeAll(itemsToBeReplaced);
        // watch out! if this collection is a Set, HashMap$MapSet doesn't support addAll. See BZ 688000
        collection.addAll(replacementItems);
    } else if (value instanceof Map) {
        Map originalMap = (Map) value;
        HashMap<Object, Object> replaceMap = new HashMap<Object, Object>();
        for (Iterator i = originalMap.keySet().iterator(); i.hasNext();) {
            // get original key and value - these might be hibernate proxies
            Object originalKey = i.next();
            Object originalKeyValue = originalMap.get(originalKey);

            // replace with non-hibernate classes, if appropriate (will be null otherwise)
            Object replaceKey = replaceObject(originalKey);
            Object replaceValue = replaceObject(originalKeyValue);

            // if either original key or original value was a hibernate proxy object, we have to
            // remove it from the original map, and remember the replacement objects for later
            if (replaceKey != null || replaceValue != null) {
                Object newKey = (replaceKey != null) ? replaceKey : originalKey;
                Object newValue = (replaceValue != null) ? replaceValue : originalKeyValue;
                replaceMap.put(newKey, newValue);
                i.remove();
            }
        }

        // all hibernate proxies have been removed, we need to replace them with their
        // non-proxy object representations that we got from replaceObject() calls
        originalMap.putAll(replaceMap);

        // now go through each item in the map and null out their internal fields
        for (Object key : originalMap.keySet()) {
            nullOutUninitializedFields(originalMap.get(key), checkedObjectMap, checkedObjectCollisionMap,
                    depth + 1, serializationType);
            nullOutUninitializedFields(key, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
    } else if (value instanceof Enum) {
        // don't need to detach enums, treat them as special objects
        return;
    }

    if (serializationType == SerializationType.JAXB) {
        XmlAccessorType at = (XmlAccessorType) value.getClass().getAnnotation(XmlAccessorType.class);
        if (at != null && at.value() == XmlAccessType.FIELD) {
            nullOutFieldsByFieldAccess(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                    serializationType);
        } else {
            nullOutFieldsByAccessors(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                    serializationType);
        }
    } else if (serializationType == SerializationType.SERIALIZATION) {
        nullOutFieldsByFieldAccess(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                serializationType);
    }
}
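Besides remove(), the List branch in both versions above relies on ListIterator.set() to swap a proxy for its replacement in place. A minimal, hypothetical illustration of that pattern (the "proxy:" prefix is made up for the example):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

public class InPlaceReplace {
    public static void main(String[] args) {
        List<String> values = new ArrayList<>(Arrays.asList("keep", "proxy:42", "keep"));
        ListIterator<String> it = values.listIterator();
        while (it.hasNext()) {
            String v = it.next();
            // set() replaces the element last returned by next(), the same move the
            // List branch above uses to substitute Hibernate proxies in place.
            if (v.startsWith("proxy:")) {
                it.set(v.substring("proxy:".length()));
            }
        }
        System.out.println(values); // [keep, 42, keep]
    }
}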
From source file:org.mobicents.servlet.restcomm.telephony.Call.java
private void sendBye(Hangup hangup) throws IOException, TransitionNotFoundException, TransitionFailedException,
        TransitionRollbackException {
    final SipSession session = invite.getSession();
    String sessionState = session.getState().name();
    if (sessionState.equals(SipSession.State.INITIAL.name())
            || (sessionState.equals(SipSession.State.EARLY.name()) && isInbound())) {
        final SipServletResponse resp = invite.createResponse(Response.SERVER_INTERNAL_ERROR);
        if (hangup.getMessage() != null && !hangup.getMessage().equals("")) {
            resp.addHeader("Reason", hangup.getMessage());
        }
        addCustomHeaders(resp);
        resp.send();
        fsm.transition(hangup, completed);
        return;
    }
    if (sessionState.equals(SipSession.State.EARLY.name())) {
        final SipServletRequest cancel = invite.createCancel();
        if (hangup.getMessage() != null && !hangup.getMessage().equals("")) {
            cancel.addHeader("Reason", hangup.getMessage());
        }
        addCustomHeaders(cancel);
        cancel.send();
        fsm.transition(hangup, completed);
        return;
    } else {
        final SipServletRequest bye = session.createRequest("BYE");
        addCustomHeaders(bye);
        if (hangup.getMessage() != null && !hangup.getMessage().equals("")) {
            bye.addHeader("Reason", hangup.getMessage());
        }
        SipURI realInetUri = (SipURI) session.getAttribute("realInetUri");
        InetAddress byeRURI = InetAddress.getByName(((SipURI) bye.getRequestURI()).getHost());

        // Sample INVITE as received through the load balancer:
        // INVITE sip:+12055305520@107.21.247.251 SIP/2.0
        // Record-Route: <sip:10.154.28.245:5065;transport=udp;lr;node_host=10.13.169.214;node_port=5080;version=0>
        // Record-Route: <sip:10.154.28.245:5060;transport=udp;lr;node_host=10.13.169.214;node_port=5080;version=0>
        // Record-Route: <sip:67.231.8.195;lr=on;ftag=gK0043eb81>
        // Record-Route: <sip:67.231.4.204;r2=on;lr=on;ftag=gK0043eb81>
        // Record-Route: <sip:192.168.6.219;r2=on;lr=on;ftag=gK0043eb81>
        // Accept: application/sdp
        // Allow: INVITE,ACK,CANCEL,BYE
        // Via: SIP/2.0/UDP 10.154.28.245:5065;branch=z9hG4bK1cdb.193075b2.058724zsd_0
        // Via: SIP/2.0/UDP 10.154.28.245:5060;branch=z9hG4bK1cdb.193075b2.058724_0
        // Via: SIP/2.0/UDP 67.231.8.195;branch=z9hG4bK1cdb.193075b2.0
        // Via: SIP/2.0/UDP 67.231.4.204;branch=z9hG4bK1cdb.f9127375.0
        // Via: SIP/2.0/UDP 192.168.16.114:5060;branch=z9hG4bK00B6ff7ff87ed50497f
        // From: <sip:+1302109762259@192.168.16.114>;tag=gK0043eb81
        // To: <sip:12055305520@192.168.6.219>
        // Call-ID: 587241765_133360558@192.168.16.114
        // CSeq: 393447729 INVITE
        // Max-Forwards: 67
        // Contact: <sip:+1302109762259@192.168.16.114:5060>
        // Diversion: <sip:+112055305520@192.168.16.114:5060>;privacy=off;screen=no; reason=unknown; counter=1
        // Supported: replaces
        // Content-Disposition: session;handling=required
        // Content-Type: application/sdp
        // Remote-Party-ID: <sip:+1302109762259@192.168.16.114:5060>;privacy=off;screen=no
        // X-Sip-Balancer-InitialRemoteAddr: 67.231.8.195
        // X-Sip-Balancer-InitialRemotePort: 5060
        // Route: <sip:10.13.169.214:5080;transport=udp;lr>
        // Content-Length: 340
        ListIterator<String> recordRouteList = invite.getHeaders(RecordRouteHeader.NAME);

        if (invite.getHeader("X-Sip-Balancer-InitialRemoteAddr") != null) {
            if (logger.isInfoEnabled()) {
                logger.info("We are behind LoadBalancer and will remove the first two RecordRoutes since they are the LB node");
            }
            recordRouteList.next();
            recordRouteList.remove();
            recordRouteList.next();
            recordRouteList.remove();
        }
        if (recordRouteList.hasNext()) {
            if (logger.isInfoEnabled()) {
                logger.info("Record Route is set, won't change the Request URI");
            }
        } else {
            if (logger.isInfoEnabled()) {
                logger.info("Checking RURI, realInetUri: " + realInetUri + " byeRURI: " + byeRURI);
            }
            if (logger.isDebugEnabled()) {
                logger.debug("byeRURI.isSiteLocalAddress(): " + byeRURI.isSiteLocalAddress());
                logger.debug("byeRURI.isAnyLocalAddress(): " + byeRURI.isAnyLocalAddress());
                logger.debug("byeRURI.isLoopbackAddress(): " + byeRURI.isLoopbackAddress());
            }
            if (realInetUri != null && (byeRURI.isSiteLocalAddress() || byeRURI.isAnyLocalAddress()
                    || byeRURI.isLoopbackAddress())) {
                if (logger.isInfoEnabled()) {
                    logger.info("Using the real ip address of the sip client " + realInetUri.toString()
                            + " as a request uri of the BYE request");
                }
                bye.setRequestURI(realInetUri);
            }
        }
        if (logger.isInfoEnabled()) {
            logger.info("Will send out BYE to: " + bye.getRequestURI());
        }
        bye.send();
    }
}
From source file:org.commonjava.maven.ext.common.model.Project.java
private void resolvePlugins(MavenSessionHandler session, List<Plugin> plugins,
        HashMap<ProjectVersionRef, Plugin> resolvedPlugins) throws ManipulationException {
    ListIterator<Plugin> iterator = plugins.listIterator(plugins.size());

    // Iterate in reverse order so later plugins take precedence
    while (iterator.hasPrevious()) {
        Plugin p = iterator.previous();
        String g = PropertyResolver.resolveInheritedProperties(session, this,
                "${project.groupId}".equals(p.getGroupId()) ? getGroupId() : p.getGroupId());
        String a = PropertyResolver.resolveInheritedProperties(session, this,
                "${project.artifactId}".equals(p.getArtifactId()) ? getArtifactId() : p.getArtifactId());
        String v = PropertyResolver.resolveInheritedProperties(session, this, p.getVersion());

        // It's possible the internal plugin list is either abbreviated or empty. Attempt to fill in
        // default values for comparison purposes.
        if (isEmpty(g)) {
            g = PLUGIN_DEFAULTS.getDefaultGroupId(a);
        }
        // Theoretically we could default an empty v via PLUGIN_DEFAULTS.getDefaultVersion( g, a ) but
        // this means managed plugins would be included which confuses things.
        if (isNotEmpty(g) && isNotEmpty(a) && isNotEmpty(v)) {
            SimpleProjectVersionRef spv = new SimpleProjectVersionRef(g, a, v);

            // If the GAV already exists within the map it means we have a duplicate entry. While Maven
            // technically allows this it does warn that this leads to unstable models. In the PME case this
            // breaks the indexing as we don't have duplicate entries. Given they are exact matches, remove
            // the older duplicate.
            if (resolvedPlugins.containsKey(spv)) {
                logger.error("Found duplicate entry within plugin list. Key of {} and plugin {}", spv, p);
                iterator.remove();
            } else {
                Plugin old = resolvedPlugins.put(spv, p);
                if (old != null) {
                    logger.error("Internal project plugin resolution failure ; replaced {} in store by {}.",
                            old, spv);
                    throw new ManipulationException(
                            "Internal project plugin resolution failure ; replaced " + old + " by " + spv);
                }
            }
        }
    }
}
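The reverse traversal above, listIterator(plugins.size()) combined with hasPrevious()/previous()/remove(), is the standard way to delete while walking a list backwards so that later entries win. A small self-contained sketch of the same idea (class name and data are made up):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.ListIterator;
import java.util.Set;

public class ReverseDedup {
    public static void main(String[] args) {
        List<String> keys = new ArrayList<>(Arrays.asList("a", "b", "a", "c", "b"));
        Set<String> seen = new HashSet<>();
        // Start past the end and walk backwards so later occurrences take precedence.
        ListIterator<String> it = keys.listIterator(keys.size());
        while (it.hasPrevious()) {
            if (!seen.add(it.previous())) {
                it.remove(); // drops the earlier duplicate just returned by previous()
            }
        }
        System.out.println(keys); // [a, c, b]
    }
}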
From source file:org.trnltk.experiment.morphology.ambiguity.DataDiffUtil.java
/**
 * Reorder and merge like edit sections. Merge equalities.
 * Any edit section can move as long as it doesn't cross an equality.
 *
 * @param diffs LinkedList of Diff objects.
 */
public void diff_cleanupMerge(LinkedList<Diff<T>> diffs) {
    diffs.add(new Diff<T>(Operation.EQUAL, new ArrayList<T>())); // Add a dummy entry at the end.
    ListIterator<Diff<T>> pointer = diffs.listIterator();
    int count_delete = 0;
    int count_insert = 0;
    List<T> text_delete = new ArrayList<T>();
    List<T> text_insert = new ArrayList<T>();
    Diff<T> thisDiff = pointer.next();
    Diff<T> prevEqual = null;
    int commonlength;
    while (thisDiff != null) {
        switch (thisDiff.operation) {
        case INSERT:
            count_insert++;
            text_insert = ListUtils.union(text_insert, thisDiff.text);
            prevEqual = null;
            break;
        case DELETE:
            count_delete++;
            text_delete = ListUtils.union(text_delete, thisDiff.text);
            prevEqual = null;
            break;
        case EQUAL:
            if (count_delete + count_insert > 1) {
                boolean both_types = count_delete != 0 && count_insert != 0;
                // Delete the offending records.
                pointer.previous(); // Reverse direction.
                while (count_delete-- > 0) {
                    pointer.previous();
                    pointer.remove();
                }
                while (count_insert-- > 0) {
                    pointer.previous();
                    pointer.remove();
                }
                if (both_types) {
                    // Factor out any common prefixes.
                    commonlength = diff_commonPrefix(text_insert, text_delete);
                    if (commonlength != 0) {
                        if (pointer.hasPrevious()) {
                            thisDiff = pointer.previous();
                            assert thisDiff.operation == Operation.EQUAL : "Previous diff should have been an equality.";
                            thisDiff.text = ListUtils.union(thisDiff.text,
                                    text_insert.subList(0, commonlength));
                            pointer.next();
                        } else {
                            pointer.add(new Diff<T>(Operation.EQUAL, text_insert.subList(0, commonlength)));
                        }
                        text_insert = text_insert.subList(commonlength, text_insert.size());
                        text_delete = text_delete.subList(commonlength, text_delete.size());
                    }
                    // Factor out any common suffixes.
                    commonlength = diff_commonSuffix(text_insert, text_delete);
                    if (commonlength != 0) {
                        thisDiff = pointer.next();
                        thisDiff.text = ListUtils.union(
                                text_insert.subList(text_insert.size() - commonlength, text_insert.size()),
                                thisDiff.text);
                        text_insert = text_insert.subList(0, text_insert.size() - commonlength);
                        text_delete = text_delete.subList(0, text_delete.size() - commonlength);
                        pointer.previous();
                    }
                }
                // Insert the merged records.
                if (text_delete.size() != 0) {
                    pointer.add(new Diff<T>(Operation.DELETE, text_delete));
                }
                if (text_insert.size() != 0) {
                    pointer.add(new Diff<T>(Operation.INSERT, text_insert));
                }
                // Step forward to the equality.
                thisDiff = pointer.hasNext() ? pointer.next() : null;
            } else if (prevEqual != null) {
                // Merge this equality with the previous one.
                prevEqual.text = ListUtils.union(prevEqual.text, thisDiff.text);
                pointer.remove();
                thisDiff = pointer.previous();
                pointer.next(); // Forward direction
            }
            count_insert = 0;
            count_delete = 0;
            text_delete = new ArrayList<T>();
            text_insert = new ArrayList<T>();
            prevEqual = thisDiff;
            break;
        }
        thisDiff = pointer.hasNext() ? pointer.next() : null;
    }
    if (diffs.getLast().text.size() == 0) {
        diffs.removeLast(); // Remove the dummy entry at the end.
    }

    /*
     * Second pass: look for single edits surrounded on both sides by equalities
     * which can be shifted sideways to eliminate an equality.
     * e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
     */
    boolean changes = false;
    // Create a new iterator at the start.
    // (As opposed to walking the current one back.)
    pointer = diffs.listIterator();
    Diff<T> prevDiff = pointer.hasNext() ? pointer.next() : null;
    thisDiff = pointer.hasNext() ? pointer.next() : null;
    Diff<T> nextDiff = pointer.hasNext() ? pointer.next() : null;
    // Intentionally ignore the first and last element (don't need checking).
    while (nextDiff != null) {
        if (prevDiff.operation == Operation.EQUAL && nextDiff.operation == Operation.EQUAL) {
            // This is a single edit surrounded by equalities.
            if (endsWith(thisDiff.text, prevDiff.text)) {
                // Shift the edit over the previous equality.
                thisDiff.text = ListUtils.union(prevDiff.text,
                        thisDiff.text.subList(0, thisDiff.text.size() - prevDiff.text.size()));
                nextDiff.text = ListUtils.union(prevDiff.text, nextDiff.text);
                pointer.previous(); // Walk past nextDiff.
                pointer.previous(); // Walk past thisDiff.
                pointer.previous(); // Walk past prevDiff.
                pointer.remove(); // Delete prevDiff.
                pointer.next(); // Walk past thisDiff.
                thisDiff = pointer.next(); // Walk past nextDiff.
                nextDiff = pointer.hasNext() ? pointer.next() : null;
                changes = true;
            } else if (startsWith(thisDiff.text, nextDiff.text)) {
                // Shift the edit over the next equality.
                prevDiff.text = ListUtils.union(prevDiff.text, nextDiff.text);
                thisDiff.text = ListUtils.union(
                        thisDiff.text.subList(nextDiff.text.size(), thisDiff.text.size()), nextDiff.text);
                pointer.remove(); // Delete nextDiff.
                nextDiff = pointer.hasNext() ? pointer.next() : null;
                changes = true;
            }
        }
        prevDiff = thisDiff;
        thisDiff = nextDiff;
        nextDiff = pointer.hasNext() ? pointer.next() : null;
    }
    // If shifts were made, the diff needs reordering and another shift sweep.
    if (changes) {
        diff_cleanupMerge(diffs);
    }
}
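The merge pass above repeatedly calls pointer.previous() followed by pointer.remove() to delete records the cursor has already walked past; remove() always targets the element last returned, whether by next() or previous(). A tiny sketch of that reverse-then-remove move (the list contents are illustrative):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.ListIterator;

public class DeleteBehindCursor {
    public static void main(String[] args) {
        LinkedList<String> ops = new LinkedList<>(Arrays.asList("ins", "del", "eq"));
        ListIterator<String> pointer = ops.listIterator();
        pointer.next(); // returns "ins"
        pointer.next(); // returns "del"
        // Back up one step and remove: remove() deletes the element last returned,
        // here by previous(), mirroring the cleanup loop above.
        pointer.previous();
        pointer.remove(); // deletes "del"
        System.out.println(ops); // [ins, eq]
    }
}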
From source file:org.opennms.ng.services.collectd.Collectd.java
/**
 * Process the 'primarySnmpInterfaceChanged' event. Extract the old and
 * new primary SNMP interface addresses from the event parms. Any
 * CollectableService objects located in the collectable services list
 * which match the IP address of the old primary interface and have a
 * service name of "SNMP" are flagged for deletion. This will ensure that
 * the old primary interface is no longer collected against. Finally the
 * new primary SNMP interface is scheduled. The packages are examined and
 * new CollectableService objects are created, initialized and scheduled
 * for collection.
 *
 * @param event The event to process.
 * @throws InsufficientInformationException
 */
private void handlePrimarySnmpInterfaceChanged(Event event) throws InsufficientInformationException {
    EventUtils.checkNodeId(event);
    EventUtils.checkInterface(event);

    LOG.debug("primarySnmpInterfaceChangedHandler: processing primary SNMP interface changed event...");

    // Currently only support SNMP data collection.
    if (!event.getService().equals("SNMP")) {
        return;
    }

    // Extract the old and new primary SNMP interface addresses from the event parms.
    String oldPrimaryIfAddr = null;
    String parmName = null;
    Value parmValue = null;
    String parmContent = null;

    for (Parm parm : event.getParmCollection()) {
        parmName = parm.getParmName();
        parmValue = parm.getValue();
        if (parmValue == null) {
            continue;
        } else {
            parmContent = parmValue.getContent();
        }

        // old primary SNMP interface (optional parameter)
        if (parmName.equals(EventConstants.PARM_OLD_PRIMARY_SNMP_ADDRESS)) {
            oldPrimaryIfAddr = parmContent;
        }
    }

    if (oldPrimaryIfAddr != null) {
        // Mark the service for deletion so that it will not be rescheduled for collection.
        //
        // Iterate over the CollectableService objects in the service updates map
        // and mark any which have the same interface address as the old
        // primary SNMP interface and a service name of "SNMP" for deletion.
        synchronized (getCollectableServices()) {
            CollectableService cSvc = null;
            ListIterator<CollectableService> liter = getCollectableServices().listIterator();
            while (liter.hasNext()) {
                cSvc = liter.next();

                final InetAddress addr = (InetAddress) cSvc.getAddress();
                final String addrString = str(addr);
                if (addrString != null && addrString.equals(oldPrimaryIfAddr)) {
                    synchronized (cSvc) {
                        // Got a match! Retrieve the CollectorUpdates object
                        // associated with this CollectableService.
                        CollectorUpdates updates = cSvc.getCollectorUpdates();

                        // Now set the deleted flag
                        updates.markForDeletion();
                        LOG.debug("primarySnmpInterfaceChangedHandler: marking {} as deleted for service SNMP.",
                                oldPrimaryIfAddr);
                    }

                    // Now safe to remove the collectable service from
                    // the collectable services list
                    liter.remove();
                }
            }
        }
    }

    // Now we can schedule the new service...
    scheduleForCollection(event);

    LOG.debug(
            "primarySnmpInterfaceChangedHandler: processing of primarySnmpInterfaceChanged event for nodeid {} completed.",
            event.getNodeid());
}
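Note that the removal loop above runs inside synchronized (getCollectableServices()): iterating a list shared across threads requires holding its lock for the whole ListIterator walk, since the iterator itself is not thread-safe. A minimal illustration, assuming a Collections.synchronizedList wrapper (the data and class name are made up, not from OpenNMS):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.ListIterator;

public class SynchronizedRemoval {
    public static void main(String[] args) {
        List<Integer> ids = Collections.synchronizedList(new ArrayList<>(Arrays.asList(1, 2, 3, 4)));
        // The wrapper only synchronizes individual method calls; the whole
        // iterate-and-remove sequence needs an explicit lock on the list.
        synchronized (ids) {
            ListIterator<Integer> it = ids.listIterator();
            while (it.hasNext()) {
                if (it.next() % 2 == 0) {
                    it.remove();
                }
            }
        }
        System.out.println(ids); // [1, 3]
    }
}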
From source file:org.opennms.ng.services.collectd.Collectd.java
private void unscheduleNodeAndMarkForDeletion(Long nodeId) {
    // Iterate over the collectable service list and mark any entries
    // which match the deleted nodeId for deletion.
    synchronized (getCollectableServices()) {
        CollectableService cSvc = null;
        final ListIterator<CollectableService> liter = getCollectableServices().listIterator();
        while (liter.hasNext()) {
            cSvc = liter.next();

            // Only interested in entries with matching nodeId
            if (!(cSvc.getNodeId() == nodeId)) {
                continue;
            }

            synchronized (cSvc) {
                // Retrieve the CollectorUpdates object associated
                // with this CollectableService.
                CollectorUpdates updates = cSvc.getCollectorUpdates();

                // Now set the update's deletion flag so the next
                // time it is selected for execution by the scheduler
                // the collection will be skipped and the service will not
                // be rescheduled.
                LOG.debug(
                        "Marking CollectableService for deletion because a node was deleted: Service nodeid={}, deleted node:{}",
                        cSvc.getNodeId(), nodeId);
                updates.markForDeletion();
            }

            // Now safe to remove the collectable service from
            // the collectable services list
            liter.remove();
        }
    }
}
From source file:io.apiman.manager.api.es.EsStorage.java
/**
 * @see io.apiman.manager.api.core.IStorage#reorderPolicies(io.apiman.manager.api.beans.policies.PolicyType, java.lang.String, java.lang.String, java.lang.String, java.util.List)
 */
@Override
public void reorderPolicies(PolicyType type, String organizationId, String entityId, String entityVersion,
        List<Long> newOrder) throws StorageException {
    String docType = getPoliciesDocType(type);
    String pid = id(organizationId, entityId, entityVersion);
    Map<String, Object> source = getEntity(docType, pid);
    if (source == null) {
        return;
    }
    PoliciesBean policiesBean = EsMarshalling.unmarshallPolicies(source);
    List<PolicyBean> policies = policiesBean.getPolicies();
    List<PolicyBean> reordered = new ArrayList<>(policies.size());
    for (Long policyId : newOrder) {
        ListIterator<PolicyBean> iterator = policies.listIterator();
        while (iterator.hasNext()) {
            PolicyBean policyBean = iterator.next();
            if (policyBean.getId().equals(policyId)) {
                iterator.remove();
                reordered.add(policyBean);
                break;
            }
        }
    }
    // Make sure we don't stealth-delete any policies. Put anything
    // remaining at the end of the list.
    for (PolicyBean policyBean : policies) {
        reordered.add(policyBean);
    }
    policiesBean.setPolicies(reordered);
    updateEntity(docType, pid, EsMarshalling.marshall(policiesBean));
}
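The pattern above, pulling matched elements out with iterator.remove() and then appending whatever is left so nothing is stealth-deleted, generalizes to any stable reorder-by-key. A compact, hypothetical sketch (names and data are invented for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

public class ReorderByIds {
    // Reorders items to follow newOrder; anything not mentioned keeps a trailing position.
    static List<String> reorder(List<String> items, List<String> newOrder) {
        List<String> result = new ArrayList<>(items.size());
        for (String id : newOrder) {
            ListIterator<String> it = items.listIterator();
            while (it.hasNext()) {
                if (it.next().equals(id)) {
                    it.remove(); // pull the matched item out so it cannot be added twice
                    result.add(id);
                    break;
                }
            }
        }
        result.addAll(items); // leftovers go at the end, mirroring the guard above
        return result;
    }

    public static void main(String[] args) {
        List<String> items = new ArrayList<>(Arrays.asList("p1", "p2", "p3"));
        System.out.println(reorder(items, Arrays.asList("p3", "p1"))); // [p3, p1, p2]
    }
}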
From source file:org.opennms.netmgt.collectd.Collectd.java
private void unscheduleNodeAndMarkForDeletion(Long nodeId) {
    // Iterate over the collectable service list and mark any entries
    // which match the deleted nodeId for deletion.
    synchronized (getCollectableServices()) {
        CollectableService cSvc = null;
        final ListIterator<CollectableService> liter = getCollectableServices().listIterator();
        while (liter.hasNext()) {
            cSvc = liter.next();

            // Only interested in entries with matching nodeId
            if (!(cSvc.getNodeId() == nodeId))
                continue;

            synchronized (cSvc) {
                // Retrieve the CollectorUpdates object associated
                // with this CollectableService.
                CollectorUpdates updates = cSvc.getCollectorUpdates();

                // Now set the update's deletion flag so the next
                // time it is selected for execution by the scheduler
                // the collection will be skipped and the service will not
                // be rescheduled.
                LOG.debug(
                        "Marking CollectableService for deletion because a node was deleted: Service nodeid={}, deleted node:{}",
                        cSvc.getNodeId(), nodeId);
                updates.markForDeletion();
            }

            // Now safe to remove the collectable service from
            // the collectable services list
            liter.remove();
        }
    }
}