List of usage examples for java.util.LinkedList.isEmpty()
boolean isEmpty();
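Before the project-level examples, a minimal self-contained illustration of the call (not from any of the projects below; the values are arbitrary):

import java.util.LinkedList;

public class IsEmptyDemo {
    public static void main(String[] args) {
        LinkedList<String> queue = new LinkedList<String>();
        System.out.println(queue.isEmpty()); // true: nothing added yet
        queue.add("first");
        System.out.println(queue.isEmpty()); // false
        queue.poll();                        // drain the single element
        System.out.println(queue.isEmpty()); // true again
    }
}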
From source file:org.trnltk.util.DiffUtil.java
public static String[] diffLines(final String line1, final String line2, final boolean ignoreWhiteSpace) {
    final diff_match_patch dmp = new diff_match_patch();
    final LinkedList<diff_match_patch.Diff> diffs = dmp.diff_main(line1, line2);
    if (CollectionUtils.isEmpty(diffs)) {
        return null;
    } else {
        if (ignoreWhiteSpace) {
            final LinkedList<diff_match_patch.Diff> filteredDiffs = Lists
                    .newLinkedList(Iterables.filter(diffs, new Predicate<diff_match_patch.Diff>() {
                        @Override
                        public boolean apply(diff_match_patch.Diff input) {
                            if (input.operation.equals(diff_match_patch.Operation.EQUAL))
                                return false;
                            else if (StringUtils.isBlank(input.text))
                                return false;
                            return true;
                        }
                    }));
            if (filteredDiffs.isEmpty())
                return null;
        }
        dmp.diff_cleanupSemantic(diffs);
        final String[] diffLines = diffToFormattedLines(diffs, ignoreWhiteSpace);
        if (ignoreWhiteSpace) {
            if (StringUtils.isBlank(diffLines[1]) && StringUtils.isBlank(diffLines[2]))
                return null;
        }
        return diffLines;
    }
}
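A hedged call-site sketch for the method above; the input strings and the demo class are made up, only the diffLines signature shown above is assumed, and the meaning of the returned array elements depends on diffToFormattedLines:

import org.trnltk.util.DiffUtil;

public class DiffLinesDemo {
    public static void main(String[] args) {
        // A null return means no meaningful difference once whitespace-only changes are filtered out.
        String[] diff = DiffUtil.diffLines("elma ver", "elma al", true);
        if (diff == null) {
            System.out.println("lines are equivalent (ignoring whitespace)");
        } else {
            for (String formatted : diff) {
                System.out.println(formatted);
            }
        }
    }
}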
From source file:org.dcm4chee.storage.conf.DeepEquals.java
/**
 * Compare two objects with a 'deep' comparison. This will traverse the
 * Object graph and perform either a field-by-field comparison on each
 * object (if no .equals() method has been overridden from Object), or it
 * will call the customized .equals() method if it exists. This method will
 * allow object graphs loaded at different times (with different object ids)
 * to be reliably compared. Object.equals() / Object.hashCode() rely on the
 * object's identity, which would not necessarily consider two equivalent
 * objects equal. This allows graphs containing instances of classes that did
 * not override .equals() / .hashCode() to be compared. For example, testing for
 * existence in a cache. Relying on an object's identity will not locate an
 * object in the cache, yet relying on it being equivalent will.<br/><br/>
 *
 * This method will handle cycles correctly, for example A->B->C->A. Suppose a and
 * a' are two separate instances of A with the same values for all fields on
 * A, B, and C. Then a.deepEquals(a') will return true. It uses cycle detection,
 * storing visited objects in a Set, to prevent endless loops.
 *
 * @param a Object one to compare
 * @param b Object two to compare
 * @return true if a is equivalent to b, false otherwise. Equivalent means that
 *         all field values of both subgraphs are the same, either at the field level
 *         or via the respectively encountered overridden .equals() methods during
 *         traversal.
 */
public static boolean deepEquals(Object a, Object b) {
    Set visited = new HashSet<DualKey>();
    LinkedList<DualKey> stack = new LinkedList<DualKey>();
    stack.addFirst(new DualKey(a, b));

    while (!stack.isEmpty()) {
        DualKey dualKey = stack.removeFirst();
        lastDualKey = dualKey;
        visited.add(dualKey);

        if (dualKey._key1 == dualKey._key2) {
            // Same instance is always equal to itself.
            continue;
        }

        if (dualKey._key1 == null || dualKey._key2 == null) {
            // Check whether one side is null and the other is an empty array.
            if (dualKey._key1 == null) {
                if (dualKey._key2.getClass().isArray() && ((Object[]) dualKey._key2).length == 0)
                    continue;
            }
            if (dualKey._key2 == null) {
                if (dualKey._key1.getClass().isArray() && ((Object[]) dualKey._key1).length == 0)
                    continue;
            }
            // If either one is null, not equal (both can't be null, due to the comparison above).
            return false;
        }

        if (dualKey._key1 instanceof Map && dualKey._key2 instanceof Map) {
            // If they are maps, do not insist on the exact same class.
        } else if (!dualKey._key1.getClass().equals(dualKey._key2.getClass())) {
            // Must be the same class.
            return false;
        }

        // Handle all [] types. In order to be equal, the arrays must be the same
        // length, be of the same type, be in the same order, and all elements within
        // the array must be deeply equivalent.
        if (dualKey._key1.getClass().isArray()) {
            if (!compareArrays(dualKey._key1, dualKey._key2, stack, visited)) {
                return false;
            }
            continue;
        }

        // Special-case SortedSets: they are fast to compare because their
        // elements must be in the same order to be equivalent Sets.
        if (dualKey._key1 instanceof SortedSet) {
            if (!compareOrderedCollection((Collection) dualKey._key1, (Collection) dualKey._key2, stack, visited)) {
                return false;
            }
            continue;
        }

        // Handle unordered Sets. This is a slightly more expensive comparison because order cannot
        // be assumed and a temporary Map must be created; however, the comparison still runs in O(N) time.
        if (dualKey._key1 instanceof Set) {
            if (!compareUnorderedCollection((Collection) dualKey._key1, (Collection) dualKey._key2, stack, visited)) {
                return false;
            }
            continue;
        }

        // Check any Collection that is not a Set. In these cases, element order
        // matters, therefore this comparison is faster than the unordered comparison.
        if (dualKey._key1 instanceof Collection) {
            if (!compareOrderedCollection((Collection) dualKey._key1, (Collection) dualKey._key2, stack, visited)) {
                return false;
            }
            continue;
        }

        // Compare two SortedMaps. This takes advantage of the fact that these
        // Maps can be compared in O(N) time due to their ordering.
        if (dualKey._key1 instanceof SortedMap) {
            if (!compareSortedMap((Map) dualKey._key1, (Map) dualKey._key2, stack, visited)) {
                return false;
            }
            continue;
        }

        // Compare two unordered Maps. This is a slightly more expensive comparison because
        // order cannot be assumed and a temporary Map must be created; however, the
        // comparison still runs in O(N) time.
        if (dualKey._key1 instanceof Map) {
            if (!compareUnorderedMap((Map) dualKey._key1, (Map) dualKey._key2, stack, visited)) {
                return false;
            }
            continue;
        }

        if (hasCustomEquals(dualKey._key1.getClass())) {
            if (!dualKey._key1.equals(dualKey._key2)) {
                return false;
            }
            continue;
        }

        lastObject = dualKey._key1;

        // Check whether we have a custom deepEquals implementation for this class.
        CustomDeepEquals de = customDeepEquals.get(dualKey._key1.getClass());
        if (de != null) {
            if (!de.deepEquals(dualKey._key1, dualKey._key2))
                return false;
        } else {
            Collection<Field> fields = getDeepDeclaredFields(dualKey._key1.getClass());
            for (Field field : fields) {
                try {
                    DualKey dk = new DualKey(field.get(dualKey._key1), field.get(dualKey._key2), field.getName());
                    if (!visited.contains(dk)) {
                        stack.addFirst(dk);
                    }
                } catch (Exception ignored) {
                }
            }
        }
    }
    return true;
}
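Since deepEquals is public and static, a call site can be sketched directly; the Person class, the import, and the expected output below are assumptions for illustration, not part of the dcm4chee sources:

import org.dcm4chee.storage.conf.DeepEquals;

public class DeepEqualsDemo {
    // Hypothetical value class that does NOT override equals()/hashCode().
    static class Person {
        String name;
        int age;
        Person(String name, int age) { this.name = name; this.age = age; }
    }

    public static void main(String[] args) {
        Person a = new Person("Ada", 36);
        Person b = new Person("Ada", 36);
        // Field-by-field traversal reports equivalence even though equals() falls back to identity.
        System.out.println(DeepEquals.deepEquals(a, b)); // expected: true
        System.out.println(a.equals(b));                 // false: Object identity
    }
}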
From source file:org.dcm4chee.archive.conf.defaults.DeepEquals.java
/**
 * Compare two objects with a 'deep' comparison. This will traverse the
 * Object graph and perform either a field-by-field comparison on each
 * object (if no .equals() method has been overridden from Object), or it
 * will call the customized .equals() method if it exists. This method will
 * allow object graphs loaded at different times (with different object ids)
 * to be reliably compared. Object.equals() / Object.hashCode() rely on the
 * object's identity, which would not necessarily consider two equivalent
 * objects equal. This allows graphs containing instances of classes that did
 * not override .equals() / .hashCode() to be compared. For example, testing for
 * existence in a cache. Relying on an object's identity will not locate an
 * object in the cache, yet relying on it being equivalent will.<br/><br/>
 *
 * This method will handle cycles correctly, for example A->B->C->A. Suppose a and
 * a' are two separate instances of A with the same values for all fields on
 * A, B, and C. Then a.deepEquals(a') will return true. It uses cycle detection,
 * storing visited objects in a Set, to prevent endless loops.
 *
 * @param a Object one to compare
 * @param b Object two to compare
 * @return true if a is equivalent to b, false otherwise. Equivalent means that
 *         all field values of both subgraphs are the same, either at the field level
 *         or via the respectively encountered overridden .equals() methods during
 *         traversal.
 */
public static boolean deepEquals(Object a, Object b) {
    Set visited = new HashSet<DualKey>();
    LinkedList<DualKey> stack = new LinkedList<DualKey>();
    stack.addFirst(new DualKey(a, b, null));

    while (!stack.isEmpty()) {
        DualKey dualKey = stack.removeFirst();
        lastDualKey = dualKey;
        visited.add(dualKey);

        if (dualKey._key1 == dualKey._key2) {
            // Same instance is always equal to itself.
            continue;
        }

        if (dualKey._key1 == null || dualKey._key2 == null) {
            // Check whether one side is null and the other is an empty array.
            if (dualKey._key1 == null) {
                if (dualKey._key2.getClass().isArray() && ((Object[]) dualKey._key2).length == 0)
                    continue;
            }
            if (dualKey._key2 == null) {
                if (dualKey._key1.getClass().isArray() && ((Object[]) dualKey._key1).length == 0)
                    continue;
            }
            // If either one is null, not equal (both can't be null, due to the comparison above).
            return false;
        }

        if (!dualKey._key1.getClass().equals(dualKey._key2.getClass())) {
            // Must be the same class.
            return false;
        }

        // Handle all [] types. In order to be equal, the arrays must be the same
        // length, be of the same type, be in the same order, and all elements within
        // the array must be deeply equivalent.
        if (dualKey._key1.getClass().isArray()) {
            if (!compareArrays(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Special-case SortedSets: they are fast to compare because their
        // elements must be in the same order to be equivalent Sets.
        if (dualKey._key1 instanceof SortedSet) {
            if (!compareOrderedCollection(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Handle unordered Sets. This is a slightly more expensive comparison because order cannot
        // be assumed and a temporary Map must be created; however, the comparison still runs in O(N) time.
        if (dualKey._key1 instanceof Set) {
            if (!compareUnorderedCollection(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Check any Collection that is not a Set. In these cases, element order
        // matters, therefore this comparison is faster than the unordered comparison.
        if (dualKey._key1 instanceof Collection) {
            if (!compareOrderedCollection(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Compare two SortedMaps. This takes advantage of the fact that these
        // Maps can be compared in O(N) time due to their ordering.
        if (dualKey._key1 instanceof SortedMap) {
            if (!compareSortedMap(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Compare two unordered Maps. This is a slightly more expensive comparison because
        // order cannot be assumed and a temporary Map must be created; however, the
        // comparison still runs in O(N) time.
        if (dualKey._key1 instanceof Map) {
            if (!compareUnorderedMap(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        if (hasCustomEquals(dualKey._key1.getClass())) {
            if (!dualKey._key1.equals(dualKey._key2)) {
                return false;
            }
            continue;
        }

        lastClass = dualKey._key1.getClass().toString();

        // Check whether we have a custom deepEquals implementation for this class.
        CustomDeepEquals de = customDeepEquals.get(dualKey._key1.getClass());
        if (de != null) {
            if (!de.deepEquals(dualKey._key1, dualKey._key2))
                return false;
        } else {
            Collection<Field> fields = getDeepDeclaredFields(dualKey._key1.getClass());
            for (Field field : fields) {
                try {
                    DualKey dk = new DualKey(field.get(dualKey._key1), field.get(dualKey._key2),
                            field.getName(), dualKey);
                    if (!visited.contains(dk)) {
                        stack.addFirst(dk);
                    }
                } catch (Exception ignored) {
                }
            }
        }
    }
    return true;
}
From source file:com.github.jknack.handlebars.internal.TemplateBuilder.java
/**
 * Creates a {@link Template} that detects recursive calls.
 *
 * @param source The template source.
 * @param template The original template.
 * @return A new {@link Template} that detects recursive calls.
 */
private static Template infiniteLoop(final TemplateSource source, final BaseTemplate template) {
    return new ForwardingTemplate(template) {
        @Override
        protected void beforeApply(final Context context) {
            LinkedList<TemplateSource> invocationStack = context.data(Context.INVOCATION_STACK);
            invocationStack.addLast(source);
        }

        @Override
        protected void afterApply(final Context context) {
            LinkedList<TemplateSource> invocationStack = context.data(Context.INVOCATION_STACK);
            if (!invocationStack.isEmpty()) {
                invocationStack.removeLast();
            }
        }
    };
}
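The same idiom, a LinkedList used as an invocation stack whose removeLast() is guarded by isEmpty(), can be sketched outside Handlebars; the InvocationTracker class and its depth limit below are hypothetical, not part of the library:

import java.util.LinkedList;

// Minimal sketch: track nested calls and refuse to recurse past a fixed depth.
class InvocationTracker {
    private final LinkedList<String> stack = new LinkedList<String>();

    void enter(String name) {
        if (stack.size() > 100) {
            throw new IllegalStateException("possible infinite recursion at " + name);
        }
        stack.addLast(name);
    }

    void exit() {
        if (!stack.isEmpty()) {   // guard mirrors the afterApply() check above
            stack.removeLast();
        }
    }
}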
From source file:org.apache.hadoop.hdfs.server.namenode.JournalSet.java
public static void chainAndMakeRedundantStreams(Collection<EditLogInputStream> outStreams,
        PriorityQueue<EditLogInputStream> allStreams, long fromTxId) {
    // We want to group together all the streams that start on the same start
    // transaction ID. To do this, we maintain an accumulator (acc) of all
    // the streams we've seen at a given start transaction ID. When we see a
    // higher start transaction ID, we select a stream from the accumulator and
    // clear it. Then we begin accumulating streams with the new, higher start
    // transaction ID.
    LinkedList<EditLogInputStream> acc = new LinkedList<EditLogInputStream>();
    EditLogInputStream elis;
    while ((elis = allStreams.poll()) != null) {
        if (acc.isEmpty()) {
            acc.add(elis);
        } else {
            EditLogInputStream accFirst = acc.get(0);
            long accFirstTxId = accFirst.getFirstTxId();
            if (accFirstTxId == elis.getFirstTxId()) {
                // if we have a finalized log segment available at this txid,
                // we should throw out all in-progress segments at this txid
                if (elis.isInProgress()) {
                    if (accFirst.isInProgress()) {
                        acc.add(elis);
                    }
                } else {
                    if (accFirst.isInProgress()) {
                        acc.clear();
                    }
                    acc.add(elis);
                }
            } else if (accFirstTxId < elis.getFirstTxId()) {
                // try to read from the local logs first since the throughput should
                // be higher
                Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR);
                outStreams.add(new RedundantEditLogInputStream(acc, fromTxId));
                acc.clear();
                acc.add(elis);
            } else if (accFirstTxId > elis.getFirstTxId()) {
                throw new RuntimeException("sorted set invariants violated! "
                        + "Got stream with first txid " + elis.getFirstTxId()
                        + ", but the last firstTxId was " + accFirstTxId);
            }
        }
    }
    if (!acc.isEmpty()) {
        Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR);
        outStreams.add(new RedundantEditLogInputStream(acc, fromTxId));
        acc.clear();
    }
}
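The accumulate-then-flush idiom above, where isEmpty() marks the start of a new group, can be reduced to plain integers; the grouping key and the printout below are placeholders, not anything from HDFS:

import java.util.Arrays;
import java.util.LinkedList;
import java.util.PriorityQueue;

public class GroupingSketch {
    public static void main(String[] args) {
        // Drain a priority queue and group runs of equal values.
        PriorityQueue<Integer> all = new PriorityQueue<Integer>(Arrays.asList(1, 1, 2, 3, 3, 3));
        LinkedList<Integer> acc = new LinkedList<Integer>();
        Integer next;
        while ((next = all.poll()) != null) {
            if (acc.isEmpty() || acc.getFirst().equals(next)) {
                acc.add(next);                       // still in the same group
            } else {
                System.out.println("group: " + acc); // flush the finished group
                acc.clear();
                acc.add(next);
            }
        }
        if (!acc.isEmpty()) {
            System.out.println("group: " + acc);     // flush the trailing group
        }
    }
}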
From source file:de._13ducks.cor.game.server.movement.SubSectorPathfinder.java
/**
 * Searches for a path on free areas (FreePolygon) around an obstacle.
 * Takes further obstacles on the "diversion" into account.
 * Only searches the route up to the next target.
 * The mover must not already be on a diversion;
 * if necessary, that diversion has to be deleted beforehand.
 * @param mover
 * @param obstacle
 * @return
 */
static List<SubSectorEdge> searchDiversion(Moveable mover, Moveable obstacle, SimplePosition target) {
    // Pre-check: is the target still free at all?
    List<Moveable> moversAroundTarget = Server.getInnerServer().moveMan.moveMap
            .moversAroundPoint(target.toFPP(), mover.getRadius() + 5);
    moversAroundTarget.remove(mover); // in case it is contained
    for (Moveable m : moversAroundTarget) {
        if (m.getPrecisePosition().getDistance(target.toFPP()) < m.getRadius() + mover.getRadius()
                + ServerBehaviourMove.MIN_DISTANCE) {
            System.out.println("No div, target blocked!");
            return null;
        }
    }
    /**
     * Path search in two steps:
     * 1. Build a suitable graph that contains the whole problem.
     * 2. Search a route in this graph using A* (A-Star).
     */
    // Build the graph:
    ArrayList<SubSectorObstacle> graph = new ArrayList<SubSectorObstacle>(); // the graph itself
    LinkedList<Moveable> openObstacles = new LinkedList<Moveable>(); // nodes still to be examined
    ArrayList<Moveable> closedObstacles = new ArrayList<Moveable>(); // processed nodes
    openObstacles.add(obstacle); // starting point of the graph
    closedObstacles.add(mover); // not taken into account in the graph
    double radius = mover.getRadius() + ServerBehaviourMove.MIN_DISTANCE;
    while (!openObstacles.isEmpty()) {
        // Take a new element from the list and mark it as processed.
        Moveable work = openObstacles.poll();
        closedObstacles.add(work);
        SubSectorObstacle next = new SubSectorObstacle(work.getPrecisePosition().x(),
                work.getPrecisePosition().y(), work.getRadius());
        // First delete all nodes of the graph that are no longer reachable:
        for (SubSectorObstacle obst : graph) {
            obst.removeNearNodes(next, radius);
        }
        // Link with the graph
        for (SubSectorObstacle node : graph) {
            if (node.inColRange(next, radius)) {
                // find intersections
                SubSectorNode[] intersections = node.calcIntersections(next, radius);
                for (SubSectorNode n2 : intersections) {
                    boolean reachable = true;
                    for (SubSectorObstacle o : graph) {
                        if (o.equals(node)) {
                            continue; // this is the circle being processed, of course we lie on it
                        }
                        if (o.moveCircleContains(n2, radius)) {
                            reachable = false;
                            break;
                        }
                    }
                    if (reachable) {
                        // add the intersection point
                        next.addNode(n2);
                        node.addNode(n2);
                    }
                }
            }
        }
        // Insert the processed obstacle itself into the graph
        graph.add(next);
        // Look for further obstacles that are now relevant.
        List<Moveable> moversAround = Server.getInnerServer().moveMan.moveMap.moversAround(work,
                (work.getRadius() + radius) * 2);
        for (Moveable pmove : moversAround) {
            if (!closedObstacles.contains(pmove) && !openObstacles.contains(pmove)) {
                openObstacles.add(pmove);
            }
        }
    }
    // Now iterate over it and finish building the graph:
    for (SubSectorObstacle obst : graph) {
        // Procedure:
        // Walk along the line of every obstacle and connect nodes with edges.
        // On a circle, a node may only ever go in one direction.
        // (this should be fairly easy using its two known obstacles)
        // The length of the circle segment can be computed from the angle (Math.atan2(y, x)).
        // Then A* may run. Or rather Dijkstra; A* is almost overkill here.
        // Sort all nodes by their arc angle.
        obst.sortNodes();
        obst.interConnectNodes(radius);
    }
    // Insert start and target nodes and link them with the graph.
    SubSectorNode startNode = new SubSectorNode(mover.getPrecisePosition().x(), mover.getPrecisePosition().y());
    SubSectorNode targetNode = new SubSectorNode(target.x(), target.y());
    double min = Double.POSITIVE_INFINITY;
    SubSectorObstacle minObstacle = null;
    for (SubSectorObstacle obst : graph) {
        double newdist = Math.sqrt((obst.getX() - startNode.getX()) * (obst.getX() - startNode.getX())
                + (obst.getY() - startNode.getY()) * (obst.getY() - startNode.getY()));
        newdist -= obst.getRadius() + radius; // we want the nearest reachable circle, not the nearest obstacle
        if (newdist < min) {
            min = newdist;
            minObstacle = obst;
        }
    }
    // Find the point on the walking circle
    Vector direct = new Vector(startNode.getX() - minObstacle.getX(), startNode.getY() - minObstacle.getY());
    direct = direct.normalize().multiply(minObstacle.getRadius() + radius);
    SubSectorNode minNode = new SubSectorNode(minObstacle.getX() + direct.getX(),
            minObstacle.getY() + direct.getY(), minObstacle);
    // Integrate into the obstacle:
    minObstacle.lateIntegrateNode(minNode);
    SubSectorEdge startEdge = new SubSectorEdge(startNode, minNode, min);
    if (!startNode.equals(minNode)) {
        startNode.addEdge(startEdge);
        minNode.addEdge(startEdge);
    } else {
        // We are already standing on minNode.
        // The entry edge is not necessary.
        startNode = minNode;
    }
    double min2 = Double.POSITIVE_INFINITY;
    SubSectorObstacle minObstacle2 = null;
    for (SubSectorObstacle obst : graph) {
        double newdist = Math.sqrt((obst.getX() - targetNode.getX()) * (obst.getX() - targetNode.getX())
                + (obst.getY() - targetNode.getY()) * (obst.getY() - targetNode.getY()));
        newdist -= obst.getRadius() + radius; // we want the nearest reachable circle, not the nearest obstacle
        if (newdist < min2) {
            min2 = newdist;
            minObstacle2 = obst;
        }
    }
    // Find the point on the walking circle
    Vector direct2 = new Vector(targetNode.getX() - minObstacle2.getX(), targetNode.getY() - minObstacle2.getY());
    direct2 = direct2.normalize().multiply(minObstacle2.getRadius() + radius);
    SubSectorNode minNode2 = new SubSectorNode(minObstacle2.getX() + direct2.getX(),
            minObstacle2.getY() + direct2.getY(), minObstacle2);
    // Integrate into the obstacle:
    minObstacle2.lateIntegrateNode(minNode2);
    SubSectorEdge targetEdge = new SubSectorEdge(minNode2, targetNode, min2);
    if (!targetNode.equals(minNode2)) {
        targetNode.addEdge(targetEdge);
        minNode2.addEdge(targetEdge);
    } else {
        // The target is already on the walking circle.
        // The exit edge is not necessary.
        targetNode = minNode2;
    }
    /**
     * Now search a path from startNode to targetNode.
     * The edges are in node.myEdges
     * The destinations are obtained via edge.getOther(startNode)
     * The length (path cost) is in edge.length (careful: double value!)
     */
    PriorityBuffer open = new PriorityBuffer(); // list of discovered nodes
    LinkedHashSet<SubSectorNode> containopen = new LinkedHashSet<SubSectorNode>(); // also holds discovered nodes; allows a much faster check whether a node is already contained
    LinkedHashSet<SubSectorNode> closed = new LinkedHashSet<SubSectorNode>(); // list of fully processed nodes
    double cost_t = 0; // movement cost (straight 5, diagonal 7, set later)
    open.add(startNode);
    while (open.size() > 0) {
        SubSectorNode current = (SubSectorNode) open.remove();
        containopen.remove(current);
        if (current.equals(targetNode)) {
            // Stop, because a path from start to target has been found
            //targetNode.setParent(current.getParent()); // predecessor of the target is known
            break;
        }
        // current has already been removed from the open list, now move it to the closed list
        closed.add(current);
        ArrayList<SubSectorEdge> neighbors = current.getMyEdges();
        for (SubSectorEdge edge : neighbors) {
            SubSectorNode node = edge.getOther(current);
            if (closed.contains(node)) {
                continue;
            }
            // compute the cost to get there
            cost_t = edge.getLength();
            if (containopen.contains(node)) {
                // if the node is already in the open list, check whether there is a shorter path
                if (current.getCost() + cost_t < node.getCost()) { // shorter path found?
                    node.setCost(current.getCost() + cost_t); // -> recompute the path cost
                    //node.setValF(node.cost + node.getHeuristic()); // F value: path cost from the start plus straight-line distance to the target
                    node.setParent(current); // the current field becomes the predecessor
                }
            } else {
                node.setCost(current.getCost() + cost_t);
                //node.setHeuristic(Math.sqrt(Math.pow(Math.abs((targetNode.getX() - node.getX())), 2) + Math.pow(Math.abs((targetNode.getY() - node.getY())), 2))); // estimated distance to the target
                // the number at the end of the calculation is the effort of the path search
                // 5 is fast, 4 normal, 3 takes long
                node.setParent(current); // parent is the RogPosition from which the current node was discovered
                //node.setValF(node.cost + node.getHeuristic()); // F value: path cost from the start plus straight-line distance to the target
                open.add(node); // add to the open list
                containopen.add(node);
            }
        }
    }
    if (targetNode.getParent() == null) {
        // no path found
        return null;
    }
    ArrayList<SubSectorNode> pathrev = new ArrayList<SubSectorNode>(); // build the path from the parents, from target to start
    while (!targetNode.equals(startNode)) {
        pathrev.add(targetNode);
        targetNode = targetNode.getParent();
    }
    pathrev.add(startNode);
    ArrayList<SubSectorNode> path = new ArrayList<SubSectorNode>(); // reverse the path so it runs from start to target
    for (int k = pathrev.size() - 1; k >= 0; k--) {
        path.add(pathrev.get(k));
    }
    // Post-processing:
    // we need an edge list with arc/direct information
    ArrayList<SubSectorEdge> finalPath = new ArrayList<SubSectorEdge>();
    for (int i = 0; i < path.size() - 1; i++) {
        SubSectorNode from = path.get(i);
        SubSectorNode to = path.get(i + 1);
        SubSectorEdge edge = shortestCommonEdge(from, to);
        if (edge != null) {
            finalPath.add(edge);
        } else {
            throw new RuntimeException("ERROR Cannot find edge from " + from + " to " + to
                    + " but it is part of the calculated path!!!");
        }
    }
    return finalPath; // return the path
}
From source file:org.opencastproject.videoeditor.impl.VideoEditorServiceImpl.java
private static List<VideoClip> sortSegments(List<VideoClip> edits) {
    LinkedList<VideoClip> ll = new LinkedList<VideoClip>();
    List<VideoClip> clips = new ArrayList<VideoClip>();
    Iterator<VideoClip> it = edits.iterator();
    VideoClip clip;
    VideoClip nextclip;
    while (it.hasNext()) { // Check for legal durations
        clip = it.next();
        if (clip.getDuration() > 2) { // Keep segments at least 2 seconds long
            ll.add(clip);
        }
    }
    clip = ll.pop(); // initialize
    while (!ll.isEmpty()) { // Check that 2 consecutive segments from the same src are at least 2 secs apart
        if (ll.peek() != null) {
            nextclip = ll.pop(); // check next consecutive segment
            if ((nextclip.getSrc() == clip.getSrc()) && (nextclip.getStart() - clip.getEnd()) < 2) {
                // collapse two segments into one, using the inpoint of seg 1 and the outpoint of seg 2
                clip.setEnd(nextclip.getEnd());
            } else {
                clips.add(clip); // keep last segment
                clip = nextclip; // check next segment
            }
        }
    }
    clips.add(clip); // add last segment
    return clips;
}
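The pop()/isEmpty() merge loop above generalizes to any list of adjacent intervals; the int[] {start, end} representation below is a stand-in chosen for illustration, not the Opencast VideoClip type:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

public class MergeSketch {
    // Collapse intervals whose gap is smaller than 2 units, mirroring the loop above.
    static List<int[]> merge(LinkedList<int[]> segments) {
        List<int[]> out = new ArrayList<int[]>();
        int[] current = segments.pop();   // assumes at least one segment
        while (!segments.isEmpty()) {
            int[] next = segments.pop();
            if (next[0] - current[1] < 2) {
                current[1] = next[1];     // extend the current interval
            } else {
                out.add(current);
                current = next;
            }
        }
        out.add(current);
        return out;
    }
}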
From source file:de.vanita5.twittnuker.util.net.ssl.AbstractCheckSignatureVerifier.java
/**
 * Extracts the array of SubjectAlt DNS or IP names from an X509Certificate.
 * Returns null if there aren't any.
 *
 * @param cert X509Certificate
 * @param hostname
 * @return Array of SubjectALT DNS or IP names stored in the certificate.
 */
private static String[] getSubjectAlts(final X509Certificate cert, final String hostname) {
    final int subjectType;
    if (isIPAddress(hostname)) {
        subjectType = 7;
    } else {
        subjectType = 2;
    }
    final LinkedList<String> subjectAltList = new LinkedList<String>();
    Collection<List<?>> c = null;
    try {
        c = cert.getSubjectAlternativeNames();
    } catch (final CertificateParsingException cpe) {
    }
    if (c != null) {
        for (final List<?> aC : c) {
            final List<?> list = aC;
            final int type = ((Integer) list.get(0)).intValue();
            if (type == subjectType) {
                final String s = (String) list.get(1);
                subjectAltList.add(s);
            }
        }
    }
    if (!subjectAltList.isEmpty()) {
        final String[] subjectAlts = new String[subjectAltList.size()];
        subjectAltList.toArray(subjectAlts);
        return subjectAlts;
    } else
        return null;
}
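The isEmpty()-guarded toArray() conversion at the end of that method is a common idiom on its own; a standalone sketch with an arbitrary filter (the class and prefix check are made up):

import java.util.LinkedList;

public class ToArraySketch {
    // Collect matching items, then return them as an array, or null when nothing matched.
    static String[] startingWith(String prefix, String... candidates) {
        LinkedList<String> matches = new LinkedList<String>();
        for (String candidate : candidates) {
            if (candidate.startsWith(prefix)) {
                matches.add(candidate);
            }
        }
        if (!matches.isEmpty()) {
            return matches.toArray(new String[matches.size()]);
        }
        return null;
    }
}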
From source file:org.apache.olingo.server.core.requests.DataRequest.java
static String buildNavPath(UriHelper helper, EdmEntityType rootType,
        LinkedList<UriResourceNavigation> navigations, boolean includeLastPredicates)
        throws SerializerException {
    if (navigations.isEmpty()) {
        return null;
    }
    StringBuilder sb = new StringBuilder();
    boolean containsTarget = false;
    EdmEntityType type = rootType;
    for (UriResourceNavigation nav : navigations) {
        String name = nav.getProperty().getName();
        EdmNavigationProperty property = type.getNavigationProperty(name);
        if (property.containsTarget()) {
            containsTarget = true;
        }
        type = nav.getProperty().getType();
    }

    if (containsTarget) {
        for (int i = 0; i < navigations.size(); i++) {
            UriResourceNavigation nav = navigations.get(i);
            if (i > 0) {
                sb.append("/");
            }
            sb.append(nav.getProperty().getName());

            boolean skipKeys = false;
            if (navigations.size() == i + 1 && !includeLastPredicates) {
                skipKeys = true;
            }
            if (!skipKeys && !nav.getKeyPredicates().isEmpty()) {
                sb.append("(");
                sb.append(helper.buildContextURLKeyPredicate(nav.getKeyPredicates()));
                sb.append(")");
            }

            if (nav.getTypeFilterOnCollection() != null) {
                sb.append("/").append(
                        nav.getTypeFilterOnCollection().getFullQualifiedName().getFullQualifiedNameAsString());
            } else if (nav.getTypeFilterOnEntry() != null) {
                sb.append("/").append(
                        nav.getTypeFilterOnEntry().getFullQualifiedName().getFullQualifiedNameAsString());
            }
        }
    }
    return sb.toString();
}
From source file:com.epam.reportportal.apache.http.conn.ssl.AbstractVerifier.java
/**
 * Extracts the array of SubjectAlt DNS or IP names from an X509Certificate.
 * Returns null if there aren't any.
 *
 * @param cert X509Certificate
 * @param hostname
 * @return Array of SubjectALT DNS or IP names stored in the certificate.
 */
private static String[] getSubjectAlts(final X509Certificate cert, final String hostname) {
    final int subjectType;
    if (isIPAddress(hostname)) {
        subjectType = 7;
    } else {
        subjectType = 2;
    }
    final LinkedList<String> subjectAltList = new LinkedList<String>();
    Collection<List<?>> c = null;
    try {
        c = cert.getSubjectAlternativeNames();
    } catch (final CertificateParsingException cpe) {
    }
    if (c != null) {
        for (final List<?> aC : c) {
            final List<?> list = aC;
            final int type = ((Integer) list.get(0)).intValue();
            if (type == subjectType) {
                final String s = (String) list.get(1);
                subjectAltList.add(s);
            }
        }
    }
    if (!subjectAltList.isEmpty()) {
        final String[] subjectAlts = new String[subjectAltList.size()];
        subjectAltList.toArray(subjectAlts);
        return subjectAlts;
    } else {
        return null;
    }
}