List of usage examples for java.util.Collection.addAll
boolean addAll(Collection<? extends E> c);
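Per the Collection contract, addAll appends every element of the argument collection to the receiver and returns true if the receiver changed as a result; a Set that already contains all of the elements therefore returns false. A minimal standalone sketch (hypothetical values, not taken from the examples below):

import java.util.*;

public class AddAllDemo {
    public static void main(String[] args) {
        List<String> names = new ArrayList<>(Arrays.asList("ada"));
        boolean changed = names.addAll(Arrays.asList("grace", "edsger"));
        System.out.println(changed + " " + names); // true [ada, grace, edsger]

        Set<String> unique = new HashSet<>(names);
        boolean changedAgain = unique.addAll(Arrays.asList("ada"));
        System.out.println(changedAgain); // false: the set already contained "ada"
    }
}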
From source file:de.science.hack.Main.java
private Collection<File> getInputFiles(CommandLine commandLine) {
    Collection<File> files = new ArrayList<>();
    if (commandLine.hasOption(CliArg.F.getShortKey())) {
        String fileName = commandLine.getOptionValue(CliArg.F.getShortKey());
        files.add(new File(fileName));
    } else if (commandLine.hasOption(CliArg.D.getShortKey())) {
        String dirName = commandLine.getOptionValue(CliArg.D.getShortKey());
        File directory = new File(dirName);
        files.addAll(listFiles(directory, EXT, false));
    }
    return files;
}
From source file:edu.brown.profilers.ProfileMeasurement.java
public Collection<Long> getHistory(Collection<Long> to_fill) {
    to_fill.addAll(this.history);
    return to_fill;
}
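This "fill the collection the caller provides" pattern lets the call site pick the concrete collection type and its semantics. A hypothetical call site (the variable pm is illustrative, not from the source):

// Hypothetical usage: the caller chooses ordering/uniqueness semantics.
Collection<Long> ordered = pm.getHistory(new ArrayList<Long>()); // keeps duplicates and insertion order
Collection<Long> distinct = pm.getHistory(new TreeSet<Long>());  // sorted, duplicates collapsed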
From source file:graph.inference.module.DisjointWithWorker.java
private void siblingDisjointViaModule(DAGNode atomic, QueryObject queryObj) {
    if (queryObj.isProof()) {
        DAGNode otherNode = (DAGNode) queryObj.getNode(2);
        // Find the unique parents for each set
        Collection<Node> atomicParents = CommonQuery.ALLGENLS.runQuery(dag_, atomic);
        Collection<Node> otherParents = CommonQuery.ALLGENLS.runQuery(dag_, otherNode);
        Collection<Node> commonParents = CollectionUtils.retainAll(atomicParents, otherParents);
        atomicParents.removeAll(commonParents);
        otherParents.removeAll(commonParents);

        // Find the sibling disjoint collections for atomics
        Collection<DAGNode> atomicSibCols = new HashSet<>();
        for (Node atomicParent : atomicParents)
            if (atomicParent instanceof DAGNode)
                atomicSibCols.addAll(sibModule_.getSiblingDisjointParents((DAGNode) atomicParent));

        // Search for match in others
        for (Node otherParent : otherParents) {
            if (otherParent instanceof DAGNode)
                if (CollectionUtils.containsAny(atomicSibCols,
                        sibModule_.getSiblingDisjointParents((DAGNode) otherParent))) {
                    // A match!
                    if (!isException(atomic, otherNode)) {
                        queryObj.addResult(new Substitution());
                        // processSiblingJustification(,
                        // sub.getSubstitution(transTwo),
                        // sub.getSubstitution(queryVar), queryObj);
                    }
                    return;
                }
        }
    }
}
From source file:graph.module.OntologyEdgeModule.java
/**
 * Gets the function edges (or edges containing the node NOT in functions).
 *
 * @param n
 *            The node to get edges for.
 * @param functionOnly
 *            If only getting edges in which the node is in the function, or
 *            getting non-function edges.
 * @return The set of all edges (function or not).
 */
private Collection<Edge> getFunctionEdges(Node n, boolean functionOnly) {
    Collection<Edge> funcEdges = new HashSet<>();
    MultiMap<Object, Edge> nodeEdges = relatedEdges_.get(n);
    if (nodeEdges == null)
        return funcEdges;
    for (Object key : nodeEdges.keySet()) {
        if (functionOnly && key.toString().contains(FUNC_SPLIT))
            funcEdges.addAll(nodeEdges.get(key));
        else if (!functionOnly && !key.toString().contains(FUNC_SPLIT))
            funcEdges.addAll(nodeEdges.get(key));
    }
    return funcEdges;
}
From source file:ch.flashcard.HibernateDetachUtility.java
/**
 * @param value the object needing to be detached/scrubbed.
 * @param checkedObjectMap This maps identityHashCodes to Objects we've already detached. In that way we can
 * quickly determine if we've already done the work for the incoming value and avoid traversing it again. This
 * works well almost all of the time, but it is possible that two different objects can have the same identity hash
 * (conflicts are always possible with a hash). In that case we utilize the checkedObjectCollisionMap (see below).
 * @param checkedObjectCollisionMap checkedObjectMap maps the identity hash to the *first* object with that hash. In
 * most cases there will only be a mapping for one hash, but it is possible to encounter the same hash for multiple
 * objects, especially on 32-bit or IBM JVMs. It is important to know if an object has already been detached
 * because if it is somehow self-referencing, we have to stop the recursion. This map holds the 2nd..Nth mapping
 * for a single hash and is used to ensure we never try to detach an object already processed.
 * @param depth used to stop infinite recursion; defaults to a depth we don't expect to see, but it is configurable.
 * @param serializationType the serialization strategy (JAXB or plain serialization) being detached for.
 * @throws Exception if a problem occurs
 * @throws IllegalStateException if the recursion depth limit is reached
 */
private static void nullOutUninitializedFields(Object value, Map<Integer, Object> checkedObjectMap,
        Map<Integer, List<Object>> checkedObjectCollisionMap, int depth, SerializationType serializationType)
        throws Exception {
    if (depth > depthAllowed) {
        String warningMessage = "Recursed too deep [" + depth + " > " + depthAllowed
                + "], will not attempt to detach object of type ["
                + ((value != null) ? value.getClass().getName() : "N/A")
                + "]. This may cause serialization errors later. "
                + "You can try to work around this by setting the system property [" + DEPTH_ALLOWED_SYSPROP
                + "] to a value higher than [" + depth + "] or you can set the system property ["
                + THROW_EXCEPTION_ON_DEPTH_LIMIT_SYSPROP + "] to 'false'";
        LOG.warn(warningMessage);
        if (throwExceptionOnDepthLimit) {
            throw new IllegalStateException(warningMessage);
        }
        return;
    }

    if (null == value) {
        return;
    }

    // System.identityHashCode is a hash code, and therefore not guaranteed to be unique. And we've seen this
    // be the case. So, we use it to try and avoid duplicating work, but handle the case when two objects may
    // have an identity crisis.
    Integer valueIdentity = hashCodeGenerator.getHashCode(value);
    Object checkedObject = checkedObjectMap.get(valueIdentity);

    if (null == checkedObject) {
        // if we have not yet encountered an object with this hash, store it in our map and start scrubbing
        checkedObjectMap.put(valueIdentity, value);

    } else if (value == checkedObject) {
        // if we have scrubbed this already, no more work to be done
        return;

    } else {
        // we have a situation where multiple objects have the same identity hashcode, work with our
        // collision map to decide whether it needs to be scrubbed and add if necessary.
        // Note that this code block is infrequently hit, it is by design that we've pushed the extra
        // work, map, etc, involved for this infrequent case into its own block. The standard cases must
        // be as fast and lean as possible.
        boolean alreadyDetached = false;
        List<Object> collisionObjects = checkedObjectCollisionMap.get(valueIdentity);

        if (null == collisionObjects) {
            // if this is the 2nd occurrence for this hash, create a new map entry
            collisionObjects = new ArrayList<Object>(1);
            checkedObjectCollisionMap.put(valueIdentity, collisionObjects);

        } else {
            // if we have scrubbed this already, no more work to be done
            for (Object collisionObject : collisionObjects) {
                if (value == collisionObject) {
                    alreadyDetached = true;
                    break;
                }
            }
        }

        if (LOG.isDebugEnabled()) {
            StringBuilder message = new StringBuilder("\n\tIDENTITY HASHCODE COLLISION [hash=");
            message.append(valueIdentity);
            message.append(", alreadyDetached=");
            message.append(alreadyDetached);
            message.append("]");
            message.append("\n\tCurrent : ");
            message.append(value.getClass().getName());
            message.append("\n\t ");
            message.append(value);
            message.append("\n\tPrevious : ");
            message.append(checkedObject.getClass().getName());
            message.append("\n\t ");
            message.append(checkedObject);

            for (Object collisionObject : collisionObjects) {
                message.append("\n\tPrevious : ");
                message.append(collisionObject.getClass().getName());
                message.append("\n\t ");
                message.append(collisionObject);
            }
            LOG.debug(message);
        }

        // now that we've done our logging, if already detached we're done. Otherwise add to the list of collision
        // objects for this hash, and start scrubbing
        if (alreadyDetached) {
            return;
        }
        collisionObjects.add(value);
    }

    // Perform the detaching
    if (value instanceof Object[]) {
        Object[] objArray = (Object[]) value;
        for (int i = 0; i < objArray.length; i++) {
            Object listEntry = objArray[i];
            Object replaceEntry = replaceObject(listEntry);
            if (replaceEntry != null) {
                objArray[i] = replaceEntry;
            }
            nullOutUninitializedFields(objArray[i], checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }

    } else if (value instanceof List) {
        // Null out any entries in initialized collections
        ListIterator i = ((List) value).listIterator();
        while (i.hasNext()) {
            Object val = i.next();
            Object replace = replaceObject(val);
            if (replace != null) {
                val = replace;
                i.set(replace);
            }
            nullOutUninitializedFields(val, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }

    } else if (value instanceof Collection) {
        Collection collection = (Collection) value;
        Collection itemsToBeReplaced = new ArrayList();
        Collection replacementItems = new ArrayList();
        for (Object item : collection) {
            Object replacementItem = replaceObject(item);
            if (replacementItem != null) {
                itemsToBeReplaced.add(item);
                replacementItems.add(replacementItem);
                item = replacementItem;
            }
            nullOutUninitializedFields(item, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }
        collection.removeAll(itemsToBeReplaced);
        // watch out! if this collection is a Set, HashMap$MapSet doesn't support addAll. See BZ 688000
        collection.addAll(replacementItems);

    } else if (value instanceof Map) {
        Map originalMap = (Map) value;
        HashMap<Object, Object> replaceMap = new HashMap<Object, Object>();
        for (Iterator i = originalMap.keySet().iterator(); i.hasNext();) {
            // get original key and value - these might be hibernate proxies
            Object originalKey = i.next();
            Object originalKeyValue = originalMap.get(originalKey);

            // replace with non-hibernate classes, if appropriate (will be null otherwise)
            Object replaceKey = replaceObject(originalKey);
            Object replaceValue = replaceObject(originalKeyValue);

            // if either original key or original value was a hibernate proxy object, we have to
            // remove it from the original map, and remember the replacement objects for later
            if (replaceKey != null || replaceValue != null) {
                Object newKey = (replaceKey != null) ? replaceKey : originalKey;
                Object newValue = (replaceValue != null) ? replaceValue : originalKeyValue;
                replaceMap.put(newKey, newValue);
                i.remove();
            }
        }

        // all hibernate proxies have been removed, we need to replace them with their
        // non-proxy object representations that we got from replaceObject() calls
        originalMap.putAll(replaceMap);

        // now go through each item in the map and null out their internal fields
        for (Object key : originalMap.keySet()) {
            nullOutUninitializedFields(originalMap.get(key), checkedObjectMap, checkedObjectCollisionMap,
                    depth + 1, serializationType);
            nullOutUninitializedFields(key, checkedObjectMap, checkedObjectCollisionMap, depth + 1,
                    serializationType);
        }

    } else if (value instanceof Enum) {
        // don't need to detach enums, treat them as special objects
        return;
    }

    if (serializationType == SerializationType.JAXB) {
        XmlAccessorType at = (XmlAccessorType) value.getClass().getAnnotation(XmlAccessorType.class);
        if (at != null && at.value() == XmlAccessType.FIELD) {
            nullOutFieldsByFieldAccess(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                    serializationType);
        } else {
            nullOutFieldsByAccessors(value, checkedObjectMap, checkedObjectCollisionMap, depth,
                    serializationType);
        }
    } else if (serializationType == SerializationType.SERIALIZATION) {
        nullOutFieldsByFieldAccess(value, checkedObjectMap, checkedObjectCollisionMap, depth, serializationType);
    }
}
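The caveat flagged in the Collection branch above deserves a standalone illustration: collection views such as the Set returned by Map.keySet() support element removal but reject insertion, so calling addAll on such a view throws UnsupportedOperationException. A minimal demonstration (independent of the source above):

import java.util.*;

public class KeySetViewDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);

        Set<String> keys = map.keySet(); // a live view backed by the map
        keys.remove("a");                // removal is supported and also updates the map

        try {
            keys.addAll(Collections.singleton("b")); // insertion into a keySet view is not supported
        } catch (UnsupportedOperationException e) {
            System.out.println("keySet view rejects addAll");
        }
    }
}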
From source file:com.khs.sherpa.processor.RestfulRequestProcessor.java
public String getEndpoint(HttpServletRequest request) {
    Map<String, Object> map = applicationContext.getEndpointTypes();
    Collection<Method> methods = new HashSet<Method>();
    for (Entry<String, Object> entry : map.entrySet()) {
        Collection<Method> m = Reflections.getAllMethods(entry.getValue().getClass(),
                Predicates.and(ReflectionUtils.withAnnotation(Action.class),
                        SherpaPredicates.withActionMappingPattern(UrlUtil.getPath(request))));
        methods.addAll(m);
    }
    method = MethodUtil.validateHttpMethods(methods.toArray(new Method[] {}), request.getMethod());
    if (method != null) {
        Class<?> type = method.getDeclaringClass();
        if (type.isAnnotationPresent(Endpoint.class)) {
            if (StringUtils.isNotEmpty(type.getAnnotation(Endpoint.class).value())) {
                return type.getAnnotation(Endpoint.class).value();
            }
        }
        return type.getSimpleName();
    }
    throw new SherpaEndpointNotFoundException("no endpoint for url [" + UrlUtil.getPath(request) + "]");
}
From source file:com.imaginary.home.cloud.api.call.CommandCall.java
@Override
public void get(@Nonnull String requestId, @Nullable String userId, @Nonnull String[] path,
        @Nonnull HttpServletRequest req, @Nonnull HttpServletResponse resp,
        @Nonnull Map<String, Object> headers, @Nonnull Map<String, Object> parameters)
        throws RestException, IOException {
    try {
        ArrayList<Map<String, Object>> list = new ArrayList<Map<String, Object>>();
        Boolean hasCommands = null;

        if (userId == null) {
            String apiKey = (String) headers.get(RestApi.API_KEY);
            ControllerRelay relay = ControllerRelay.getRelay(apiKey);

            if (relay == null) {
                throw new RestException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
                        RestException.INTERNAL_ERROR, "Relay was lost");
            }
            for (PendingCommand cmd : PendingCommand.getCommandsToSend(relay, true)) {
                list.add(toJSON(cmd));
            }
            hasCommands = PendingCommand.hasCommands(relay);
        } else {
            User user = User.getUserByUserId(userId);

            if (user == null) {
                throw new RestException(HttpServletResponse.SC_FORBIDDEN, RestException.NO_SUCH_USER,
                        "Invalid user access to location");
            }
            String locationId = req.getParameter("locationId");
            Collection<ControllerRelay> relays;

            if (locationId == null) {
                relays = new ArrayList<ControllerRelay>();
                for (Location location : user.getLocations()) {
                    relays.addAll(ControllerRelay.findRelaysInLocation(location));
                }
            } else {
                boolean mine = false;

                for (String lid : user.getLocationIds()) {
                    if (lid.equals(locationId)) {
                        mine = true;
                        break;
                    }
                }
                Location location = Location.getLocation(locationId);

                if (location == null || (!mine && !userId.equals(location.getOwnerId()))) {
                    throw new RestException(HttpServletResponse.SC_BAD_REQUEST,
                            RestException.INVALID_PARAMETER, "No such location: " + locationId);
                }
                relays = ControllerRelay.findRelaysInLocation(location);
            }
            for (ControllerRelay relay : relays) {
                for (PendingCommand cmd : PendingCommand.getCommands(relay)) {
                    list.add(toJSON(cmd));
                }
            }
        }
        if (hasCommands != null) {
            resp.setHeader("x-imaginary-has-commands", String.valueOf(hasCommands));
        }
        resp.setStatus(HttpServletResponse.SC_OK);
        resp.getWriter().println((new JSONArray(list)).toString());
        resp.getWriter().flush();
    } catch (PersistenceException e) {
        throw new RestException(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, RestException.INTERNAL_ERROR,
                e.getMessage());
    }
}
From source file:com.all.landownloader.LanNetworkingService.java
public int send(LanDownloaderMessage message) {
    int succesfulReceivers = 0;
    Collection<String> receivers = new ArrayList<String>();
    synchronized (currentNodes) {
        receivers.addAll(currentNodes.keySet());
    }
    for (String receiver : receivers) {
        if (sendTo(message, getConnection(receiver))) {
            succesfulReceivers++;
        }
    }
    return succesfulReceivers;
}
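Here addAll is used to take a snapshot: the lock on currentNodes is held only long enough to copy the key set, and the potentially slow network sends then iterate the copy without blocking other threads that mutate the map. The same idiom in isolation (names are illustrative, not from the source):

// Snapshot idiom: copy under the lock, iterate outside it.
Map<String, Object> sharedMap = Collections.synchronizedMap(new HashMap<String, Object>());
Collection<String> snapshot = new ArrayList<String>();
synchronized (sharedMap) {
    snapshot.addAll(sharedMap.keySet()); // consistent copy taken while holding the lock
}
for (String key : snapshot) {
    // slow per-key work happens here without holding the lock on sharedMap
}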
From source file:io.hops.transaction.context.INodeContext.java
@Override
public void prepare(TransactionLocks lks) throws TransactionContextException, StorageException {
    // If the list is not empty, check the lock types. The lock type is checked
    // after the list length because sometimes in the tx handler the acquire-lock
    // function is empty, and in that case tlm would throw null pointer exceptions.
    Collection<INode> removed = getRemoved();
    Collection<INode> added = new ArrayList<>(getAdded());
    added.addAll(renamedInodes);
    Collection<INode> modified = getModified();

    if (lks.containsLock(Lock.Type.INode)) {
        BaseINodeLock hlk = (BaseINodeLock) lks.getLock(Lock.Type.INode);
        if (!removed.isEmpty()) {
            for (INode inode : removed) {
                TransactionLockTypes.INodeLockType lock = hlk.getLockedINodeLockType(inode);
                if (lock != null && lock != TransactionLockTypes.INodeLockType.WRITE
                        && lock != TransactionLockTypes.INodeLockType.WRITE_ON_TARGET_AND_PARENT) {
                    throw new LockUpgradeException(
                            "Trying to remove inode id=" + inode.getId() + " acquired lock was " + lock);
                }
            }
        }
        if (!modified.isEmpty()) {
            for (INode inode : modified) {
                TransactionLockTypes.INodeLockType lock = hlk.getLockedINodeLockType(inode);
                if (lock != null && lock != TransactionLockTypes.INodeLockType.WRITE
                        && lock != TransactionLockTypes.INodeLockType.WRITE_ON_TARGET_AND_PARENT) {
                    throw new LockUpgradeException(
                            "Trying to update inode id=" + inode.getId() + " acquired lock was " + lock);
                }
            }
        }
    }
    dataAccess.prepare(removed, added, modified);
}