List of usage examples for java.util.Queue#add
boolean add(E e);
From source file:org.apache.jackrabbit.oak.run.SegmentUtils.java
/**
 * Prints a size breakdown of the given file store: total bytes held in data
 * and bulk segments, the record usage analysis, the segments unreachable from
 * the current head (garbage), and a PCM analysis.
 *
 * @param store the segment store to analyse
 */
private static void debugFileStore(FileStore store) {
    // Segment id -> ids of the segments it references (bulk segments reference nothing).
    Map<SegmentId, List<SegmentId>> references = Maps.newHashMap();
    int dataSegments = 0;
    long dataBytes = 0;
    int bulkSegments = 0;
    long bulkBytes = 0;
    // Silence the tracker while every segment is touched.
    ((Logger) getLogger(SegmentTracker.class)).setLevel(Level.OFF);
    RecordUsageAnalyser analyser = new RecordUsageAnalyser();
    for (SegmentId segmentId : store.getSegmentIds()) {
        if (segmentId.isDataSegmentId()) {
            Segment segment = segmentId.getSegment();
            dataSegments++;
            dataBytes += segment.size();
            references.put(segmentId, segment.getReferencedIds());
            analyseSegment(segment, analyser);
        } else if (segmentId.isBulkSegmentId()) {
            bulkSegments++;
            bulkBytes += segmentId.getSegment().size();
            references.put(segmentId, Collections.<SegmentId>emptyList());
        }
    }
    System.out.println("Total size:");
    System.out.format("%s in %6d data segments%n", byteCountToDisplaySize(dataBytes), dataSegments);
    System.out.format("%s in %6d bulk segments%n", byteCountToDisplaySize(bulkBytes), bulkSegments);
    System.out.println(analyser.toString());

    // Breadth-first walk from the head revision; whatever is never reached is garbage.
    Set<SegmentId> garbage = newHashSet(references.keySet());
    Queue<SegmentId> pending = Queues.newArrayDeque();
    pending.add(store.getHead().getRecordId().getSegmentId());
    while (!pending.isEmpty()) {
        SegmentId current = pending.remove();
        if (garbage.remove(current)) {
            pending.addAll(references.get(current));
        }
    }

    // Re-tally, this time counting only the unreachable segments.
    dataSegments = 0;
    dataBytes = 0;
    bulkSegments = 0;
    bulkBytes = 0;
    for (SegmentId segmentId : garbage) {
        if (segmentId.isDataSegmentId()) {
            dataSegments++;
            dataBytes += segmentId.getSegment().size();
        } else if (segmentId.isBulkSegmentId()) {
            bulkSegments++;
            bulkBytes += segmentId.getSegment().size();
        }
    }
    System.out.format("%nAvailable for garbage collection:%n");
    System.out.format("%s in %6d data segments%n", byteCountToDisplaySize(dataBytes), dataSegments);
    System.out.format("%s in %6d bulk segments%n", byteCountToDisplaySize(bulkBytes), bulkSegments);
    System.out.format("%n%s", new PCMAnalyser(store).toString());
}
From source file:com.googlecode.fightinglayoutbugs.helpers.ImageHelper.java
/** Helper operating on screenshots represented as {@code boolean[x][y]} pixel grids. */
public class ImageHelper {

    /**
     * Finds the outlines of all areas where {@code pixels[x][y]} is {@code true}:
     * a background ({@code false}) pixel is an outline pixel when it is 4-adjacent
     * to at least one foreground ({@code true}) pixel and is reachable from the
     * detected starting point by flood fill over the background.
     *
     * @param pixels rectangular grid indexed as {@code pixels[x][y]}
     * @return grid of the same dimensions marking outline pixels
     */
    public static boolean[][] findOutlines(boolean[][] pixels) {
        final int w = pixels.length;
        final int h = pixels[0].length;
        final int maxX = w - 1;
        final int maxY = h - 1;
        boolean[][] outlines = new boolean[w][h];

        // --- Locate a background pixel on the border to start from. ---
        // Scan the top border; whenever the top pixel is foreground, probe the
        // bottom pixel of the same column as a fallback.
        int startX = 0;
        int startY = 0;
        while (startX < w && pixels[startX][startY]) {
            if (!pixels[startX][maxY]) {
                startY = maxY;
                break;
            }
            ++startX;
        }
        if (startX == w) {
            // Top and bottom borders were fully foreground: walk column 1
            // downwards (as the original lookup does), probing the right
            // border column as a fallback.
            startX = 1;
            while (startY < h && pixels[startX][startY]) {
                if (!pixels[maxX][startY]) {
                    startX = maxX;
                    break;
                }
                ++startY;
            }
        }
        if (startY == h) {
            // Every probed border pixel was foreground -- nothing to outline.
            return outlines;
        }

        // --- Flood fill the background, marking background pixels that touch
        // foreground as outline pixels. Neighbor order: up, right, down, left. ---
        final int[] dx = {0, 1, 0, -1};
        final int[] dy = {-1, 0, 1, 0};
        Queue<Point> pending = new LinkedList<Point>();
        pending.add(new Point(startX, startY));
        boolean[][] visited = new boolean[w][h];
        while (!pending.isEmpty()) {
            Point p = pending.poll();
            int x = p.x;
            int y = p.y;
            if (visited[x][y]) {
                continue;
            }
            visited[x][y] = true;
            if (pixels[x][y]) {
                continue;
            }
            for (int i = 0; i < 4; ++i) {
                int nx = x + dx[i];
                int ny = y + dy[i];
                if (nx < 0 || nx > maxX || ny < 0 || ny > maxY) {
                    continue;
                }
                if (pixels[nx][ny]) {
                    // Current background pixel borders the shape.
                    outlines[x][y] = true;
                } else if (!visited[nx][ny]) {
                    pending.add(new Point(nx, ny));
                }
            }
        }
        return outlines;
    }
}
From source file:org.apache.jackrabbit.oak.run.SegmentTarUtils.java
/**
 * Prints a size breakdown of the given segment-tar file store: total bytes in
 * data and bulk segments, the record usage analysis, and the segments that are
 * unreachable from the current head revision (i.e. collectable garbage).
 *
 * @param store the segment store to analyse
 */
private static void debugFileStore(FileStore store) {
    // Segment id -> ids of the segments it references (bulk segments reference nothing).
    Map<SegmentId, List<SegmentId>> referencedBy = Maps.newHashMap();
    int dataSegmentCount = 0;
    long dataSegmentBytes = 0;
    int bulkSegmentCount = 0;
    long bulkSegmentBytes = 0;
    // Suppress tracker logging while every segment is loaded.
    ((Logger) getLogger(SegmentTracker.class)).setLevel(Level.OFF);
    RecordUsageAnalyser analyser = new RecordUsageAnalyser(store.getReader());
    for (SegmentId sid : store.getSegmentIds()) {
        if (sid.isDataSegmentId()) {
            Segment segment = sid.getSegment();
            dataSegmentCount++;
            dataSegmentBytes += segment.size();
            referencedBy.put(sid, segment.getReferencedIds());
            analyseSegment(segment, analyser);
        } else if (sid.isBulkSegmentId()) {
            bulkSegmentCount++;
            bulkSegmentBytes += sid.getSegment().size();
            referencedBy.put(sid, Collections.<SegmentId>emptyList());
        }
    }
    System.out.println("Total size:");
    System.out.format("%s in %6d data segments%n", byteCountToDisplaySize(dataSegmentBytes), dataSegmentCount);
    System.out.format("%s in %6d bulk segments%n", byteCountToDisplaySize(bulkSegmentBytes), bulkSegmentCount);
    System.out.println(analyser.toString());

    // Remove everything reachable from the head; what remains is garbage.
    Set<SegmentId> unreachable = newHashSet(referencedBy.keySet());
    Queue<SegmentId> toVisit = Queues.newArrayDeque();
    toVisit.add(store.getRevisions().getHead().getSegmentId());
    while (!toVisit.isEmpty()) {
        SegmentId next = toVisit.remove();
        if (unreachable.remove(next)) {
            toVisit.addAll(referencedBy.get(next));
        }
    }

    // Second tally, restricted to the unreachable segments.
    dataSegmentCount = 0;
    dataSegmentBytes = 0;
    bulkSegmentCount = 0;
    bulkSegmentBytes = 0;
    for (SegmentId sid : unreachable) {
        if (sid.isDataSegmentId()) {
            dataSegmentCount++;
            dataSegmentBytes += sid.getSegment().size();
        } else if (sid.isBulkSegmentId()) {
            bulkSegmentCount++;
            bulkSegmentBytes += sid.getSegment().size();
        }
    }
    System.out.format("%nAvailable for garbage collection:%n");
    System.out.format("%s in %6d data segments%n", byteCountToDisplaySize(dataSegmentBytes), dataSegmentCount);
    System.out.format("%s in %6d bulk segments%n", byteCountToDisplaySize(bulkSegmentBytes), bulkSegmentCount);
}
From source file:org.kuali.rice.krad.uif.lifecycle.ViewLifecycleUtils.java
/**
 * Gets nested elements of the specified type, one layer deep. Unlike
 * getElementsOfTypeShallow, the element passed in is never included in the
 * result, even when it matches the requested type.
 *
 * @param element instance to get children for
 * @param elementType type for elements to return
 * @param <T> type of element that will be returned
 * @return list of child elements with the given type
 */
public static <T extends LifecycleElement> List<T> getNestedElementsOfTypeShallow(LifecycleElement element,
        Class<T> elementType) {
    if (element == null) {
        return Collections.emptyList();
    }

    // Lazily replaced with a mutable list on the first match.
    List<T> matches = Collections.emptyList();

    @SuppressWarnings("unchecked")
    Queue<LifecycleElement> toVisit = RecycleUtils.getInstance(LinkedList.class);

    try {
        toVisit.add(element);

        while (!toVisit.isEmpty()) {
            LifecycleElement candidate = toVisit.poll();

            if (candidate == null) {
                continue;
            }

            // Identity comparison: the root element itself is never a match.
            if (elementType.isInstance(candidate) && candidate != element) {
                if (matches.isEmpty()) {
                    matches = new ArrayList<T>();
                }
                matches.add(elementType.cast(candidate));
            }

            // Recurse into nested lifecycle elements, but stop at Component
            // boundaries to stay "one layer deep".
            for (LifecycleElement nested : getElementsForLifecycle(candidate).values()) {
                if (!(nested instanceof Component)) {
                    toVisit.offer(nested);
                }
            }
        }
    } finally {
        // Return the recycled queue in a clean state.
        toVisit.clear();
        RecycleUtils.recycle(toVisit);
    }

    return matches;
}
From source file:org.apache.tajo.rpc.RpcChannelFactory.java
/**
 * Returns an event loop group for the given client id, rotating through a
 * pool so that fetcher clients can spread work over several groups.
 *
 * @param clientId id identifying the client pool
 * @param workerNum number of worker threads for newly created groups
 * @return a usable (not shutting down) event loop group
 */
public static EventLoopGroup getSharedClientEventloopGroup(ClientChannelId clientId, int workerNum) {
    Queue<EventLoopGroup> pool;
    EventLoopGroup selected;

    synchronized (lockObjectForLoopGroup) {
        pool = eventLoopGroupPool.get(clientId);
        if (pool == null) {
            pool = createClientEventloopGroups(clientId, workerNum);
        }

        selected = pool.poll();
        if (isEventLoopGroupShuttingDown(selected)) {
            // The pooled group is unusable (or the pool was empty); replace it.
            selected = createClientEventloopGroup(clientId.name(), workerNum);
        }
        // Re-queue the group so subsequent callers rotate through the pool.
        pool.add(selected);
    }

    return selected;
}
From source file:org.apache.gobblin.HttpTestUtils.java
/**
 * Builds a queue of {@code size} buffered test records. Each record carries
 * synthetic "keys", "queryParams" and a JSON "body" derived from its index.
 *
 * @param size number of records to create
 * @param isHttpOperation when {@code true} each record is an {@code HttpOperation},
 *        otherwise a {@code MockGenericRecord}
 * @return queue of buffered records, in index order
 */
public static Queue<BufferedRecord<GenericRecord>> createQueue(int size, boolean isHttpOperation) {
    Queue<BufferedRecord<GenericRecord>> records = new ArrayDeque<>(size);

    for (int index = 0; index < size; index++) {
        Map<String, String> keys = new HashMap<>();
        keys.put("part1", index + "1");
        keys.put("part2", index + "2");

        Map<String, String> queryParams = new HashMap<>();
        queryParams.put("param1", index + "1");

        GenericRecord record;
        if (isHttpOperation) {
            record = new HttpOperation();
        } else {
            record = new MockGenericRecord();
        }
        record.put("keys", keys);
        record.put("queryParams", queryParams);
        record.put("body", "{\"id\":\"id" + index + "\"}");

        records.add(new BufferedRecord<>(record, null));
    }

    return records;
}
From source file:org.exoplatform.services.cms.impl.Utils.java
/**
 * Removes (or trashes) all symlinks pointing at the given node and at any of
 * its descendants, so that deleting the node leaves no dead links behind.
 *
 * @param node the node being deleted; the subtree below it is traversed breadth-first
 * @param keepInTrash {@code true} to move each symlink to the trash,
 *        {@code false} to remove it permanently
 * @throws Exception from trash-state lookup; traversal errors are logged and swallowed
 * @author Nguyen The Vinh from ECM of eXoPlatform (vinh.nguyen@exoplatform.com)
 */
public static void removeDeadSymlinks(Node node, boolean keepInTrash) throws Exception {
    // Nodes already in the trash are left untouched.
    if (isInTrash(node)) {
        return;
    }
    LinkManager linkManager = WCMCoreUtils.getService(LinkManager.class);
    TrashService trashService = WCMCoreUtils.getService(TrashService.class);
    SessionProvider sessionProvider = SessionProvider.createSystemProvider();
    // Breadth-first traversal of the subtree rooted at the given node.
    Queue<Node> queue = new LinkedList<Node>();
    queue.add(node);
    try {
        while (!queue.isEmpty()) {
            node = queue.poll();
            // Symlink nodes themselves are not traversed further.
            if (!node.isNodeType(EXO_SYMLINK)) {
                try {
                    List<Node> symlinks = linkManager.getAllLinks(node, EXO_SYMLINK);
                    // Before removing symlinks, order them by name descending, index
                    // descending (e.g. symlink[3], symlink[2], symlink[1]) to avoid
                    // same-name sibling indexes shifting while siblings are removed.
                    Collections.sort(symlinks, new Comparator<Node>() {
                        @Override
                        public int compare(Node node1, Node node2) {
                            try {
                                String name1 = node1.getName();
                                String name2 = node2.getName();
                                if (name1.equals(name2)) {
                                    int index1 = node1.getIndex();
                                    int index2 = node2.getIndex();
                                    // Descending index order for same-name siblings.
                                    return -1 * ((Integer) index1).compareTo(index2);
                                }
                                // Descending name order.
                                return -1 * name1.compareTo(name2);
                            } catch (RepositoryException e) {
                                // Treat nodes whose names cannot be read as equal.
                                return 0;
                            }
                        }
                    });
                    for (Node symlink : symlinks) {
                        // NOTE(review): synchronizing on a JCR node object — presumably
                        // intended to guard concurrent removal of the same link; confirm.
                        synchronized (symlink) {
                            if (keepInTrash) {
                                // Third argument is a trash option flag — TODO confirm meaning.
                                trashService.moveToTrash(symlink, sessionProvider, 1);
                            } else {
                                symlink.remove();
                            }
                        }
                    }
                } catch (Exception e) {
                    // Best-effort: a failure on one node must not stop the traversal.
                    if (LOG.isWarnEnabled()) {
                        LOG.warn(e.getMessage());
                    }
                }
                // Enqueue children for the next traversal layer.
                for (NodeIterator iter = node.getNodes(); iter.hasNext();) {
                    queue.add(iter.nextNode());
                }
            }
        }
    } catch (Exception e) {
        if (LOG.isWarnEnabled()) {
            LOG.warn(e.getMessage());
        }
    } finally {
        // Always release the system session provider.
        sessionProvider.close();
    }
}
From source file:au.org.ala.delta.intkey.ui.UIUtils.java
/** * Adds the supplied filename to the top of the most recently used files. * /*from w w w . ja v a 2s . co m*/ * @param filename */ public static void addFileToMRU(String filename, String title, List<Pair<String, String>> existingFiles) { // Strip any RTF formatting, and characters used as separators in the MRU text from the title. title = RTFUtils.stripFormatting(title); title = title.replace(MRU_ITEM_SEPARATOR, " "); title = title.replace(MRU_FILES_SEPARATOR, " "); Queue<String> q = new LinkedList<String>(); String newFilePathAndTitle; if (StringUtils.isEmpty(title)) { newFilePathAndTitle = filename + MRU_ITEM_SEPARATOR + filename; } else { newFilePathAndTitle = filename + MRU_ITEM_SEPARATOR + title; } q.add(newFilePathAndTitle); if (existingFiles != null) { for (Pair<String, String> existingFile : existingFiles) { String existingFilePathAndTitle = existingFile.getFirst() + MRU_ITEM_SEPARATOR + existingFile.getSecond(); if (!q.contains(existingFilePathAndTitle)) { q.add(existingFilePathAndTitle); } } } StringBuilder b = new StringBuilder(); for (int i = 0; i < MAX_SIZE_MRU && q.size() > 0; ++i) { if (i > 0) { b.append(MRU_FILES_SEPARATOR); } b.append(q.poll()); } Preferences prefs = Preferences.userNodeForPackage(Intkey.class); prefs.put(MRU_FILES_PREF_KEY, b.toString()); try { prefs.sync(); } catch (BackingStoreException e) { throw new RuntimeException(e); } }
From source file:com.cinchapi.concourse.lang.Parser.java
/**
 * Convert a valid and well-formed list of {@link Symbol} objects into a
 * Queue in postfix notation (shunting-yard algorithm).
 * <p>
 * NOTE: This method will group non-conjunctive symbols into
 * {@link Expression} objects.
 * </p>
 *
 * @param symbols the symbols in infix order
 * @return the symbols in postfix notation
 * @throws SyntaxException if the parentheses are mismatched
 */
public static Queue<PostfixNotationSymbol> toPostfixNotation(List<Symbol> symbols) {
    // Operator stack (conjunctions and left parentheses) and output queue.
    Deque<Symbol> stack = new ArrayDeque<Symbol>();
    Queue<PostfixNotationSymbol> queue = new LinkedList<PostfixNotationSymbol>();
    symbols = groupExpressions(symbols);
    for (Symbol symbol : symbols) {
        if (symbol instanceof ConjunctionSymbol) {
            // Pop operators of higher-or-equal precedence before pushing.
            // Only OR triggers pops (AND binds tighter than OR); an incoming
            // AND is pushed directly.
            while (!stack.isEmpty()) {
                Symbol top = stack.peek();
                if (symbol == ConjunctionSymbol.OR
                        && (top == ConjunctionSymbol.OR || top == ConjunctionSymbol.AND)) {
                    queue.add((PostfixNotationSymbol) stack.pop());
                } else {
                    break;
                }
            }
            stack.push(symbol);
        } else if (symbol == ParenthesisSymbol.LEFT) {
            stack.push(symbol);
        } else if (symbol == ParenthesisSymbol.RIGHT) {
            // Drain operators until the matching left parenthesis is found.
            boolean foundLeftParen = false;
            while (!stack.isEmpty()) {
                Symbol top = stack.peek();
                if (top == ParenthesisSymbol.LEFT) {
                    foundLeftParen = true;
                    break;
                } else {
                    queue.add((PostfixNotationSymbol) stack.pop());
                }
            }
            if (!foundLeftParen) {
                throw new SyntaxException(
                        MessageFormat.format("Syntax error in {0}: Mismatched parenthesis", symbols));
            } else {
                // Discard the matching left parenthesis.
                stack.pop();
            }
        } else {
            // Operand (grouped expression): emit directly.
            queue.add((PostfixNotationSymbol) symbol);
        }
    }
    // Flush remaining operators; any leftover parenthesis is a syntax error.
    while (!stack.isEmpty()) {
        Symbol top = stack.peek();
        if (top instanceof ParenthesisSymbol) {
            throw new SyntaxException(
                    MessageFormat.format("Syntax error in {0}: Mismatched parenthesis", symbols));
        } else {
            queue.add((PostfixNotationSymbol) stack.pop());
        }
    }
    return queue;
}
From source file:org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager.java
/**
 * Collects the locks registered under the given ZooKeeper parent path.
 *
 * @param conf Hive configuration
 * @param key the object to be compared against - if key is null, then get all
 *        locks (the whole tree under the parent is walked recursively)
 * @param parent ZooKeeper path component under which locks are stored
 * @param verifyTablePartition whether to verify the table/partition when
 *        reconstructing the lock object from the path
 * @param fetchData whether to also read each lock node's data payload
 * @return the matching locks; empty if the parent path cannot be read
 * @throws LockException declared for callers; read failures inside the walk
 *         are deliberately swallowed (best-effort listing)
 */
private static List<HiveLock> getLocks(HiveConf conf, HiveLockObject key, String parent,
        boolean verifyTablePartition, boolean fetchData) throws LockException {
    List<HiveLock> locks = new ArrayList<HiveLock>();
    List<String> children;
    // With a key we look only at that object's direct children; without one we
    // recurse through the whole tree under the parent.
    boolean recurse = true;
    String commonParent;

    try {
        if (key != null) {
            commonParent = "/" + parent + "/" + key.getName();
            children = curatorFramework.getChildren().forPath(commonParent);
            recurse = false;
        } else {
            commonParent = "/" + parent;
            children = curatorFramework.getChildren().forPath(commonParent);
        }
    } catch (Exception e) {
        // no locks present
        return locks;
    }

    // Seed the work queue with absolute paths of the first-level children.
    Queue<String> childn = new LinkedList<String>();
    if (children != null && !children.isEmpty()) {
        for (String child : children) {
            childn.add(commonParent + "/" + child);
        }
    }

    while (true) {
        String curChild = childn.poll();
        if (curChild == null) {
            // Queue exhausted: traversal complete.
            return locks;
        }

        if (recurse) {
            try {
                children = curatorFramework.getChildren().forPath(curChild);
                for (String child : children) {
                    childn.add(curChild + "/" + child);
                }
            } catch (Exception e) {
                // nothing to do -- node may have vanished; continue with the rest
            }
        }

        // Paths that do not encode a lock mode are intermediate nodes, not locks.
        HiveLockMode mode = getLockMode(curChild);
        if (mode == null) {
            continue;
        }

        HiveLockObjectData data = null;
        // set the lock object with a dummy data, and then do a set if needed.
        HiveLockObject obj = getLockObject(conf, curChild, mode, data, parent, verifyTablePartition);
        if (obj == null) {
            continue;
        }

        if ((key == null) || (obj.getName().equals(key.getName()))) {
            if (fetchData) {
                try {
                    data = new HiveLockObjectData(
                            new String(curatorFramework.getData().watched().forPath(curChild)));
                    data.setClientIp(clientIp);
                } catch (Exception e) {
                    LOG.error("Error in getting data for " + curChild, e);
                    // ignore error
                }
            }
            obj.setData(data);
            HiveLock lck = (HiveLock) (new ZooKeeperHiveLock(curChild, obj, mode));
            locks.add(lck);
        }
    }
}