Usage examples for java.util.Queue#add(E)
boolean add(E e);
From source file:net.dv8tion.jda.core.audio.AudioConnection.java
/**
 * Lazily creates and starts the daemon thread that receives audio over the UDP socket.
 *
 * The thread loops until the socket is closed or the thread is interrupted:
 * it reads a datagram, decrypts it with the websocket's secret key, Opus-decodes
 * the payload and hands the PCM data to {@code receiveHandler} (per-user and/or
 * combined). Packets with unknown SSRC, out-of-order sequence numbers, or failed
 * decodes are dropped. Finally, if combined audio is requested, the combined
 * executor is set up.
 *
 * NOTE(review): assumes this method is only invoked while connected (receiveHandler
 * and webSocket wired up) — confirm against callers.
 */
private synchronized void setupReceiveThread() {
    if (receiveThread == null) {
        receiveThread = new Thread(AudioManagerImpl.AUDIO_THREADS, threadIdentifier + " Receiving Thread") {
            @Override
            public void run() {
                try {
                    // Low timeout so receive() wakes up regularly and the loop
                    // condition (socket closed / interrupted) is re-checked.
                    udpSocket.setSoTimeout(1000);
                } catch (SocketException e) {
                    LOG.log(e);
                }
                while (!udpSocket.isClosed() && !this.isInterrupted()) {
                    // 1920 bytes: buffer size used for a single voice datagram.
                    DatagramPacket receivedPacket = new DatagramPacket(new byte[1920], 1920);
                    try {
                        udpSocket.receive(receivedPacket);
                        if (receiveHandler != null
                                && (receiveHandler.canReceiveUser() || receiveHandler.canReceiveCombined())
                                && webSocket.getSecretKey() != null) {
                            if (!couldReceive) {
                                couldReceive = true;
                                sendSilentPackets();
                            }
                            AudioPacket decryptedPacket = AudioPacket.decryptAudioPacket(receivedPacket,
                                    webSocket.getSecretKey());
                            int ssrc = decryptedPacket.getSSRC();
                            final long userId = ssrcMap.get(ssrc);
                            Decoder decoder = opusDecoders.get(ssrc);
                            if (userId == ssrcMap.getNoEntryValue()) {
                                byte[] audio = decryptedPacket.getEncodedAudio();
                                //If the bytes are silence, then this was caused by a User joining the voice channel,
                                // and as such, we haven't yet received information to pair the SSRC with the UserId.
                                if (!Arrays.equals(audio, silenceBytes))
                                    LOG.debug("Received audio data with an unknown SSRC id. Ignoring");
                                continue;
                            }
                            if (decoder == null) {
                                // First packet from this SSRC: create its decoder lazily.
                                decoder = new Decoder(ssrc);
                                opusDecoders.put(ssrc, decoder);
                            }
                            if (!decoder.isInOrder(decryptedPacket.getSequence())) {
                                LOG.trace("Got out-of-order audio packet. Ignoring.");
                                continue;
                            }
                            User user = getJDA().getUserById(userId);
                            if (user == null)
                                LOG.warn("Received audio data with a known SSRC, but the userId associate with the SSRC is unknown to JDA!");
                            else {
                                // Opus packet-loss compensation (decoder.wasPacketLost) was
                                // previously here but is disabled.
                                short[] decodedAudio = decoder.decodeFromOpus(decryptedPacket);
                                //If decodedAudio is null, then the Opus decode failed, so throw away the packet.
                                if (decodedAudio == null) {
                                    LOG.trace("Received audio data but Opus failed to properly decode, instead it returned an error");
                                } else {
                                    if (receiveHandler.canReceiveUser()) {
                                        receiveHandler.handleUserAudio(new UserAudio(user, decodedAudio));
                                    }
                                    if (receiveHandler.canReceiveCombined()) {
                                        // Per-user queue of (timestamp, pcm) pairs consumed by the
                                        // combined-audio executor; ConcurrentLinkedQueue because this
                                        // thread produces while the executor thread consumes.
                                        Queue<Pair<Long, short[]>> queue = combinedQueue.get(user);
                                        if (queue == null) {
                                            queue = new ConcurrentLinkedQueue<>();
                                            combinedQueue.put(user, queue);
                                        }
                                        queue.add(Pair.<Long, short[]>of(System.currentTimeMillis(), decodedAudio));
                                    }
                                }
                            }
                        } else if (couldReceive) {
                            couldReceive = false;
                            sendSilentPackets();
                        }
                    } catch (SocketTimeoutException e) {
                        //Ignore. We set a low timeout so that we wont block forever so we can properly shutdown the loop.
                    } catch (SocketException e) {
                        //The socket was closed while we were listening for the next packet.
                        //This is expected. Ignore the exception. The thread will exit during the next while
                        // iteration because the udpSocket.isClosed() will return true.
                    } catch (Exception e) {
                        LOG.log(e);
                    }
                }
            }
        };
        receiveThread.setDaemon(true);
        receiveThread.start();
    }
    if (receiveHandler.canReceiveCombined()) {
        setupCombinedExecutor();
    }
}
From source file:nl.b3p.viewer.config.services.WMSService.java
/** * Update the tree structure of Layers by following the tree structure and * setting the parent and children accordingly. Reuses entities for layers * which are UNMODIFIED or UPDATED and inserts new entities for NEW layers. * <p>//from ww w. j ava2s . co m * Because virtual layers with null name cannot be updated, those are always * recreated and user set properties are lost, except those set on the top * layer which are preserved. * <p> * Interface should disallow setting user properties (especially authorizations) * on virtual layers. */ private void updateLayerTree(final WMSService update, final UpdateResult result) { Layer newTopLayer; String topLayerName = update.getTopLayer().getName(); if (topLayerName == null) { // Start with a new no name topLayer newTopLayer = update.getTopLayer().pluckCopy(); } else { // Old persistent top layer or new plucked copy from updated service newTopLayer = result.getLayerStatus().get(topLayerName).getLeft(); } // Copy user set stuff over from old toplayer, even if name was changed // or topLayer has no name newTopLayer.copyUserModifiedProperties(getTopLayer()); newTopLayer.setParent(null); newTopLayer.setService(this); newTopLayer.getChildren().clear(); setTopLayer(newTopLayer); // Do a breadth-first traversal to set the parent and fill the children // list of all layers. // For the breadth-first traversal save layers from updated service to // visit with their (possibly persistent) parent layers from this service // XXX why did we need BFS? 
Queue<Pair<Layer, Layer>> q = new LinkedList(); // Start at children of topLayer from updated service, topLayer handled // above for (Layer child : update.getTopLayer().getChildren()) { q.add(new ImmutablePair(child, newTopLayer)); } Set<String> visitedLayerNames = new HashSet(); do { // Remove from head of queue Pair<Layer, Layer> p = q.remove(); Layer updateLayer = p.getLeft(); // layer from updated service Layer parent = p.getRight(); // parent layer from this Layer thisLayer; String layerName = updateLayer.getName(); if (layerName == null) { // 'New' no name layer - we can't possibly guess if it is // the same as an already existing no name layer so always // new entity thisLayer = updateLayer.pluckCopy(); } else { if (visitedLayerNames.contains(layerName)) { // Duplicate layer in updated service -- ignore this one thisLayer = null; } else { // Find possibly already persistent updated layer // (depth first) - if new already a pluckCopy() thisLayer = result.getLayerStatus().get(layerName).getLeft(); visitedLayerNames.add(layerName); } } if (thisLayer != null) { thisLayer.setService(this); thisLayer.setParent(parent); parent.getChildren().add(thisLayer); } for (Layer child : updateLayer.getChildren()) { // Add add end of queue q.add(new ImmutablePair(child, thisLayer)); } } while (!q.isEmpty()); }
From source file:org.kuali.kra.award.awardhierarchy.AwardHierarchyServiceImpl.java
public Map<String, AwardHierarchy> getAwardHierarchy(AwardHierarchy anyNode, List<String> order) { Map<String, AwardHierarchy> result = new HashMap<String, AwardHierarchy>(); if (anyNode == null) { return result; }/*from w w w. j av a 2 s . c om*/ Map<String, Object> values = new HashMap<String, Object>(); //find all hierarchy BOs for the root award number. If the anyNode was got is the root, the award number //will be 'DEFAULT_AWARD_NUMBER' and therefore we will use the award number, otherwise, the root award number String rootAwardNumber = StringUtils.equals(Award.DEFAULT_AWARD_NUMBER, anyNode.getRootAwardNumber()) ? anyNode.getAwardNumber() : anyNode.getRootAwardNumber(); values.put("rootAwardNumber", rootAwardNumber); values.put("active", true); List<AwardHierarchy> hierarchyList = (List<AwardHierarchy>) legacyDataAdapter .findMatchingOrderBy(AwardHierarchy.class, values, "awardNumber", true); if (!hierarchyList.isEmpty()) { for (AwardHierarchy hierarchy : hierarchyList) { result.put(hierarchy.getAwardNumber(), hierarchy); //clear children in case this was already called and cached BOs were returned from OJB. hierarchy.getChildren().clear(); } AwardHierarchy rootNode = result.get(rootAwardNumber); for (AwardHierarchy hierarchy : result.values()) { hierarchy.setRoot(rootNode); AwardHierarchy parent = result.get(hierarchy.getParentAwardNumber()); if (parent != null) { parent.getChildren().add(hierarchy); hierarchy.setParent(parent); } } for (AwardHierarchy hierarchy : result.values()) { Collections.sort(hierarchy.getChildren(), new Comparator<AwardHierarchy>() { public int compare(AwardHierarchy arg0, AwardHierarchy arg1) { return arg0.getAwardNumber().compareTo(arg1.getAwardNumber()); } }); } Queue<AwardHierarchy> queue = new LinkedList<AwardHierarchy>(); queue.add(rootNode); while (!queue.isEmpty()) { AwardHierarchy node = queue.poll(); order.add(node.getAwardNumber()); queue.addAll(node.getChildren()); } } return result; }
From source file:org.exoplatform.ecm.webui.component.explorer.rightclick.manager.DeleteManageComponent.java
/**
 * Removes a node permanently, or moves it to the trash workspace.
 *
 * Permanent removal happens when {@code checkToMoveToTrash} is false or the node
 * is already in the trash; otherwise the node is moved to trash, affected WCM
 * caches are refreshed, and file-removal activity events are broadcast for the
 * node (or, for non-document containers, for every document/nt:file found by a
 * breadth-first scan of the subtree).
 *
 * @param nodePath           JCR path of the node being removed
 * @param node               the node itself
 * @param event              originating UI event, passed through to the helpers
 * @param isMultiSelect      whether this removal is part of a multi-selection
 * @param checkToMoveToTrash when true, prefer trashing over hard removal
 */
private void processRemoveOrMoveToTrash(String nodePath, Node node, Event<?> event, boolean isMultiSelect,
        boolean checkToMoveToTrash) throws Exception {
    if (!checkToMoveToTrash || Utils.isInTrash(node))
        processRemoveNode(nodePath, node, event, isMultiSelect);
    else {
        WCMComposer wcmComposer = WCMCoreUtils.getService(WCMComposer.class);
        // Taxonomy categories referencing this node; their caches are refreshed after the move.
        List<Node> categories = WCMCoreUtils.getService(TaxonomyService.class).getAllCategories(node);
        // Capture parent path/workspace before the move changes the node's location.
        String parentPath = node.getParent().getPath();
        String parentWSpace = node.getSession().getWorkspace().getName();
        wcmComposer.updateContent(parentWSpace, node.getPath(), new HashMap<String, String>());
        // Capture the UUID up front: after the move the node object may be stale.
        boolean isNodeReferenceable = Utils.isReferenceable(node);
        String nodeUUID = null;
        if (isNodeReferenceable)
            nodeUUID = node.getUUID();
        boolean moveOK = moveToTrash(nodePath, node, event, isMultiSelect);
        if (moveOK) {
            // Refresh composer caches for every category that pointed at the node.
            for (Node categoryNode : categories) {
                wcmComposer.updateContents(categoryNode.getSession().getWorkspace().getName(),
                        categoryNode.getPath(), new HashMap<String, String>());
            }
            PortletRequestContext pcontext = (PortletRequestContext) WebuiRequestContext.getCurrentInstance();
            PortletPreferences portletPref = pcontext.getRequest().getPreferences();
            String trashWorkspace = portletPref.getValue(Utils.TRASH_WORKSPACE, "");
            if (isNodeReferenceable) {
                wcmComposer.updateContent(trashWorkspace, nodeUUID, new HashMap<String, String>());
            }
            wcmComposer.updateContents(parentWSpace, parentPath, new HashMap<String, String>());
            //Broadcast the event when user move node to Trash
            ListenerService listenerService = WCMCoreUtils.getService(ListenerService.class);
            ActivityCommonService activityService = WCMCoreUtils.getService(ActivityCommonService.class);
            Node parent = node.getParent();
            if (node.getPrimaryNodeType().getName().equals(NodetypeConstant.NT_FILE)) {
                if (activityService.isBroadcastNTFileEvents(node)) {
                    listenerService.broadcast(ActivityCommonService.FILE_REMOVE_ACTIVITY, parent, node);
                }
            } else if (!isDocumentNodeType(node)) {
                // Not a file and not a document: BFS the subtree and broadcast a
                // removal activity for each document/nt:file found inside it.
                Queue<Node> queue = new LinkedList<Node>();
                queue.add(node);
                //Broadcast event to remove file activities
                Node tempNode = null;
                try {
                    while (!queue.isEmpty()) {
                        tempNode = queue.poll();
                        if (isDocumentNodeType(tempNode)
                                || tempNode.getPrimaryNodeType().getName().equals(NodetypeConstant.NT_FILE)) {
                            listenerService.broadcast(ActivityCommonService.FILE_REMOVE_ACTIVITY,
                                    tempNode.getParent(), tempNode);
                        } else {
                            // Only descend into containers (unstructured/folder) or documents.
                            for (NodeIterator iter = tempNode.getNodes(); iter.hasNext();) {
                                Node childNode = iter.nextNode();
                                if (isDocumentNodeType(childNode)
                                        || childNode.isNodeType(NodetypeConstant.NT_UNSTRUCTURED)
                                        || childNode.isNodeType(NodetypeConstant.NT_FOLDER))
                                    queue.add(childNode);
                            }
                        }
                    }
                } catch (Exception e) {
                    // Best-effort broadcasting: a failure here must not undo the move.
                    if (LOG.isWarnEnabled()) {
                        LOG.warn(e.getMessage());
                    }
                }
            }
        }
    }
}
From source file:org.glassfish.jersey.examples.sseitemstore.ItemStoreResourceTest.java
/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    // Each posted item produces, per listener, one item event and one "size"
    // event; the latch counts down on every received event.
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<Queue<Integer>>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        // Per-listener queue of item indexes received (or -999 on a read error).
        final Queue<Integer> indexes = new ConcurrentLinkedQueue<Integer>();
        indexQueues.add(indexes);

        es.register(new EventListener() {
            @SuppressWarnings("MagicNumber")
            @Override
            public void onEvent(InboundEvent inboundEvent) {
                try {
                    // Unnamed events carry item data; "size" events are counted separately.
                    if (inboundEvent.getName() == null) {
                        final String data = inboundEvent.readData();
                        LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId()
                                + " data=" + data);
                        indexes.add(items.indexOf(data));
                    } else if ("size".equals(inboundEvent.getName())) {
                        sizeEventsCount.incrementAndGet();
                    }
                } catch (Exception ex) {
                    LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                    // Sentinel marking a failed read so the size assertion still fails loudly.
                    indexes.add(-999);
                } finally {
                    latch.countDown();
                }
            }
        });
    }

    try {
        open(sources);

        for (String item : items) {
            postItem(itemsTarget, item);
        }

        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    // Every posted item must be retrievable from the store.
    String postedItems = itemsTarget.request().get(String.class);
    for (String item : items) {
        assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
    }

    // Every listener must have received exactly one event per item.
    int queueId = 0;
    for (Queue<Integer> indexes : indexQueues) {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId, indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId, items.size(),
                indexes.size());
        queueId++;
    }

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}
From source file:io.hops.ha.common.TransactionStateImpl.java
private void persistFiCaSchedulerNodeToRemove(ResourceDataAccess resourceDA, FiCaSchedulerNodeDataAccess ficaNodeDA, RMContainerDataAccess rmcontainerDA, LaunchedContainersDataAccess launchedContainersDA) throws StorageException { if (!ficaSchedulerNodeInfoToRemove.isEmpty()) { Queue<FiCaSchedulerNode> toRemoveFiCaSchedulerNodes = new ConcurrentLinkedQueue<FiCaSchedulerNode>(); for (String nodeId : ficaSchedulerNodeInfoToRemove.keySet()) { toRemoveFiCaSchedulerNodes.add(new FiCaSchedulerNode(nodeId)); }/* ww w .j av a2s . co m*/ ficaNodeDA.removeAll(toRemoveFiCaSchedulerNodes); } }
From source file:com.streamsets.pipeline.lib.jdbc.multithread.TestMultithreadedTableProvider.java
@NotNull private MultithreadedTableProvider createTableProvider(int numThreads, TableContext table, BatchTableStrategy batchTableStrategy) { Map<String, TableContext> tableContextMap = new HashMap<>(); String qualifiedName = table.getQualifiedName(); tableContextMap.put(qualifiedName, table); Queue<String> sortedTableOrder = new LinkedList<>(); sortedTableOrder.add(qualifiedName); Map<Integer, Integer> threadNumToMaxTableSlots = new HashMap<>(); return new MultithreadedTableProvider(tableContextMap, sortedTableOrder, threadNumToMaxTableSlots, numThreads, batchTableStrategy); }
From source file:org.shaman.terrain.polygonal.GraphToHeightmap.java
private void calculateBaseElevation() { //assign elevation to oceans for (Graph.Corner c : graph.corners) { if (c.ocean) { c.elevation = -1;//from w w w. j a v a2 s . c o m } } Queue<Graph.Corner> q = new ArrayDeque<>(); for (Graph.Corner c : graph.corners) { if (c.coast) { q.add(c); } } while (!q.isEmpty()) { Graph.Corner c = q.poll(); for (Graph.Corner r : c.adjacent) { float h = Math.max(-1, c.elevation - 0.2f); if (r.ocean && r.elevation < h) { r.elevation = h; q.add(r); } } } assignCenterElevations(); //render Geometry geom = createElevationGeometry(); Heightmap tmp = new Heightmap(size); render(tmp.getRawData(), geom, ColorRGBA.Black, -1, 1); //scale for (int x = 0; x < size; ++x) { for (int y = 0; y < size; ++y) { float h = tmp.getHeightAt(x, y); h = (float) (Math.signum(h) * Math.pow(Math.abs(h), HEIGHT_SCALING)); tmp.setHeightAt(x, y, h); } } //distort Noise distortionNoise = new Noise(rand.nextLong()); for (int x = 0; x < size; ++x) { for (int y = 0; y < size; ++y) { float s = x / (float) size; float t = y / (float) size; float ss = (float) (s + DISTORTION_AMPLITUDE * 2 * distortionNoise.noise(s * DISTORTION_FREQUENCY, t * DISTORTION_FREQUENCY, 0)); float tt = (float) (t + DISTORTION_AMPLITUDE * 2 * distortionNoise.noise(s * DISTORTION_FREQUENCY, t * DISTORTION_FREQUENCY, 3.4)); float v = tmp.getHeightInterpolating(ss * size, tt * size); heightmap.setHeightAt(x, y, v); } } //smooth for (int i = 0; i < SMOOTHING_STEPS; ++i) { smooth(heightmap); } //reset height for (Graph.Corner c : graph.corners) { if (c.ocean) { c.elevation = 0; } } assignCenterElevations(); LOG.info("base elevation assigned"); }
From source file:org.glassfish.jersey.examples.sseitemstore.ItemStoreResourceTest.java
/**
 * Test the {@link EventSource} reconnect feature.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testEventSourceReconnect() throws Exception {
    final WebTarget itemsTarget = target("items");
    // Counts down only on item (unnamed) events: MAX_ITEMS per round, two rounds, per listener.
    final CountDownLatch latch = new CountDownLatch(MAX_ITEMS * MAX_LISTENERS * 2); // countdown only on new item events
    final List<Queue<String>> receivedQueues = new ArrayList<Queue<String>>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        // Per-listener queue of received item payloads.
        final Queue<String> received = new ConcurrentLinkedQueue<String>();
        receivedQueues.add(received);

        es.register(new EventListener() {
            @Override
            public void onEvent(InboundEvent inboundEvent) {
                try {
                    // Only unnamed events carry item data; named ones are ignored here.
                    if (inboundEvent.getName() == null) {
                        latch.countDown();
                        final String data = inboundEvent.readData();
                        LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId()
                                + " data=" + data);
                        received.add(data);
                    }
                } catch (Exception ex) {
                    LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                    // Sentinel so the final size assertion flags the failed read.
                    received.add("[data processing error]");
                }
            }
        });
    }

    final String[] postedItems = new String[MAX_ITEMS * 2];
    try {
        open(sources);

        // Round 1: post items, forcing a server disconnect after each post so
        // clients exercise the default reconnect path.
        for (int i = 0; i < MAX_ITEMS; i++) {
            final String item = String.format("round-1-%02d", i);
            postItem(itemsTarget, item);
            postedItems[i] = item;
            sendCommand(itemsTarget, "disconnect");
            Thread.sleep(100);
        }

        // Tell the server to advertise a retry delay, then disconnect and wait
        // it out before posting round 2 while clients are reconnecting.
        final int reconnectDelay = 1;
        sendCommand(itemsTarget, "reconnect " + reconnectDelay);
        sendCommand(itemsTarget, "disconnect");

        Thread.sleep(reconnectDelay * 1000);

        // Round 2: items posted while listeners are in their reconnect window.
        for (int i = 0; i < MAX_ITEMS; i++) {
            final String item = String.format("round-2-%02d", i);
            postedItems[i + MAX_ITEMS] = item;
            postItem(itemsTarget, item);
        }

        sendCommand(itemsTarget, "reconnect now");

        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1 + MAX_LISTENERS * (MAX_ITEMS + 1) * reconnectDelay) * getAsyncTimeoutMultiplier(),
                        TimeUnit.SECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    // The store must contain every item from both rounds.
    final String storedItems = itemsTarget.request().get(String.class);
    for (String item : postedItems) {
        assertThat("Posted item '" + item + "' stored on server", storedItems, containsString(item));
    }

    // Each listener must have received exactly the posted items (order-independent).
    int sourceId = 0;
    for (Queue<String> queue : receivedQueues) {
        assertThat("Received events in source " + sourceId, queue,
                describedAs("Collection containing %0", hasItems(postedItems),
                        Arrays.asList(postedItems).toString()));
        assertThat("Size of received queue for source " + sourceId, queue.size(), equalTo(postedItems.length));
        sourceId++;
    }
}
From source file:org.photovault.common.SchemaUpdateAction.java
/**
 * Convert folder hierarchy from old schema to the new one.
 *
 * Walks the legacy {@code photo_collections} table breadth-first starting from
 * the root folder (legacy id 1), creating a new {@link PhotoFolder} entity for
 * each row, preserving UUIDs where present, and recording old-id-to-UUID
 * mappings in {@code folderUuids}. Progress events are fired as rows are
 * converted.
 *
 * NOTE(review): if photo_collections is empty, folderCount is 0 and the final
 * progress computation divides by zero — confirm whether an empty table is
 * possible at this migration step.
 *
 * @throws SQLException on database access errors
 */
private void convertFolders() throws SQLException {
    // NOTE(review): typo "COnverting" in the operation name string is preserved
    // here because it is runtime-visible text.
    SchemaUpdateOperation oper = new SchemaUpdateOperation("COnverting folders");
    log.debug("Starting to convert folders to new schema");
    // Two sessions: one for Hibernate entity work, one to borrow a raw JDBC
    // connection for reading the legacy tables.
    Session s = HibernateUtil.getSessionFactory().openSession();
    Session sqlSess = HibernateUtil.getSessionFactory().openSession();
    Connection conn = sqlSess.connection();

    // BFS queue of legacy folder ids whose children still need conversion.
    Queue<Integer> waiting = new LinkedList<Integer>();

    // Maps legacy integer ids to their newly created entities.
    Map<Integer, PhotoFolder> foldersById = new HashMap<Integer, PhotoFolder>();
    waiting.add(1);
    HibernateDAOFactory df = (HibernateDAOFactory) DAOFactory.instance(HibernateDAOFactory.class);
    df.setSession(s);
    PhotoFolderDAO folderDao = df.getPhotoFolderDAO();
    DTOResolverFactory rf = df.getDTOResolverFactory();

    // Ensure the well-known root folder exists before hanging children off it.
    PhotoFolder topFolder = folderDao.findByUUID(PhotoFolder.ROOT_UUID);
    if (topFolder == null) {
        topFolder = folderDao.create(PhotoFolder.ROOT_UUID, null);
        topFolder.setName("Top");
    }
    foldersById.put(1, topFolder);

    // Total row count, used only for progress percentage reporting.
    Statement countStmt = conn.createStatement();
    ResultSet countRs = countStmt.executeQuery("select count(*) from photo_collections");
    int folderCount = -1;
    if (countRs.next()) {
        folderCount = countRs.getInt(1);
    }
    countRs.close();
    countStmt.close();
    int convertedCount = 0;

    // NOTE(review): stmt is never closed; consider try/finally in a follow-up.
    PreparedStatement stmt = conn.prepareStatement("select * from photo_collections where parent = ?");
    while (!waiting.isEmpty()) {
        int parentId = waiting.remove();
        PhotoFolder parent = foldersById.get(parentId);
        log.debug("Querying for folders with parent " + parentId);
        stmt.setInt(1, parentId);
        ResultSet rs = stmt.executeQuery();
        while (rs.next()) {
            // Create the folder
            /* TODO: should the UUID be created algorithmically? Or better, how
               to ensure that UUIDs for folders that are part of external volume
               will always be the same? */
            fireStatusChangeEvent(new SchemaUpdateEvent(oper, convertedCount * 100 / folderCount));
            String uuidStr = rs.getString("collection_uuid");
            int id = rs.getInt("collection_id");
            log.debug("Creating folder with old id " + id + ", uuid " + uuidStr);
            // Keep the legacy UUID if one was stored, otherwise mint a new one;
            // the legacy root (id 1) always maps to the well-known root UUID.
            UUID uuid = (uuidStr != null) ? UUID.fromString(uuidStr) : UUID.randomUUID();
            if (id == 1) {
                uuid = PhotoFolder.ROOT_UUID;
            }
            PhotoFolder f = folderDao.create(uuid, parent);
            // Name/description are applied through the versioned editor proxy so
            // the change is recorded as a proper edit.
            VersionedObjectEditor<PhotoFolder> e = f.editor(rf);
            FolderEditor fe = (FolderEditor) e.getProxy();
            fe.setName(rs.getString("collection_name"));
            fe.setDescription(rs.getString("collection_desc"));
            e.apply();
            /* TODO: how to set the create time & last modified time without
               exposing them to others? */
            log.debug("folder saved");
            foldersById.put(id, f);
            folderUuids.put(id, uuid);
            // Enqueue this folder so its own children get converted next.
            waiting.add(id);
            convertedCount++;
        }
        try {
            rs.close();
        } catch (SQLException e) {
            log.error("Error closing result set", e);
        }
    }
    s.flush();
    sqlSess.close();
    s.close();
    fireStatusChangeEvent(new SchemaUpdateEvent(oper, convertedCount * 100 / folderCount));
}