List of usage examples for java.util.Queue#add
boolean add(E e);
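A quick reminder of the contract before the examples: add(e) inserts the element if the queue can accept it and throws IllegalStateException when a capacity-restricted queue is full, whereas offer(e) reports the same failure by returning false. A minimal, self-contained sketch (not taken from any of the projects below) illustrating the difference:

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddDemo {
    public static void main(String[] args) {
        // A capacity-restricted queue with room for two elements
        Queue<String> bounded = new ArrayBlockingQueue<String>(2);

        bounded.add("a");                        // returns true
        bounded.add("b");                        // returns true
        System.out.println(bounded.offer("c"));  // false: offer signals failure via its return value
        try {
            bounded.add("c");                    // add signals the same failure with an exception
        } catch (IllegalStateException e) {
            System.out.println("Queue full: " + e);
        }
    }
}

On unbounded implementations such as LinkedList or ArrayDeque, which most of the examples below use, add and offer behave identically.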
From source file: edu.umn.cs.spatialHadoop.nasa.StockQuadTree.java

/**
 * Constructs a stock quad tree for the given resolution
 * @param resolution
 */
StockQuadTree(int resolution) {
    this.resolution = resolution;
    this.r = new int[resolution * resolution];
    final int[] z = new int[resolution * resolution];
    // The list of all nodes
    Vector<Node> nodes = new Vector<Node>();
    // Compute the Z-order of all values
    for (int i = 0; i < z.length; i++) {
        short x = (short) (i % resolution);
        short y = (short) (i / resolution);
        int zorder = AggregateQuadTree.computeZOrder(x, y);
        z[i] = zorder;
        r[i] = i;
    }

    // Sort ArrayToZOrder1200 by Z-Order and keep the original position of
    // each element by mirroring all swaps to ZOrderToArray1200
    new QuickSort().sort(new IndexedSortable() {
        @Override
        public void swap(int i, int j) {
            int temp;
            // Swap z-values (which are to be sorted)
            temp = z[i];
            z[i] = z[j];
            z[j] = temp;
            // Swap their relative positions in the other array
            temp = r[i];
            r[i] = r[j];
            r[j] = temp;
        }

        @Override
        public int compare(int i, int j) {
            return z[i] - z[j];
        }
    }, 0, z.length);

    // Construct the structure of the quad tree based on Z-values
    // Maximum number of values per node. Set it to a very small number to
    // construct as many levels as possible. Notice that when quad trees
    // are aggregated, a single value might become 366 values in the same pos.
    final int capacity = 100;
    Node root = new Node();
    root.startPosition = 0;
    root.endPosition = z.length;
    root.id = 1;
    Queue<Node> nodesToCheckForSplit = new ArrayDeque<Node>();
    nodesToCheckForSplit.add(root);
    int numOfSignificantBitsInTree = getNumOfSignificantBits(resolution * resolution - 1);
    if ((numOfSignificantBitsInTree & 1) == 1)
        numOfSignificantBitsInTree++; // Round to next even value
    int maxId = 0;
    while (!nodesToCheckForSplit.isEmpty()) {
        Node nodeToCheckForSplit = nodesToCheckForSplit.poll();
        boolean needsToSplit = nodeToCheckForSplit.getNumOfElements() > capacity;
        if (nodeToCheckForSplit.id > maxId)
            maxId = nodeToCheckForSplit.id;
        nodes.add(nodeToCheckForSplit);
        if (needsToSplit) {
            // Need to split
            // Determine split points based on the Z-order values of the first and
            // last elements in this node
            int depth = nodeToCheckForSplit.id == 0 ? 0
                    : (getNumOfSignificantBits(nodeToCheckForSplit.id - 1) / 2 + 1);
            depth = (getNumOfSignificantBits(nodeToCheckForSplit.id) - 1) / 2;
            int numOfSignificantBitsInNode = numOfSignificantBitsInTree - depth * 2;

            // Create four child nodes under this node
            int zOrderCommonBits = z[nodeToCheckForSplit.startPosition]
                    & (0xffffffff << numOfSignificantBitsInNode);
            int childStartPosition = nodeToCheckForSplit.startPosition;
            for (int iChild = 0; iChild < 4; iChild++) {
                int zOrderUpperBound = zOrderCommonBits + ((iChild + 1) << (numOfSignificantBitsInNode - 2));
                int childEndPosition = Arrays.binarySearch(z, childStartPosition,
                        nodeToCheckForSplit.endPosition, zOrderUpperBound);
                if (childEndPosition < 0)
                    childEndPosition = -(childEndPosition + 1);
                Node child = new Node();
                child.startPosition = childStartPosition;
                child.endPosition = childEndPosition;
                child.id = nodeToCheckForSplit.id * 4 + iChild;
                nodesToCheckForSplit.add(child);
                // Prepare for next iteration
                childStartPosition = childEndPosition;
            }
            if (childStartPosition != nodeToCheckForSplit.endPosition)
                throw new RuntimeException();
        }
    }

    // Convert nodes to column format for memory efficiency
    nodesID = new int[nodes.size()];
    nodesStartPosition = new int[nodes.size()];
    nodesEndPosition = new int[nodes.size()];
    for (int i = 0; i < nodes.size(); i++) {
        Node node = nodes.get(i);
        nodesID[i] = node.id;
        nodesStartPosition[i] = node.startPosition;
        nodesEndPosition[i] = node.endPosition;
    }
}
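The constructor above drives the whole tree construction from a single queue: the root is seeded with add, and each poll either records the node or splits it into four children that are added back. The same work-list pattern, reduced to its essentials with a made-up Node class and an even split instead of the Z-order split (a sketch, not the project's code):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

public class WorkListSplit {
    // Hypothetical node covering a half-open range [start, end) of sorted values
    static class Node {
        final int start, end;
        Node(int start, int end) { this.start = start; this.end = end; }
        int size() { return end - start; }
    }

    static List<Node> build(int totalValues, int capacity) {
        List<Node> nodes = new ArrayList<Node>();
        Queue<Node> toCheck = new ArrayDeque<Node>();
        toCheck.add(new Node(0, totalValues));       // seed the work list with the root
        while (!toCheck.isEmpty()) {
            Node n = toCheck.poll();
            nodes.add(n);                            // every processed node is kept
            if (n.size() > capacity) {
                int quarter = n.size() / 4;          // split evenly instead of by Z-order
                for (int i = 0; i < 4; i++) {
                    int childStart = n.start + i * quarter;
                    int childEnd = (i == 3) ? n.end : childStart + quarter;
                    toCheck.add(new Node(childStart, childEnd));
                }
            }
        }
        return nodes;
    }

    public static void main(String[] args) {
        System.out.println(build(1000, 100).size() + " nodes created");
    }
}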
From source file: com.datatorrent.stram.webapp.TypeGraph.java

private void removeSubGraph(TypeGraphVertex v) {
    // Can't recursively remove because it will get into concurrent modification
    // Use queue to delete all nodes
    Queue<TypeGraphVertex> removingQueue = new LinkedList<>();
    removingQueue.add(v);
    while (!removingQueue.isEmpty()) {
        TypeGraphVertex tgv = removingQueue.poll();
        if (typeGraph.get(tgv.typeName) == null) {
            // skip node that's been removed already.
            // It comes from common descendants
            continue;
        }
        // put all the descendants to waiting queue
        for (TypeGraphVertex child : tgv.descendants) {
            removingQueue.offer(child);
        }
        // remove from global hashmap
        typeGraph.remove(tgv.typeName);
        // remove from instantiable descendants list of all the (in)direct ancestors
        if (!tgv.allInstantiableDescendants.isEmpty() && !tgv.ancestors.isEmpty()) {
            for (TypeGraphVertex p : tgv.ancestors) {
                removeFromInstantiableDescendants(p, tgv.allInstantiableDescendants);
            }
        }
        // cut links from parent to child
        for (TypeGraphVertex parent : tgv.ancestors) {
            parent.descendants.remove(tgv);
        }
        // cut links from child to parent
        tgv.ancestors.clear();
    }
}
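As the comments note, the queue replaces recursion so the graph is not modified while being traversed, and vertices reachable through several parents are skipped once they are already gone from the map. A stripped-down sketch of the same idea over a hypothetical DAG of named vertices (illustration only, not DataTorrent's API):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

public class SubGraphRemoval {
    // Hypothetical graph: vertex name -> names of its direct children
    static final Map<String, Set<String>> children = new HashMap<String, Set<String>>();

    static void removeSubGraph(String start) {
        Queue<String> removing = new ArrayDeque<String>();
        removing.add(start);                  // seed the queue with the root of the sub-graph
        while (!removing.isEmpty()) {
            String name = removing.poll();
            Set<String> kids = children.remove(name);
            if (kids == null) {
                continue;                     // already removed via another parent: skip it
            }
            for (String child : kids) {
                removing.add(child);          // breadth-first, no recursion
            }
        }
    }

    public static void main(String[] args) {
        children.put("A", new HashSet<String>(Arrays.asList("B", "C")));
        children.put("B", new HashSet<String>(Arrays.asList("D")));
        children.put("C", new HashSet<String>(Arrays.asList("D")));  // D has two parents
        children.put("D", new HashSet<String>());
        removeSubGraph("A");
        System.out.println(children);         // prints {} : the whole sub-graph is gone
    }
}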
From source file: org.apache.http2.impl.client.AuthenticationStrategyAdaptor.java

public Queue<AuthOption> select(final Map<String, Header> challenges, final HttpHost authhost,
        final HttpResponse response, final HttpContext context) throws MalformedChallengeException {
    if (challenges == null) {
        throw new IllegalArgumentException("Map of auth challenges may not be null");
    }
    if (authhost == null) {
        throw new IllegalArgumentException("Host may not be null");
    }
    if (response == null) {
        throw new IllegalArgumentException("HTTP response may not be null");
    }
    if (context == null) {
        throw new IllegalArgumentException("HTTP context may not be null");
    }
    Queue<AuthOption> options = new LinkedList<AuthOption>();
    CredentialsProvider credsProvider = (CredentialsProvider) context
            .getAttribute(ClientContext.CREDS_PROVIDER);
    if (credsProvider == null) {
        this.log.debug("Credentials provider not set in the context");
        return options;
    }
    AuthScheme authScheme;
    try {
        authScheme = this.handler.selectScheme(challenges, response, context);
    } catch (AuthenticationException ex) {
        if (this.log.isWarnEnabled()) {
            this.log.warn(ex.getMessage(), ex);
        }
        return options;
    }
    String id = authScheme.getSchemeName();
    Header challenge = challenges.get(id.toLowerCase(Locale.US));
    authScheme.processChallenge(challenge);
    AuthScope authScope = new AuthScope(authhost.getHostName(), authhost.getPort(), authScheme.getRealm(),
            authScheme.getSchemeName());
    Credentials credentials = credsProvider.getCredentials(authScope);
    if (credentials != null) {
        options.add(new AuthOption(authScheme, credentials));
    }
    return options;
}
From source file: org.batoo.jpa.benchmark.BenchmarkTest.java

private void test(final EntityManagerFactory emf, Queue<Runnable> workQueue, int length) {
    final CriteriaBuilder cb = emf.getCriteriaBuilder();
    final CriteriaQuery<Address> cq = cb.createQuery(Address.class);

    final Root<Person> r = cq.from(Person.class);
    final Join<Person, Address> a = r.join("addresses");
    a.fetch("country", JoinType.LEFT);
    a.fetch("person", JoinType.LEFT);
    cq.select(a);

    final ParameterExpression<Person> p = cb.parameter(Person.class);
    cq.where(cb.equal(r, p));

    for (int i = 0; i < length; i++) {
        workQueue.add(new Runnable() {

            @Override
            public void run() {
                try {
                    BenchmarkTest.this.singleTest(emf, BenchmarkTest.this.createPersons(), cq, p);
                } catch (final Exception e) {
                    BenchmarkTest.LOG.error(e, "Error while running the test");
                }
            }
        });
    }
}
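Here the queue acts as a buffer of Runnable work items that the benchmark fills up front and drains later. A minimal sketch of that fill-then-drain pattern with a thread-safe queue (all names below are made up for illustration):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class WorkQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        final Queue<Runnable> workQueue = new ConcurrentLinkedQueue<Runnable>();

        // Fill the queue with work items, as the benchmark does
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            workQueue.add(new Runnable() {
                @Override
                public void run() {
                    System.out.println("running task " + taskId);
                }
            });
        }

        // Drain it from a couple of worker threads; poll() returns null once the queue is empty
        Runnable worker = new Runnable() {
            @Override
            public void run() {
                Runnable task;
                while ((task = workQueue.poll()) != null) {
                    task.run();
                }
            }
        };
        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}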
From source file: org.apache.http.impl.client.AuthenticationStrategyAdaptor.java

public Queue<AuthOption> select(final Map<String, Header> challenges, final HttpHost authhost,
        final HttpResponse response, final HttpContext context) throws MalformedChallengeException {
    Args.notNull(challenges, "Map of auth challenges");
    Args.notNull(authhost, "Host");
    Args.notNull(response, "HTTP response");
    Args.notNull(context, "HTTP context");

    final Queue<AuthOption> options = new LinkedList<AuthOption>();
    final CredentialsProvider credsProvider = (CredentialsProvider) context
            .getAttribute(ClientContext.CREDS_PROVIDER);
    if (credsProvider == null) {
        this.log.debug("Credentials provider not set in the context");
        return options;
    }
    final AuthScheme authScheme;
    try {
        authScheme = this.handler.selectScheme(challenges, response, context);
    } catch (final AuthenticationException ex) {
        if (this.log.isWarnEnabled()) {
            this.log.warn(ex.getMessage(), ex);
        }
        return options;
    }
    final String id = authScheme.getSchemeName();
    final Header challenge = challenges.get(id.toLowerCase(Locale.US));
    authScheme.processChallenge(challenge);
    final AuthScope authScope = new AuthScope(authhost.getHostName(), authhost.getPort(), authScheme.getRealm(),
            authScheme.getSchemeName());
    final Credentials credentials = credsProvider.getCredentials(authScope);
    if (credentials != null) {
        options.add(new AuthOption(authScheme, credentials));
    }
    return options;
}
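In both HttpClient variants the method returns an ordered queue of authentication options rather than a single scheme, and an empty queue rather than null when nothing applies. A caller-side sketch of how such a queue is typically consumed; the AuthOption stand-in and the tryOption method are hypothetical, not HttpClient code:

import java.util.LinkedList;
import java.util.Queue;

public class AuthOptionConsumer {
    // Hypothetical stand-in for org.apache.http.auth.AuthOption
    static class AuthOption {
        final String schemeName;
        AuthOption(String schemeName) { this.schemeName = schemeName; }
    }

    // Hypothetical attempt: pretend only "digest" succeeds
    static boolean tryOption(AuthOption option) {
        return "digest".equals(option.schemeName);
    }

    public static void main(String[] args) {
        Queue<AuthOption> options = new LinkedList<AuthOption>();
        options.add(new AuthOption("negotiate"));   // most preferred option first
        options.add(new AuthOption("digest"));
        options.add(new AuthOption("basic"));

        // Try the options in order until one works; an empty queue simply means "give up"
        AuthOption option;
        while ((option = options.poll()) != null) {
            if (tryOption(option)) {
                System.out.println("authenticated with " + option.schemeName);
                break;
            }
        }
    }
}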
From source file: it.geosolutions.geobatch.opensdi.ndvi.NDVIStatsAction.java

/**
 * Execute process
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    // return object
    final Queue<EventObject> ret = new LinkedList<EventObject>();

    while (events.size() > 0) {
        final EventObject ev;
        try {
            if ((ev = events.remove()) != null) {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("Working on incoming event: " + ev.getSource());
                }
                if (ev instanceof FileSystemEvent) {
                    FileSystemEvent fileEvent = (FileSystemEvent) ev;
                    File file = fileEvent.getSource();
                    processXMLFile(file);
                }
                // add the event to the return
                ret.add(ev);
            } else {
                if (LOGGER.isErrorEnabled()) {
                    LOGGER.error("Encountered a NULL event: SKIPPING...");
                }
                continue;
            }
        } catch (Exception ioe) {
            final String message = "Unable to produce the output: " + ioe.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message, ioe);
            throw new ActionException(this, message);
        }
    }
    return ret;
}
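This action treats queues as a pipeline stage: incoming events are drained from one queue with remove() and forwarded into a fresh output queue with add(). A reduced sketch of that drain-process-forward shape, with plain strings standing in for GeoBatch events:

import java.util.LinkedList;
import java.util.Queue;

public class DrainAndForward {
    static Queue<String> execute(Queue<String> events) {
        Queue<String> ret = new LinkedList<String>();
        while (!events.isEmpty()) {
            // remove() throws NoSuchElementException on an empty queue,
            // so the isEmpty() check above guards it
            String ev = events.remove();
            // ... process the event here ...
            ret.add(ev);                       // forward the handled event to the caller
        }
        return ret;
    }

    public static void main(String[] args) {
        Queue<String> in = new LinkedList<String>();
        in.add("file1.xml");
        in.add("file2.xml");
        System.out.println(execute(in));       // [file1.xml, file2.xml]
    }
}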
From source file: replicatorg.app.gcode.GCodeParser.java

private void buildTCodes(GCodeCommand gcode, Queue<DriverCommand> commands) throws GCodeException {
    // M6 was historically used to wait for toolheads to get up to temperature, so
    // you may wish to avoid using M6 by using T
    if (driver instanceof MultiTool && ((MultiTool) driver).supportsSimultaneousTools())
        throw new GCodeException("the current driver" + driver.toString() + " does not support multipleTools");

    tool = (int) gcode.getCodeValue('T');
    commands.add(new replicatorg.drivers.commands.SelectTool(tool));

    currentOffset = driver.getOffset(tool + 1);
}
From source file: playground.sergioo.ptsim2013.qnetsimengine.PTQLink.java

void registerDriverAgentWaitingForCar(final MobsimDriverAgent agent) {
    final Id<Vehicle> vehicleId = agent.getPlannedVehicleId();
    Queue<MobsimDriverAgent> queue = driversWaitingForCars.get(vehicleId);
    if (queue == null) {
        queue = new LinkedList<MobsimDriverAgent>();
        driversWaitingForCars.put(vehicleId, queue);
    }
    queue.add(agent);
}
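The lookup-then-create dance around driversWaitingForCars is a common way to keep one queue per key. On Java 8 and later the same thing can be written more compactly with Map.computeIfAbsent; a small sketch with placeholder types instead of MATSim's classes:

import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;

public class PerKeyQueues {
    private final Map<String, Queue<String>> driversWaitingForCars = new HashMap<>();

    void register(String vehicleId, String agent) {
        // Create the per-vehicle queue on first use, then append the waiting agent
        driversWaitingForCars
                .computeIfAbsent(vehicleId, id -> new LinkedList<>())
                .add(agent);
    }

    public static void main(String[] args) {
        PerKeyQueues p = new PerKeyQueues();
        p.register("bus-1", "driver-A");
        p.register("bus-1", "driver-B");
        p.register("bus-2", "driver-C");
        // e.g. {bus-1=[driver-A, driver-B], bus-2=[driver-C]} (HashMap iteration order is not guaranteed)
        System.out.println(p.driversWaitingForCars);
    }
}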
From source file: replicatorg.app.gcode.GCodeParser.java

/**
 * Function parses a line of GCode, packages that line into an executable event
 * for the s3g driver code to execute, and queues the event for execution
 *
 * @param cmd a single line of GCode to parse, package, and send to the driver.
 */
public boolean parse(String cmd, Queue<DriverCommand> commandQueue) {

    // First, parse the GCode string into an object we can query.
    GCodeCommand gcode = new GCodeCommand(cmd);

    // Now, convert the GCode instruction into a series of driver commands,
    // that will be executed by execute()

    // If our driver is in pass-through mode, just put the string in a buffer and we are done.
    if (driver.isPassthroughDriver()) {
        commandQueue.add(new replicatorg.drivers.commands.GCodePassthrough(gcode.getCommand()));
    } else {
        try {
            if (gcode.hasCode('G')) {
                buildGCodes(gcode, commandQueue);
            } else if (gcode.hasCode('M')) {
                buildMCodes(gcode, commandQueue);
            } else if (gcode.hasCode('T')) {
                buildTCodes(gcode, commandQueue);
            }
        } catch (GCodeException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    return true;
}
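The parser never executes anything itself: each line of GCode is translated into command objects that are only buffered in the caller-supplied queue, which keeps parsing and execution decoupled. A bare-bones sketch of that producer/consumer split, with DriverCommand reduced to a hypothetical interface rather than ReplicatorG's real type:

import java.util.LinkedList;
import java.util.Queue;

public class CommandBufferDemo {
    // Hypothetical stand-in for replicatorg's DriverCommand
    interface DriverCommand {
        void run();
    }

    // Producer: translate a text command into queued command objects, but do not execute yet
    static boolean parse(String line, Queue<DriverCommand> commandQueue) {
        final String trimmed = line.trim();
        commandQueue.add(new DriverCommand() {
            @Override
            public void run() {
                System.out.println("executing: " + trimmed);
            }
        });
        return true;
    }

    public static void main(String[] args) {
        Queue<DriverCommand> queue = new LinkedList<DriverCommand>();
        parse("G1 X10 Y10", queue);
        parse("M104 S200", queue);

        // Consumer: a later execute() step drains the buffered commands in order
        DriverCommand cmd;
        while ((cmd = queue.poll()) != null) {
            cmd.run();
        }
    }
}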
From source file: edu.vt.middleware.ldap.pool.AbstractLdapPool.java

/** {@inheritDoc} */
public void validate() {
    this.poolLock.lock();
    try {
        if (this.active.size() == 0) {
            if (this.poolConfig.isValidatePeriodically()) {
                if (this.logger.isDebugEnabled()) {
                    this.logger.debug("validate for pool of size " + this.available.size());
                }

                final Queue<PooledLdap<T>> remove = new LinkedList<PooledLdap<T>>();
                for (PooledLdap<T> pl : this.available) {
                    if (this.logger.isTraceEnabled()) {
                        this.logger.trace("validating " + pl.getLdap());
                    }
                    if (this.ldapFactory.validate(pl.getLdap())) {
                        if (this.logger.isTraceEnabled()) {
                            this.logger.trace("ldap object passed validation: " + pl.getLdap());
                        }
                    } else {
                        if (this.logger.isWarnEnabled()) {
                            this.logger.warn("ldap object failed validation: " + pl.getLdap());
                        }
                        remove.add(pl);
                    }
                }
                for (PooledLdap<T> pl : remove) {
                    if (this.logger.isTraceEnabled()) {
                        this.logger.trace("removing " + pl.getLdap() + " from the pool");
                    }
                    this.available.remove(pl);
                    this.ldapFactory.destroy(pl.getLdap());
                }
            }
            this.initializePool();
            if (this.logger.isDebugEnabled()) {
                this.logger.debug("pool size after validation is " + this.available.size());
            }
        } else {
            if (this.logger.isDebugEnabled()) {
                this.logger.debug("pool is currently active, no validation performed");
            }
        }
    } finally {
        this.poolLock.unlock();
    }
}
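The remove queue here exists so that this.available is never mutated while it is being iterated, which would otherwise risk a ConcurrentModificationException: failing entries are collected in one pass and removed in a second. A minimal sketch of that collect-then-remove pattern, with a made-up validate check over plain integers:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

public class CollectThenRemove {
    // Hypothetical validation: only even values "pass"
    static boolean validate(Integer value) {
        return value % 2 == 0;
    }

    public static void main(String[] args) {
        List<Integer> available = new ArrayList<Integer>(Arrays.asList(1, 2, 3, 4, 5));

        // Pass 1: iterate and collect the failures; do not touch 'available' yet
        Queue<Integer> remove = new LinkedList<Integer>();
        for (Integer value : available) {
            if (!validate(value)) {
                remove.add(value);
            }
        }

        // Pass 2: now it is safe to remove (and clean up) the failed entries
        for (Integer failed : remove) {
            available.remove(failed);
        }
        System.out.println(available);   // [2, 4]
    }
}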