List of usage examples for java.util.TreeSet first()
public E first()
Returns the first (lowest) element currently in this set; throws NoSuchElementException if the set is empty.
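Before the real-world examples below, a minimal standalone sketch (not from any of the source files listed) showing the basic contract of first(): it returns the smallest element under natural ordering, and throws NoSuchElementException on an empty set.

import java.util.NoSuchElementException;
import java.util.TreeSet;

public class FirstDemo {
    public static void main(String[] args) {
        TreeSet<Integer> set = new TreeSet<>();
        set.add(30);
        set.add(10);
        set.add(20);
        // first() returns the lowest element under natural ordering
        System.out.println(set.first()); // prints 10
        set.clear();
        // first() on an empty TreeSet throws NoSuchElementException
        try {
            set.first();
        } catch (NoSuchElementException e) {
            System.out.println("empty set: " + e);
        }
    }
}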
From source file:odcplot.OdcPlot.java
/**
 * Using previously set up object members, verify the channel and get needed info.
 * @return true if we got what we needed, else false after we've printed errors.
 */
private boolean getChanInfo() throws SQLException {
    boolean ret;
    long strt = System.currentTimeMillis();
    {
        int n;
        if (channelName == null || channelName.isEmpty()) {
            throw new IllegalArgumentException("No Channel specified");
        }
        if (server == null || server.isEmpty()) {
            n = chanTbl.getBestMatch(channelName);
            chanInfo = chanTbl.getChanInfo(n);
        } else {
            TreeSet<ChanInfo> chSet = chanTbl.getAsSet(server, channelName, "raw", 10);
            if (!chSet.isEmpty()) {
                chanInfo = chSet.first();
            }
        }
        if (chanInfo == null) {
            System.err.println("Channel requested was not found: " + channelName);
            return false; // bail out here; the code below would dereference a null chanInfo
        }
        sampleRate = chanInfo.getRate();
        String dtyp = chanInfo.getdType();
        if (dtyp.equalsIgnoreCase("INT-16")) {
            bytesPerSample = 2;
        } else if (dtyp.equalsIgnoreCase("INT-32") || dtyp.equalsIgnoreCase("UINT-32")) {
            bytesPerSample = 4;
        } else if (dtyp.equalsIgnoreCase("INT-64")) {
            bytesPerSample = 8;
        } else if (dtyp.equalsIgnoreCase("FLT-32")) {
            bytesPerSample = 4;
        } else if (dtyp.equalsIgnoreCase("FLT-64")) {
            bytesPerSample = 8;
        } else if (dtyp.equalsIgnoreCase("CPX-64")) {
            bytesPerSample = 8;
        }
        if (server == null || server.isEmpty()) {
            server = chanInfo.getServer();
        }
        ret = true;
        String sRateStr = sampleRate < 1 ? String.format("%1$.3f", sampleRate)
                : String.format("%1$.0f", sampleRate);
        System.out.format("\nChan: %1$s, sample rate: %2$s, bytes per sample: %3$d\n",
                channelName, sRateStr, bytesPerSample);
        float dur = (System.currentTimeMillis() - strt) / 1000.f;
        System.out.format("Get channel info took %1$.1f sec.\n", dur);
    }
    return ret;
}
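The isEmpty() guard before chSet.first() above is the standard way to avoid NoSuchElementException. A minimal sketch of that pattern as a reusable helper (the helper name firstOrNull is ours, not from the example):

import java.util.TreeSet;

class SafeFirst {
    /** Returns the lowest element, or null when the set is empty. */
    static <E> E firstOrNull(TreeSet<E> set) {
        // guard: first() would throw NoSuchElementException on an empty set
        return set.isEmpty() ? null : set.first();
    }
}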
From source file:pt.scanner.server.data.Line.java
public Line expandedLine(Size size) {
    Coordinate left = lineIntersection(new Line(0, 0, 0, size.height()));
    Coordinate right = lineIntersection(new Line(size.width(), 0, size.width(), size.height()));
    Coordinate top = lineIntersection(new Line(0, 0, size.width(), 0));
    Coordinate bottom = lineIntersection(new Line(0, size.height(), size.width(), size.height()));
    TreeSet<Coordinate> coords = new TreeSet<>();
    if (left != null && Utils.between(0.0, (double) FastMath.round(left.x), (double) size.width())
            && Utils.between(0.0, (double) FastMath.round(left.y), (double) size.height())) {
        coords.add(new Coordinate(FastMath.round(left.x), FastMath.round(left.y)));
    }
    if (right != null && Utils.between(0.0, (double) FastMath.round(right.x), (double) size.width())
            && Utils.between(0.0, (double) FastMath.round(right.y), (double) size.height())) {
        coords.add(new Coordinate(FastMath.round(right.x), FastMath.round(right.y)));
    }
    if (top != null && Utils.between(0.0, (double) FastMath.round(top.x), (double) size.width())
            && Utils.between(0.0, (double) FastMath.round(top.y), (double) size.height())) {
        coords.add(new Coordinate(FastMath.round(top.x), FastMath.round(top.y)));
    }
    if (bottom != null && Utils.between(0.0, (double) FastMath.round(bottom.x), (double) size.width())
            && Utils.between(0.0, (double) FastMath.round(bottom.y), (double) size.height())) {
        coords.add(new Coordinate(FastMath.round(bottom.x), FastMath.round(bottom.y)));
    }
    // first() and last() give the two extreme intersection points in sorted order
    Line newLine = new Line(coords.first(), coords.last());
    return newLine;
}
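Taking first() and last() of the same TreeSet, as above, extracts the minimum and maximum of a candidate set in one pass. A minimal sketch of the idea with plain integers:

import java.util.List;
import java.util.TreeSet;

class Extremes {
    public static void main(String[] args) {
        TreeSet<Integer> candidates = new TreeSet<>(List.of(42, 7, 19, 3));
        // first() is the minimum, last() the maximum, under natural ordering
        System.out.println(candidates.first() + " .. " + candidates.last()); // 3 .. 42
    }
}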
From source file:org.rhwlab.BHC.BHCTree.java
public Nucleus[] cutToN(int n, double minVolume, double maxProb) {
    int cutN = n;
    TreeSet<NucleusLogNode> volReducedCut;
    ArrayList<Nucleus> retList = new ArrayList<>();
    while (true) {
        TreeSet<NucleusLogNode> cut = cutToExactlyN_Nodes(cutN); // cuts to exactly cutN
        volReducedCut = new TreeSet<>();
        int i = 1;
        retList.clear();
        for (NucleusLogNode logNode : cut) {
            BHCNucleusData nucData = BHCNucleusData.factory(logNode, time);
            if (nucData != null && nucData.getVolume() >= minVolume) {
                volReducedCut.add(logNode);
                retList.add(new Nucleus(nucData));
                ++i;
            }
        }
        // first() is the lowest node in the cut; its posterior decides whether to deepen the cut
        double prob = Math.exp(cut.first().getLogPosterior());
        if (prob <= maxProb) {
            ++cutN;
        } else {
            break;
        }
    }
    return retList.toArray(new Nucleus[0]);
}
From source file:org.wso2.andes.kernel.slot.SlotManager.java
/**
 * Record Slot's last message ID related to a particular queue.
 *
 * @param queueName               name of the queue this message ID belongs to
 * @param nodeId                  node ID of the node that is sending the request
 * @param startMessageIdInTheSlot start message ID of the slot
 * @param lastMessageIdInTheSlot  last message ID of the slot
 */
public void updateMessageID(String queueName, String nodeId, long startMessageIdInTheSlot,
        long lastMessageIdInTheSlot) {
    // Read message ID set for slots from Hazelcast
    TreeSet<Long> messageIdSet = new TreeSet<Long>();
    TreeSetLongWrapper wrapper = slotIDMap.get(queueName);
    if (wrapper == null) {
        wrapper = new TreeSetLongWrapper();
        wrapper.setLongTreeSet(messageIdSet);
        slotIDMap.putIfAbsent(queueName, wrapper);
    }
    messageIdSet = wrapper.getLongTreeSet();
    String lockKey = queueName + SlotManager.class;
    synchronized (lockKey.intern()) {
        Long lastAssignedMessageId = queueToLastAssignedIDMap.get(queueName);
        // Check if the input slot's start message ID is less than the last assigned message ID
        if ((null != lastAssignedMessageId) && startMessageIdInTheSlot < lastAssignedMessageId) {
            if (log.isDebugEnabled()) {
                log.debug("Found overlapping slots during slot submit: " + startMessageIdInTheSlot
                        + " to : " + lastMessageIdInTheSlot + ". Comparing to lastAssignedID : "
                        + lastAssignedMessageId);
            }
            // Find overlapping slots
            TreeSet<Slot> overlappingSlots = getOverlappedAssignedSlots(queueName,
                    startMessageIdInTheSlot, lastMessageIdInTheSlot);
            if (overlappingSlots.size() > 0) {
                if (log.isDebugEnabled()) {
                    log.debug("Found " + overlappingSlots.size() + " overlapping slots.");
                }
                // A piece of the slot precedes the earliest assigned slot:
                // break that piece off and add it as a new, unassigned slot.
                if (startMessageIdInTheSlot < overlappingSlots.first().getStartMessageId()) {
                    Slot leftExtraSlot = new Slot(startMessageIdInTheSlot,
                            overlappingSlots.first().getStartMessageId() - 1, queueName);
                    if (log.isDebugEnabled()) {
                        log.debug("LeftExtra Slot in overlapping slots : " + leftExtraSlot);
                    }
                }
                // A piece of the slot extends past the latest assigned slot:
                // break that piece off and add it as a new, unassigned slot.
                if (lastMessageIdInTheSlot > overlappingSlots.last().getEndMessageId()) {
                    Slot rightExtraSlot = new Slot(overlappingSlots.last().getEndMessageId() + 1,
                            lastMessageIdInTheSlot, queueName);
                    if (log.isDebugEnabled()) {
                        log.debug("RightExtra in overlapping slot : " + rightExtraSlot);
                    }
                    // Update last message ID - expand the ongoing slot to cover this leftover part
                    messageIdSet.add(lastMessageIdInTheSlot);
                    wrapper.setLongTreeSet(messageIdSet);
                    slotIDMap.set(queueName, wrapper);
                    if (log.isDebugEnabled()) {
                        log.debug(lastMessageIdInTheSlot + " added to slotIdMap (RightExtraSlot). "
                                + "Current values in map " + messageIdSet);
                    }
                    // Record last published message ID
                    nodeToLastPublishedIDMap.set(nodeId, lastMessageIdInTheSlot);
                }
            }
        } else {
            // Update the slotIDMap only if the last assigned message ID is less than the new start message ID
            messageIdSet.add(lastMessageIdInTheSlot);
            wrapper.setLongTreeSet(messageIdSet);
            slotIDMap.set(queueName, wrapper);
            if (log.isDebugEnabled()) {
                log.debug("No overlapping slots found during slot submit " + startMessageIdInTheSlot
                        + " to : " + lastMessageIdInTheSlot + ". Added msgID "
                        + lastMessageIdInTheSlot + " to slotIDMap");
            }
            // Record last published message ID
            nodeToLastPublishedIDMap.set(nodeId, lastMessageIdInTheSlot);
        }
    }
}
From source file:org.apache.lens.driver.jdbc.ColumnarSQLRewriter.java
public String getClause() {
    if (clauseName == null) {
        // Clause names come back as an unordered Set; copying into a TreeSet and
        // taking first() makes the choice deterministic.
        TreeSet<String> ks = new TreeSet<String>(qb.getParseInfo().getClauseNames());
        clauseName = ks.first();
    }
    return clauseName;
}
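Copying an unordered Set into a TreeSet and calling first(), as above, is a cheap way to pick one element deterministically. A minimal sketch (the clause names here are made up for illustration):

import java.util.Set;
import java.util.TreeSet;

class DeterministicPick {
    public static void main(String[] args) {
        Set<String> clauses = Set.of("insclause-1", "insclause-0", "insclause-2");
        // Iteration order of Set.of is unspecified; TreeSet.first() is always the same element
        String chosen = new TreeSet<>(clauses).first();
        System.out.println(chosen); // insclause-0
    }
}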
From source file:org.gvsig.framework.web.service.impl.OGCInfoServiceImpl.java
public List<String> getLayersBoundingBox(String urlServer, String typeLayer, String crs,
        TreeSet<String> layers) {
    List<String> boundingBox = null;
    if ("wms".equalsIgnoreCase(typeLayer)) {
        boundingBox = getWMSLayersBoundingBox(urlServer, crs, layers);
    } else {
        if ("wmts".equalsIgnoreCase(typeLayer)) {
            // WMTS takes only one layer, so pass just the first one
            boundingBox = getWMTSLayersBoundingBox(urlServer, crs, layers.first());
        }
    }
    return boundingBox;
}
From source file:org.apache.lens.driver.jdbc.ColumnarSQLRewriter.java
/**
 * Analyze internal.
 *
 * @throws SemanticException the semantic exception
 */
public void analyzeInternal(Configuration conf, HiveConf hconf) throws SemanticException {
    CubeSemanticAnalyzer c1 = new CubeSemanticAnalyzer(conf, hconf);
    QB qb = new QB(null, null, false);
    if (!c1.doPhase1(ast, qb, c1.initPhase1Ctx(), null)) {
        return;
    }
    if (!qb.getSubqAliases().isEmpty()) {
        log.warn("Subqueries in from clause is not supported by {} Query : {}", this, this.query);
        throw new SemanticException(
                "Subqueries in from clause is not supported by " + this + " Query : " + this.query);
    }
    // Get clause name
    TreeSet<String> ks = new TreeSet<String>(qb.getParseInfo().getClauseNames());
    clauseName = ks.first();
    // Split query into trees
    if (qb.getParseInfo().getWhrForClause(clauseName) != null) {
        this.whereTree = HQLParser.getString(qb.getParseInfo().getWhrForClause(clauseName));
        this.whereAST = qb.getParseInfo().getWhrForClause(clauseName);
    }
    if (qb.getParseInfo().getHavingForClause(clauseName) != null) {
        this.havingTree = HQLParser.getString(qb.getParseInfo().getHavingForClause(clauseName));
        this.havingAST = qb.getParseInfo().getHavingForClause(clauseName);
    }
    if (qb.getParseInfo().getOrderByForClause(clauseName) != null) {
        this.orderByTree = HQLParser.getString(qb.getParseInfo().getOrderByForClause(clauseName));
        this.orderByAST = qb.getParseInfo().getOrderByForClause(clauseName);
    }
    if (qb.getParseInfo().getGroupByForClause(clauseName) != null) {
        this.groupByTree = HQLParser.getString(qb.getParseInfo().getGroupByForClause(clauseName));
        this.groupByAST = qb.getParseInfo().getGroupByForClause(clauseName);
    }
    if (qb.getParseInfo().getSelForClause(clauseName) != null) {
        this.selectTree = HQLParser.getString(qb.getParseInfo().getSelForClause(clauseName));
        this.selectAST = qb.getParseInfo().getSelForClause(clauseName);
    }
    this.joinTree = HQLParser.getString(qb.getParseInfo().getJoinExpr());
    this.joinAST = qb.getParseInfo().getJoinExpr();
    this.fromAST = HQLParser.findNodeByPath(ast, TOK_FROM);
    this.fromTree = HQLParser.getString(fromAST);
}
From source file:org.apache.lens.cube.parse.CubeQueryContext.java
public void print() {
    if (!log.isDebugEnabled()) {
        return;
    }
    StringBuilder builder = new StringBuilder().append("ASTNode:").append(ast.dump()).append("\n")
            .append("QB:").append("\n numJoins:").append(qb.getNumJoins()).append("\n numGbys:")
            .append(qb.getNumGbys()).append("\n numSels:").append(qb.getNumSels())
            .append("\n numSelDis:").append(qb.getNumSelDi()).append("\n aliasToTabs:");
    Set<String> tabAliases = qb.getTabAliases();
    for (String alias : tabAliases) {
        builder.append("\n\t").append(alias).append(":").append(qb.getTabNameForAlias(alias));
    }
    builder.append("\n aliases:");
    for (String alias : qb.getAliases()) {
        builder.append(alias);
        builder.append(", ");
    }
    builder.append("id:").append(qb.getId()).append("isQuery:").append(qb.getIsQuery())
            .append("\n QBParseInfo");
    QBParseInfo parseInfo = qb.getParseInfo();
    builder.append("\n isSubQ: ").append(parseInfo.getIsSubQ()).append("\n alias: ")
            .append(parseInfo.getAlias());
    if (parseInfo.getJoinExpr() != null) {
        builder.append("\n joinExpr: ").append(parseInfo.getJoinExpr().dump());
    }
    builder.append("\n hints: ").append(parseInfo.getHints());
    builder.append("\n aliasToSrc: ");
    for (String alias : tabAliases) {
        builder.append("\n\t").append(alias).append(": ").append(parseInfo.getSrcForAlias(alias).dump());
    }
    TreeSet<String> clauses = new TreeSet<String>(parseInfo.getClauseNames());
    for (String clause : clauses) {
        builder.append("\n\t").append(clause).append(": ").append(parseInfo.getClauseNamesForDest());
    }
    String clause = clauses.first();
    if (parseInfo.getWhrForClause(clause) != null) {
        builder.append("\n whereexpr: ").append(parseInfo.getWhrForClause(clause).dump());
    }
    if (parseInfo.getGroupByForClause(clause) != null) {
        builder.append("\n groupby expr: ").append(parseInfo.getGroupByForClause(clause).dump());
    }
    if (parseInfo.getSelForClause(clause) != null) {
        builder.append("\n sel expr: ").append(parseInfo.getSelForClause(clause).dump());
    }
    if (parseInfo.getHavingForClause(clause) != null) {
        builder.append("\n having expr: ").append(parseInfo.getHavingForClause(clause).dump());
    }
    if (parseInfo.getDestLimit(clause) != null) {
        builder.append("\n limit: ").append(parseInfo.getDestLimit(clause));
    }
    if (parseInfo.getAllExprToColumnAlias() != null && !parseInfo.getAllExprToColumnAlias().isEmpty()) {
        builder.append("\n exprToColumnAlias:");
        for (Map.Entry<ASTNode, String> entry : parseInfo.getAllExprToColumnAlias().entrySet()) {
            builder.append("\n\t expr: ").append(entry.getKey().dump()).append(" ColumnAlias: ")
                    .append(entry.getValue());
        }
    }
    if (parseInfo.getAggregationExprsForClause(clause) != null) {
        builder.append("\n aggregateexprs:");
        for (Map.Entry<String, ASTNode> entry : parseInfo.getAggregationExprsForClause(clause).entrySet()) {
            builder.append("\n\t key: ").append(entry.getKey()).append(" expr: ")
                    .append(entry.getValue().dump());
        }
    }
    if (parseInfo.getDistinctFuncExprsForClause(clause) != null) {
        builder.append("\n distinctFuncExprs:");
        for (ASTNode entry : parseInfo.getDistinctFuncExprsForClause(clause)) {
            builder.append("\n\t expr: ").append(entry.dump());
        }
    }
    if (qb.getQbJoinTree() != null) {
        builder.append("\n\n JoinTree");
        QBJoinTree joinTree = qb.getQbJoinTree();
        printJoinTree(joinTree, builder);
    }
    if (qb.getParseInfo().getDestForClause(clause) != null) {
        builder.append("\n Destination:").append("\n\t dest expr:")
                .append(qb.getParseInfo().getDestForClause(clause).dump());
    }
    log.debug(builder.toString());
}
From source file:edu.internet2.middleware.subject.provider.LdapSourceAdapter.java
/**
 * {@inheritDoc}
 */
@Override
public Set search(String searchValue) {
    Comparator cp = new LdapComparator();
    TreeSet result = new TreeSet(cp);
    Search search = getSearch("search");
    if (search == null) {
        log.error("searchType: \"search\" not defined.");
        return result;
    }
    Search searchA = getSearch("searchAttributes");
    boolean noAttrSearch = true;
    if (searchA != null) {
        noAttrSearch = false;
    }
    Iterator<SearchResult> ldapResults = getLdapResults(search, searchValue, allAttributeNames);
    if (ldapResults == null) {
        return result;
    }
    while (ldapResults.hasNext()) {
        SearchResult si = (SearchResult) ldapResults.next();
        Attributes attributes = si.getAttributes();
        Subject subject = createSubject(attributes);
        if (noAttrSearch) {
            ((LdapSubject) subject).setAttributesGotten(true);
        }
        result.add(subject);
    }
    log.debug("set has " + result.size() + " subjects");
    if (result.size() > 0) {
        // first() is the lowest subject according to the LdapComparator
        log.debug("first is " + ((Subject) result.first()).getName());
    }
    return result;
}
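When a TreeSet is built with a Comparator, as with LdapComparator above, first() returns the smallest element according to that comparator rather than natural ordering. A minimal sketch using the JDK's case-insensitive comparator:

import java.util.TreeSet;

class ComparatorFirst {
    public static void main(String[] args) {
        TreeSet<String> names = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        names.add("bob");
        names.add("Alice");
        // first() follows the comparator: "Alice" sorts before "bob" ignoring case
        System.out.println(names.first()); // Alice
    }
}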
From source file:org.apache.cassandra.concurrent.LongSharedExecutorPoolTest.java
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);
    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    for (int i = 0; i < executors.length; i++) {
        executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }
    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // Basic idea is to go through different levels of load on the executor service; initially all small
    // batches (mostly within max queue size) of very short operations, moving to progressively larger
    // batches (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events",
                    runs * 0.001f, events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }
        // Wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);
        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            // first() is the batch with the earliest timeout, since Batch sorts by timeout
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }
        // If we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE)
            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
        // Submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        // System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
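In the Cassandra test above, the TreeSet<Batch> pending acts as a small deadline queue: Batch orders by timeout, so first() peeks at the earliest deadline and pollFirst() retires it. A minimal sketch of that pattern (the Deadline type here is hypothetical, standing in for Batch):

import java.util.TreeSet;

class DeadlineQueue {
    // Hypothetical element type, ordered by deadline like Batch in the test above
    record Deadline(long at, String label) implements Comparable<Deadline> {
        public int compareTo(Deadline o) {
            return Long.compare(at, o.at);
        }
    }

    public static void main(String[] args) {
        TreeSet<Deadline> pending = new TreeSet<>();
        pending.add(new Deadline(300, "c"));
        pending.add(new Deadline(100, "a"));
        pending.add(new Deadline(200, "b"));
        // first() peeks at the earliest deadline; pollFirst() removes and returns it
        while (!pending.isEmpty()) {
            System.out.println(pending.pollFirst().label); // prints a, b, c
        }
    }
}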