List of usage examples for java.util.LinkedList.getFirst()
public E getFirst()
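getFirst() returns the head of the list without removing it and throws NoSuchElementException when the list is empty (peekFirst() returns null instead). Before the source-file excerpts below, a minimal self-contained sketch of that contract; the class name GetFirstExample and the sample values are illustrative only, not taken from any of the sources that follow:

import java.util.LinkedList;
import java.util.NoSuchElementException;

public class GetFirstExample {
    public static void main(String[] args) {
        LinkedList<String> list = new LinkedList<String>();
        list.add("alpha");
        list.add("beta");

        // getFirst() returns the head element without removing it
        System.out.println(list.getFirst()); // prints "alpha"
        System.out.println(list.size());     // still 2

        // unlike peekFirst(), getFirst() throws on an empty list
        list.clear();
        System.out.println(list.peekFirst()); // prints "null"
        try {
            list.getFirst();
        } catch (NoSuchElementException e) {
            System.out.println("getFirst() on an empty list throws NoSuchElementException");
        }
    }
}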
From source file:org.saiku.web.rest.resources.QueryResource.java
@DELETE
@Consumes("application/x-www-form-urlencoded")
@Path("/{queryname}/axis/{axis}/dimension/{dimension}/member/")
public Response removeMembers(@PathParam("queryname") String queryName, @PathParam("axis") String axisName,
        @PathParam("dimension") String dimensionName, MultivaluedMap<String, String> formParams) {
    try {
        if (log.isDebugEnabled()) {
            log.debug("TRACK\t" + "\t/query/" + queryName + "/axis/" + axisName + "/dimension/"
                    + dimensionName + "\tPUT");
        }
        if (formParams.containsKey("selections")) {
            LinkedList<String> sels = (LinkedList<String>) formParams.get("selections");
            String selectionJSON = (String) sels.getFirst();
            ObjectMapper mapper = new ObjectMapper(); // can reuse, share globally
            List<SelectionRestObject> selections = mapper.readValue(selectionJSON, mapper.getTypeFactory()
                    .constructCollectionType(ArrayList.class, SelectionRestObject.class));
            for (SelectionRestObject member : selections) {
                removeMember("MEMBER", queryName, axisName, dimensionName, member.getUniquename());
            }
            return Response.ok().build();
        }
        throw new Exception("Form did not contain 'selections' parameter");
    } catch (Exception e) {
        log.error("Cannot update selections for query (" + queryName + ")", e);
        return Response.serverError().entity(e.getMessage()).status(Status.INTERNAL_SERVER_ERROR).build();
    }
}
From source file:org.commoncrawl.service.queryserver.master.S3Helper.java
public static ArcFileItem retrieveArcFileItem(ArchiveInfo archiveInfo, EventLoop eventLoop) throws IOException {

    // the default bucket id
    String bucketId = "commoncrawl-crawl-002";
    // ok, see if we need to switch buckets
    if (archiveInfo.getCrawlNumber() == 1) {
        bucketId = "commoncrawl";
    }

    S3Downloader downloader = new S3Downloader(bucketId, "", "", false);

    // now activate the segment log ...
    final Semaphore downloadCompleteSemaphore = new Semaphore(0);
    final StreamingArcFileReader arcFileReader = new StreamingArcFileReader(false);
    //arcFileReader.setArcFileHasHeaderItemFlag(false);

    // create a buffer list we will append incoming content into ...
    final LinkedList<ByteBuffer> bufferList = new LinkedList<ByteBuffer>();

    downloader.initialize(new S3Downloader.Callback() {

        @Override
        public boolean contentAvailable(int itemId, String itemKey, NIOBufferList contentBuffer) {
            LOG.info("ContentQuery contentAvailable called for Item:" + itemKey + " totalBytesAvailable:"
                    + contentBuffer.available());
            try {
                while (contentBuffer.available() != 0) {
                    bufferList.add(contentBuffer.read());
                }
                return true;
            } catch (IOException e) {
                LOG.error(CCStringUtils.stringifyException(e));
                return false;
            }
        }

        @Override
        public void downloadComplete(int itemId, String itemKey) {
            LOG.info("S3 Download Complete for item:" + itemKey);
            downloadCompleteSemaphore.release();
        }

        @Override
        public void downloadFailed(int itemId, String itemKey, String errorCode) {
            LOG.info("S3 Download Failed for item:" + itemKey);
            downloadCompleteSemaphore.release();
        }

        @Override
        public boolean downloadStarting(int itemId, String itemKey, int contentLength) {
            LOG.info("ContentQuery DownloadStarting for Item:" + itemKey + " contentLength:" + contentLength);
            return true;
        }
    }, eventLoop);

    LOG.info("Starting request for Item:"
            + hdfsNameToS3ArcFileName(archiveInfo.getArcfileDate(), archiveInfo.getArcfileIndex())
            + " Offset:" + archiveInfo.getArcfileOffset());

    int sizeToRetrieve = (archiveInfo.getCompressedSize() != 0) ? archiveInfo.getCompressedSize() : 30000;
    sizeToRetrieve += 10;

    downloader.fetchPartialItem(
            hdfsNameToS3ArcFileName(archiveInfo.getArcfileDate(), archiveInfo.getArcfileIndex()),
            archiveInfo.getArcfileOffset() - 10, sizeToRetrieve);

    downloadCompleteSemaphore.acquireUninterruptibly();

    if (bufferList.size() == 0) {
        return null;
    }

    ByteBuffer firstBuffer = bufferList.getFirst();
    if (firstBuffer != null) {
        int offsetToGZIPHeader = scanForGZIPHeader(firstBuffer.duplicate());
        if (offsetToGZIPHeader != -1) {
            firstBuffer.position(offsetToGZIPHeader);
            LOG.info("*** Offset to GZIP Header:" + offsetToGZIPHeader);
        } else {
            LOG.error("*** Failed to find GZIP Header offset");
        }
    }

    // now try to decode content if possible
    for (ByteBuffer buffer : bufferList) {
        LOG.info("Adding Buffer of Size:" + buffer.remaining() + " Position:" + buffer.position()
                + " Limit:" + buffer.limit());
        arcFileReader.available(buffer);
    }

    ArcFileItem item = arcFileReader.getNextItem();

    if (item != null) {
        LOG.info("Request Returned item:" + item.getUri());
        LOG.info("Uncompressed Size:" + item.getContent().getCount());
    }

    return item;
}
From source file:io.apptik.widget.MultiSlider.java
private Thumb getMostMovable(LinkedList<Thumb> thumbs, MotionEvent event) {
    Thumb res = null;
    int maxChange = 0;
    if (thumbs != null && !thumbs.isEmpty()) {
        if (thumbs.getFirst().getValue() == getValue(event, thumbs.getFirst()))
            return null;

        for (Thumb thumb : thumbs) {
            int optValue = (getValue(event, thumbs.getFirst()) > thumb.getValue()) ? mScaleMax : mScaleMin;
            int currChange = Math.abs(thumb.getValue() - optThumbValue(thumb, optValue));
            if (currChange > maxChange) {
                maxChange = currChange;
                res = thumb;
            }
        }
    }
    return res;
}
From source file:org.apache.fop.layoutmgr.list.ListItemLayoutManager.java
/**
 * Add the areas for the break points.
 *
 * @param parentIter the position iterator
 * @param layoutContext the layout context for adding areas
 */
@Override
public void addAreas(PositionIterator parentIter, LayoutContext layoutContext) {
    getParentArea(null);

    addId();

    LayoutContext lc = new LayoutContext(0);
    Position firstPos = null;
    Position lastPos = null;

    // "unwrap" the NonLeafPositions stored in parentIter
    LinkedList<Position> positionList = new LinkedList<Position>();
    Position pos;
    while (parentIter.hasNext()) {
        pos = parentIter.next();
        if (pos.getIndex() >= 0) {
            if (firstPos == null) {
                firstPos = pos;
            }
            lastPos = pos;
        }
        if (pos instanceof NonLeafPosition && pos.getPosition() != null) {
            // pos contains a ListItemPosition created by this ListBlockLM
            positionList.add(pos.getPosition());
        }
    }

    addMarkersToPage(true, isFirst(firstPos), isLast(lastPos));

    // use the first and the last ListItemPosition to determine the
    // corresponding indexes in the original labelList and bodyList
    int labelFirstIndex = ((ListItemPosition) positionList.getFirst()).getLabelFirstIndex();
    int labelLastIndex = ((ListItemPosition) positionList.getLast()).getLabelLastIndex();
    int bodyFirstIndex = ((ListItemPosition) positionList.getFirst()).getBodyFirstIndex();
    int bodyLastIndex = ((ListItemPosition) positionList.getLast()).getBodyLastIndex();

    // determine previous break if any (in item label list)
    int previousBreak = ElementListUtils.determinePreviousBreak(labelList, labelFirstIndex);
    SpaceResolver.performConditionalsNotification(labelList, labelFirstIndex, labelLastIndex, previousBreak);

    // determine previous break if any (in item body list)
    previousBreak = ElementListUtils.determinePreviousBreak(bodyList, bodyFirstIndex);
    SpaceResolver.performConditionalsNotification(bodyList, bodyFirstIndex, bodyLastIndex, previousBreak);

    // add label areas
    if (labelFirstIndex <= labelLastIndex) {
        KnuthPossPosIter labelIter = new KnuthPossPosIter(labelList, labelFirstIndex, labelLastIndex + 1);
        lc.setFlags(LayoutContext.FIRST_AREA, layoutContext.isFirstArea());
        lc.setFlags(LayoutContext.LAST_AREA, layoutContext.isLastArea());
        // set the space adjustment ratio
        lc.setSpaceAdjust(layoutContext.getSpaceAdjust());
        // TODO: use the right stack limit for the label
        lc.setStackLimitBP(layoutContext.getStackLimitBP());
        label.addAreas(labelIter, lc);
    }

    // add body areas
    if (bodyFirstIndex <= bodyLastIndex) {
        KnuthPossPosIter bodyIter = new KnuthPossPosIter(bodyList, bodyFirstIndex, bodyLastIndex + 1);
        lc.setFlags(LayoutContext.FIRST_AREA, layoutContext.isFirstArea());
        lc.setFlags(LayoutContext.LAST_AREA, layoutContext.isLastArea());
        // set the space adjustment ratio
        lc.setSpaceAdjust(layoutContext.getSpaceAdjust());
        // TODO: use the right stack limit for the body
        lc.setStackLimitBP(layoutContext.getStackLimitBP());
        body.addAreas(bodyIter, lc);
    }

    // after adding body areas, set the maximum area bpd
    int childCount = curBlockArea.getChildAreas().size();
    assert childCount >= 1 && childCount <= 2;
    int itemBPD = ((Block) curBlockArea.getChildAreas().get(0)).getAllocBPD();
    if (childCount == 2) {
        itemBPD = Math.max(itemBPD, ((Block) curBlockArea.getChildAreas().get(1)).getAllocBPD());
    }
    curBlockArea.setBPD(itemBPD);

    addMarkersToPage(false, isFirst(firstPos), isLast(lastPos));

    // we are done with this area; add the background
    TraitSetter.addBackground(curBlockArea, getListItemFO().getCommonBorderPaddingBackground(), this);
    TraitSetter.addSpaceBeforeAfter(curBlockArea, layoutContext.getSpaceAdjust(), effSpaceBefore,
            effSpaceAfter);

    flush();

    curBlockArea = null;
    resetSpaces();

    checkEndOfLayout(lastPos);
}
From source file:org.lambdamatic.analyzer.LambdaExpressionAnalyzer.java
/**
 * Simplify the given {@link Statement}, keeping all branches that end with a "return 1" node, and
 * combining the remaining ones in a {@link CompoundExpression}.
 *
 * @param statement the statement to thin out
 * @return the resulting "thinned out" {@link Statement}
 */
private static Statement thinOut(final Statement statement) {
    LOGGER.debug("About to simplify \n\t{}", NodeUtils.prettyPrint(statement));
    if (statement.getStatementType() == StatementType.EXPRESSION_STMT) {
        return statement;
    } else {
        // find branches that end with 'return 1'
        final ReturnTruePathFilter filter = new ReturnTruePathFilter();
        statement.accept(filter);
        final List<ReturnStatement> returnStmts = filter.getReturnStmts();
        final List<Expression> expressions = new ArrayList<>();
        for (ReturnStatement returnStmt : returnStmts) {
            final LinkedList<Expression> relevantExpressions = new LinkedList<>();
            // current node being evaluated
            Statement currentStmt = returnStmt;
            // previous node evaluated, because it is important to remember
            // the path that was taken (in case of ConditionalStatements)
            Statement previousStmt = null;
            while (currentStmt != null) {
                switch (currentStmt.getStatementType()) {
                case CONTROL_FLOW_STMT:
                    final ControlFlowStatement controlFlowStatement = (ControlFlowStatement) currentStmt;
                    final Expression controlFlowExpression = controlFlowStatement.getControlFlowExpression();
                    // if we come from the "eval true" path on this condition
                    if (controlFlowStatement.getThenStatements().contains(previousStmt)) {
                        relevantExpressions.add(0, controlFlowExpression);
                    } else {
                        relevantExpressions.add(0, controlFlowExpression.inverse());
                    }
                    break;
                case RETURN_STMT:
                    final Expression returnExpression = ((ReturnStatement) currentStmt).getExpression();
                    if (returnExpression.getExpressionType() == ExpressionType.METHOD_INVOCATION) {
                        relevantExpressions.add(0, returnExpression);
                    }
                    break;
                default:
                    LOGGER.trace("Ignoring node '{}'", currentStmt);
                    break;
                }
                previousStmt = currentStmt;
                currentStmt = currentStmt.getParent();
            }
            if (relevantExpressions.size() > 1) {
                expressions.add(
                        new CompoundExpression(CompoundExpressionOperator.CONDITIONAL_AND, relevantExpressions));
            } else if (!relevantExpressions.isEmpty()) {
                expressions.add(relevantExpressions.getFirst());
            }
        }
        if (expressions.isEmpty()) {
            return statement;
        }
        final Statement result = (expressions.size() > 1)
                ? new ReturnStatement(
                        new CompoundExpression(CompoundExpressionOperator.CONDITIONAL_OR, expressions))
                : new ReturnStatement(expressions.get(0));
        LOGGER.debug("Thinned out expression: {}", result.toString());
        return result;
    }
}
From source file:eu.stratosphere.nephele.multicast.MulticastManager.java
/**
 * Reads a hard-coded tree topology from file and creates a tree according to the hard-coded
 * topology from the file.
 *
 * @param nodes the tree nodes to connect according to the topology file
 * @return the multicast forwarding table for the tree, or null if the file could not be read
 */
private MulticastForwardingTable createHardCodedTree(LinkedList<TreeNode> nodes) {
    try {
        FileInputStream fstream = new FileInputStream(this.hardCodedTreeFilePath);
        DataInputStream in = new DataInputStream(fstream);
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        String strLine;
        while ((strLine = br.readLine()) != null) {
            String[] values = strLine.split(" ");
            String actualhostname = values[0];
            for (TreeNode n : nodes) {
                if (n.toString().equals(actualhostname)) {
                    // we found the node.. connect the children
                    for (int i = 1; i < values.length; i++) {
                        for (TreeNode childnode : nodes) {
                            if (childnode.toString().equals(values[i])) {
                                n.addChild(childnode);
                            }
                        }
                    }
                }
            }
        }
        br.close();
        // first node is root.. create tree. easy
        return nodes.getFirst().createForwardingTable();
    } catch (Exception e) {
        System.out.println("Error reading hard-coded topology file for multicast tree: " + e.getMessage());
        return null;
    }
}
From source file:net.jenet.Host.java
Event dispatchIncomingCommands() {
    Event result = new Event();
    Peer currentPeer;
    LinkedList<Peer> peersList = new LinkedList<Peer>(peers.values());

    if (peers.size() == 0)
        return result;

    /*
     * Simply calling containsKey( lastServicedPeer.getIncomingPeerId() ) will
     * not be sufficient because the peerID of lastServicedPeer may have been
     * reassigned. The get operation is quicker than containsValue because
     * it does not have to search through all the peers.
     *
     * lastServicedPeer.isDisconnected() may be sufficient, but this feels more robust.
     */
    if (lastServicedPeer == null || peers.get(lastServicedPeer.getIncomingPeerID()) != lastServicedPeer)
        lastServicedPeer = peersList.getFirst();
    else
        while (peersList.getLast() != lastServicedPeer)
            peersList.addLast(peersList.removeFirst());

    do {
        currentPeer = peersList.removeFirst();
        peersList.addLast(currentPeer);

        if (currentPeer.isZombie()) {
            recalculateBandwithLimits = true;
            currentPeer.reset();
            result.setType(Event.TYPE.DISCONNECTED);
            result.setPeer(currentPeer);
            lastServicedPeer = currentPeer;
            return result;
        }

        if (!currentPeer.isConnected())
            continue;

        for (byte channelID : currentPeer.getChannels().keySet()) {
            Channel channel = currentPeer.getChannels().get(channelID);
            if (channel.getIncomingReliableCommands().isEmpty()
                    && channel.getIncomingUnreliableCommands().isEmpty())
                continue;
            Packet packet = currentPeer.receive(channelID);
            result.setPacket(packet);
            if (packet == null)
                continue;
            result.setType(Event.TYPE.RECEIVED);
            result.setPeer(currentPeer);
            result.setChannelID(channelID);
            result.setPacket(packet);
            lastServicedPeer = currentPeer;
            return result;
        }
    } while (currentPeer != lastServicedPeer);

    return result;
}
From source file:com.hp.alm.ali.idea.services.EntityService.java
public void requestCachedEntity(final EntityRef ref, final List<String> properties,
        final EntityListener callback) {
    ApplicationUtil.executeOnPooledThread(new Runnable() {
        public void run() {
            final LinkedList<Entity> done = new LinkedList<Entity>();
            listeners.fire(new WeakListeners.Action<EntityListener>() {
                public void fire(EntityListener listener) {
                    if (done.isEmpty() && listener instanceof CachingEntityListener) {
                        Entity cached = ((CachingEntityListener) listener).lookup(ref);
                        if (cached != null) {
                            for (String property : properties) {
                                if (!cached.isInitialized(property)) {
                                    return;
                                }
                            }
                            done.add(cached);
                        }
                    }
                }
            });
            if (done.isEmpty()) {
                // all properties are fetched. possible optimization is to request only properties from the
                // current request + properties initialized in cached value (if any)
                getEntityAsync(ref, callback);
            } else {
                callback.entityLoaded(done.getFirst(), EntityListener.Event.CACHE);
            }
        }
    });
}
From source file:net.relet.freimap.LinkInfo.java
public void setFlowProfile(LinkedList<FlowData> lp) {
    XYSeries packets = new XYSeries("packets");
    XYSeries bytes = new XYSeries("bytes");
    XYSeries icmp = new XYSeries("icmp");
    XYSeries tcp = new XYSeries("tcp");
    XYSeries udp = new XYSeries("udp");
    XYSeries other = new XYSeries("other");
    XYSeriesCollection data1 = new XYSeriesCollection(bytes);
    XYSeriesCollection data2 = new XYSeriesCollection(packets);
    XYSeriesCollection data3 = new XYSeriesCollection(icmp);
    data3.addSeries(tcp);
    data3.addSeries(udp);
    data3.addSeries(other);

    //linkChart = ChartFactory.createXYLineChart("packets, bytes\r\nicmp, tcp, udp other", "time", "count", data1, PlotOrientation.VERTICAL, false, false, false);
    ValueAxis domain = new DateAxis();
    ValueAxis range1 = new NumberAxis();
    ValueAxis range2 = new NumberAxis();
    ValueAxis range3 = new NumberAxis();
    CombinedDomainXYPlot plot = new CombinedDomainXYPlot(domain);
    plot.add(new XYPlot(data2, domain, range1, new XYLineAndShapeRenderer(true, false)));
    plot.add(new XYPlot(data1, domain, range2, new XYLineAndShapeRenderer(true, false)));
    plot.add(new XYPlot(data3, domain, range1, new XYLineAndShapeRenderer(true, false)));
    linkChart = new JFreeChart(plot);
    linkChart.setTitle("");
    sexupLayout(linkChart);

    long min = lp.getFirst().begin, max = lp.getLast().end;
    for (float i = 0.0f; i < 1000.0f; i += 1.0f) {
        long cur = min + (long) ((max - min) * (i / 1000.0));
        long cpackets = 0;
        long cbytes = 0;
        long cicmp = 0;
        long ctcp = 0;
        long cudp = 0;
        long cother = 0;
        Iterator<FlowData> li = lp.iterator();
        while (li.hasNext()) {
            FlowData data = li.next();
            if (data.begin > cur)
                break;
            if (data.end < cur)
                continue;
            cpackets += data.packets;
            cbytes += data.bytes;
            // bucket packet counts by IP protocol number
            switch (data.protocol) {
            case 1: { // ICMP
                cicmp += data.packets;
                break;
            }
            case 6: { // TCP
                ctcp += data.packets;
                break;
            }
            case 17: { // UDP
                cudp += data.packets;
                break;
            }
            default: {
                cother += data.packets;
                break;
            }
            }
        }
        packets.add(cur, cpackets);
        bytes.add(cur, cbytes);
        icmp.add(cur, cicmp);
        tcp.add(cur, ctcp);
        udp.add(cur, cudp);
        other.add(cur, cother);
    }
    status = STATUS_AVAILABLE;
}
From source file:com.hipu.bdb.util.FileUtils.java
/**
 * Retrieve a number of lines from the file around the given
 * position, as when paging forward or backward through a file.
 *
 * @param file File to retrieve lines from
 * @param position offset to anchor lines
 * @param signedDesiredLineCount lines requested; if negative,
 * want this number of lines ending with a line containing
 * the position; if positive, want this number of lines,
 * all starting at or after position.
 * @param lines List&lt;String&gt; to insert found lines into
 * @param lineEstimate int estimate of line size, 0 means use default
 * of 128
 * @return LongRange indicating the file offsets corresponding to
 * the beginning of the first line returned, and the point
 * after the end of the last line returned
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static LongRange pagedLines(File file, long position, int signedDesiredLineCount, List<String> lines,
        int lineEstimate) throws IOException {
    // consider negative positions as from end of file; -1 = last byte
    if (position < 0) {
        position = file.length() + position;
    }

    // calculate a reasonably sized chunk likely to have all desired lines
    if (lineEstimate == 0) {
        lineEstimate = 128;
    }
    int desiredLineCount = Math.abs(signedDesiredLineCount);
    long startPosition;
    long fileEnd = file.length();
    int bufferSize = (desiredLineCount + 5) * lineEstimate;
    if (signedDesiredLineCount > 0) {
        // reading forward; include previous char in case line-end
        startPosition = position - 1;
    } else {
        // reading backward
        startPosition = position - bufferSize + (2 * lineEstimate);
    }
    if (startPosition < 0) {
        startPosition = 0;
    }
    if (startPosition + bufferSize > fileEnd) {
        bufferSize = (int) (fileEnd - startPosition);
    }

    // read that reasonable chunk
    FileInputStream fis = new FileInputStream(file);
    fis.getChannel().position(startPosition);
    byte[] buf = new byte[bufferSize];
    // NOTE: the excerpt as published never filled buf before closing the
    // stream; a plain read loop is assumed here to restore that step
    int bytesRead = 0;
    while (bytesRead < bufferSize) {
        int count = fis.read(buf, bytesRead, bufferSize - bytesRead);
        if (count < 0) {
            break;
        }
        bytesRead += count;
    }
    IOUtils.closeQuietly(fis);

    // find all line starts fully in buffer
    // (positions after a line-end, per line-end definition in
    // BufferedReader.readLine)
    LinkedList<Integer> lineStarts = new LinkedList<Integer>();
    if (startPosition == 0) {
        lineStarts.add(0);
    }
    boolean atLineEnd = false;
    boolean eatLF = false;
    int i;
    for (i = 0; i < bufferSize; i++) {
        if ((char) buf[i] == '\n' && eatLF) {
            eatLF = false;
            continue;
        }
        if (atLineEnd) {
            atLineEnd = false;
            lineStarts.add(i);
            if (signedDesiredLineCount < 0 && startPosition + i > position) {
                // reached next line past position, read no more
                break;
            }
        }
        if ((char) buf[i] == '\r') {
            atLineEnd = true;
            eatLF = true;
            continue;
        }
        if ((char) buf[i] == '\n') {
            atLineEnd = true;
        }
    }
    if (startPosition + i == fileEnd) {
        // add phantom lineStart after end
        lineStarts.add(bufferSize);
    }
    int foundFullLines = lineStarts.size() - 1;

    // if found no lines
    if (foundFullLines < 1) {
        if (signedDesiredLineCount > 0) {
            if (startPosition + bufferSize == fileEnd) {
                // nothing more to read: return nothing
                return new LongRange(fileEnd, fileEnd);
            } else {
                // retry with larger lineEstimate
                return pagedLines(file, position, signedDesiredLineCount, lines,
                        Math.max(bufferSize, lineEstimate));
            }
        } else {
            // try again with much larger line estimate
            // TODO: fail gracefully before growing to multi-MB buffers
            return pagedLines(file, position, signedDesiredLineCount, lines, bufferSize);
        }
    }

    // trim unneeded lines
    while (signedDesiredLineCount > 0 && startPosition + lineStarts.getFirst() < position) {
        // discard lines starting before desired position
        lineStarts.removeFirst();
    }
    while (lineStarts.size() > desiredLineCount + 1) {
        if (signedDesiredLineCount < 0 && (startPosition + lineStarts.get(1) <= position)) {
            // discard from front until reaching the line containing the target position
            lineStarts.removeFirst();
        } else {
            lineStarts.removeLast();
        }
    }
    int firstLine = lineStarts.getFirst();
    int partialLine = lineStarts.getLast();
    LongRange range = new LongRange(startPosition + firstLine, startPosition + partialLine);
    List<String> foundLines = IOUtils
            .readLines(new ByteArrayInputStream(buf, firstLine, partialLine - firstLine));

    if (foundFullLines < desiredLineCount && signedDesiredLineCount < 0 && startPosition > 0) {
        // if needed and reading backward, read more lines from earlier
        range = expandRange(range, pagedLines(file, range.getMinimumLong() - 1,
                signedDesiredLineCount + foundFullLines, lines, bufferSize / foundFullLines));
    }
    lines.addAll(foundLines);
    if (signedDesiredLineCount < 0 && range.getMaximumLong() < position) {
        // did not get line containing start position
        range = expandRange(range, pagedLines(file, partialLine, 1, lines, bufferSize / foundFullLines));
    }
    if (signedDesiredLineCount > 0 && foundFullLines < desiredLineCount && range.getMaximumLong() < fileEnd) {
        // need more forward lines
        range = expandRange(range, pagedLines(file, range.getMaximumLong(),
                desiredLineCount - foundFullLines, lines, bufferSize / foundFullLines));
    }

    return range;
}