List of usage examples for java.util.ListIterator.previous()
E previous();
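Before the project examples below, a minimal sketch of the basic cursor behavior (hypothetical list contents; standard java.util API only). next() walks the cursor forward, and previous() moves it back one position and returns the element it just passed over:

import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

public class PreviousDemo {
    public static void main(String[] args) {
        List<String> items = Arrays.asList("a", "b", "c"); // hypothetical contents
        ListIterator<String> it = items.listIterator();
        while (it.hasNext()) {
            it.next(); // walk the cursor to the end of the list
        }
        while (it.hasPrevious()) {
            // previous() moves the cursor back one position and returns that element
            System.out.println(it.previous()); // prints c, b, a
        }
    }
}

Note that the cursor always sits between elements, so alternating calls to next() and previous() return the same element repeatedly.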
From source file: com.robonobo.eon.SEONConnection.java

/** All packets in the retrans queue are regarded as lost */
private void markInTransitPktsAsLost() {
    // Add everything in retransQ to lostQ, inserting at the correct places -
    // both lists are already sorted so this isn't too bad
    ListIterator<SEONPacket> lostIter = lostQ.listIterator();
    SEONPacket curLostPkt = lostIter.hasNext() ? lostIter.next() : null;
    while (retransQ.size() > 0) {
        SEONPacket newPkt = retransQ.removeFirst();
        while (true) {
            if (curLostPkt == null) {
                lostQ.addLast(newPkt);
                break;
            }
            // If our newly-added packet comes before this one, add it before
            if (mod.lt(newPkt.getSequenceNumber(), curLostPkt.getSequenceNumber())) {
                lostIter.previous();
                lostIter.add(newPkt);
                lostIter.next();
                break;
            }
            curLostPkt = lostIter.hasNext() ? lostIter.next() : null;
        }
    }
}
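The method above relies on a common ListIterator idiom: a forward scan with next() overshoots the insertion point by one, so previous() steps the cursor back before add() places the new element ahead of the element just examined. A distilled, self-contained sketch of that insert-before-current idiom, using a hypothetical sorted Integer list rather than the SEONPacket types above:

import java.util.Arrays;
import java.util.LinkedList;
import java.util.ListIterator;

public class SortedInsertDemo {
    // Insert value into an already-sorted list, keeping it sorted.
    static void sortedInsert(LinkedList<Integer> sorted, int value) {
        ListIterator<Integer> it = sorted.listIterator();
        while (it.hasNext()) {
            if (it.next() > value) {
                it.previous(); // step back over the larger element...
                it.add(value); // ...so the new element lands before it
                return;
            }
        }
        it.add(value); // larger than everything seen: append at the end
    }

    public static void main(String[] args) {
        LinkedList<Integer> list = new LinkedList<>(Arrays.asList(1, 3, 5));
        sortedInsert(list, 4);
        System.out.println(list); // [1, 3, 4, 5]
    }
}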
From source file: com.emc.ecs.sync.target.S3Target.java

@Override
public void filter(SyncObject obj) {
    try {
        // skip the root of the bucket since it obviously exists
        if ("".equals(rootKey + obj.getRelativePath())) {
            log.debug("Target is bucket root; skipping");
            return;
        }

        // some sync objects lazy-load their metadata (i.e. AtmosSyncObject)
        // since this may be a timed operation, ensure it loads outside of other timed operations
        if (!(obj instanceof S3ObjectVersion) || !((S3ObjectVersion) obj).isDeleteMarker())
            obj.getMetadata();

        // Compute target key
        String targetKey = getTargetKey(obj);
        obj.setTargetIdentifier(AwsS3Util.fullPath(bucketName, targetKey));

        if (includeVersions) {
            ListIterator<S3ObjectVersion> sourceVersions = s3Source.versionIterator((S3SyncObject) obj);
            ListIterator<S3ObjectVersion> targetVersions = versionIterator(obj);

            boolean newVersions = false, replaceVersions = false;
            if (force) {
                replaceVersions = true;
            } else {
                // special workaround for bug where objects are listed, but they have no versions
                if (sourceVersions.hasNext()) {
                    // check count and etag/delete-marker to compare version chain
                    while (sourceVersions.hasNext()) {
                        S3ObjectVersion sourceVersion = sourceVersions.next();
                        if (targetVersions.hasNext()) {
                            S3ObjectVersion targetVersion = targetVersions.next();
                            if (sourceVersion.isDeleteMarker()) {
                                if (!targetVersion.isDeleteMarker()) replaceVersions = true;
                            } else {
                                if (targetVersion.isDeleteMarker()) replaceVersions = true;
                                else if (!sourceVersion.getETag().equals(targetVersion.getETag()))
                                    replaceVersions = true; // different checksum
                            }
                        } else if (!replaceVersions) { // source has new versions, but existing target versions are ok
                            newVersions = true;
                            sourceVersions.previous(); // back up one
                            putIntermediateVersions(sourceVersions, targetKey); // add any new intermediary versions (current is added below)
                        }
                    }
                    if (targetVersions.hasNext()) replaceVersions = true; // target has more versions
                    if (!newVersions && !replaceVersions) {
                        log.info("Source and target versions are the same. Skipping {}", obj.getRelativePath());
                        return;
                    }
                }
            }

            // something's off; must delete all versions of the object
            if (replaceVersions) {
                log.info("[{}]: version history differs between source and target; re-placing target version history with that from source.",
                        obj.getRelativePath());

                // collect versions in target
                List<DeleteObjectsRequest.KeyVersion> deleteVersions = new ArrayList<>();
                while (targetVersions.hasNext()) targetVersions.next(); // move cursor to end
                while (targetVersions.hasPrevious()) { // go in reverse order
                    S3ObjectVersion version = targetVersions.previous();
                    deleteVersions.add(new DeleteObjectsRequest.KeyVersion(targetKey, version.getVersionId()));
                }

                // batch delete all versions in target
                log.debug("[{}]: deleting all versions in target", obj.getRelativePath());
                s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(deleteVersions));

                // replay version history in target
                while (sourceVersions.hasPrevious()) sourceVersions.previous(); // move cursor to beginning
                putIntermediateVersions(sourceVersions, targetKey);
            }
        } else { // normal sync (no versions)
            Date sourceLastModified = obj.getMetadata().getModificationTime();
            long sourceSize = obj.getMetadata().getContentLength();

            // Get target metadata.
            ObjectMetadata destMeta = null;
            try {
                destMeta = s3.getObjectMetadata(bucketName, targetKey);
            } catch (AmazonS3Exception e) {
                if (e.getStatusCode() != 404)
                    throw new RuntimeException("Failed to check target key '" + targetKey + "' : " + e, e);
            }

            if (!force && obj.getFailureCount() == 0 && destMeta != null) {
                // Check overwrite
                Date destLastModified = destMeta.getLastModified();
                long destSize = destMeta.getContentLength();

                if (destLastModified.equals(sourceLastModified) && sourceSize == destSize) {
                    log.info("Source and target the same. Skipping {}", obj.getRelativePath());
                    return;
                }
                if (destLastModified.after(sourceLastModified)) {
                    log.info("Target newer than source. Skipping {}", obj.getRelativePath());
                    return;
                }
            }
        }

        // at this point we know we are going to write the object
        // Put [current object version]
        if (obj instanceof S3ObjectVersion && ((S3ObjectVersion) obj).isDeleteMarker()) {
            // object has version history, but is currently deleted
            log.debug("[{}]: deleting object in target to replicate delete marker in source.",
                    obj.getRelativePath());
            s3.deleteObject(bucketName, targetKey);
        } else {
            putObject(obj, targetKey);

            // if object has new metadata after the stream (i.e. encryption checksum), we must update S3 again
            if (obj.requiresPostStreamMetadataUpdate()) {
                log.debug("[{}]: updating metadata after sync as required", obj.getRelativePath());
                CopyObjectRequest cReq = new CopyObjectRequest(bucketName, targetKey, bucketName, targetKey);
                cReq.setNewObjectMetadata(AwsS3Util.s3MetaFromSyncMeta(obj.getMetadata()));
                s3.copyObject(cReq);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException("Failed to store object: " + e, e);
    }
}
From source file: org.rifidi.emulator.reader.thingmagic.commandobjects.UpdateCommand.java

public UpdateCommand(String command, ThingMagicReaderSharedResources tmsr) throws CommandCreationException {
    this.command = command;
    this.tmsr = tmsr;

    List<String> tokens = tokenizer(command);
    logger.debug(tokens);

    ListIterator<String> tokenIterator = tokens.listIterator();

    String token = tokenIterator.next();
    if (!token.equals("update"))
        throw new CommandCreationException("Error 0100: syntax error at '" + token + "'");

    try {
        token = tokenIterator.next();
        if (!token.matches(WHITE_SPACE))
            throw new CommandCreationException("Error 0100: syntax error at '" + token + "'");

        token = tokenIterator.next();
        table = token;
        if (!table.matches(A_WORD))
            throw new CommandCreationException("Error 0100: syntax error at '" + table + "'");

        token = tokenIterator.next();
        if (!token.matches(WHITE_SPACE))
            throw new CommandCreationException("Error 0100: syntax error at '" + token + "'");

        token = tokenIterator.next();
        if (!token.equals("set"))
            throw new CommandCreationException("Error 0100: syntax error at '" + token + "'");

        token = tokenIterator.next();

        parseKeyValuePairs(tokenIterator);

        // check if the command correctly ends in a semicolon
        if (tokenIterator.hasNext()) {
            token = tokenIterator.next();
            if (token.matches(WHITE_SPACE)) {
                token = tokenIterator.next();
            }
            if (!token.equals(";")) {
                throw new CommandCreationException("Error 0100: syntax error at '" + token + "'");
            }
        } else {
            throw new CommandCreationException("Error 0100: syntax error at '\n'");
        }
    } catch (NoSuchElementException e) {
        /*
         * If we get here, we ran out of tokens prematurely. Our job now is
         * to walk backwards to find the last non-space token and throw an
         * exception saying that there is a syntax error at that point.
         */

        /*
         * Look for the last offending command block that is not a series
         * of whitespaces.
         */
        token = tokenIterator.previous();
        while (token.matches(WHITE_SPACE)) {
            token = tokenIterator.previous();
        }
        logger.debug("Premature end of token list detected.");
        throw new CommandCreationException("Error 0100: syntax error at '" + token + "'");
    }

    IDBTable tableImpl = tmsr.getDataBase().getTable(table);
    if (tableImpl == null) {
        throw new CommandCreationException("Error 0100: syntax error at '" + table + "'");
    }

    for (int x = 0; x < tableImpl.size(); x++) {
        IDBRow row = tableImpl.get(x);
        for (String column : keyValuePairs.keySet()) {
            if (!row.containsColumn(column)) {
                throw new CommandCreationException("Error 0100: Unknown " + column);
            }
            // if (!row.isReadable(column)) {
            //     throw new CommandCreationException(
            //             "Error 0100: Could not read from '" + column
            //                     + "' in '" + table + "'");
            // }
        }
    }
}
From source file: com.robonobo.eon.SEONConnection.java

private void storePktForLater(SEONPacket pkt) throws EONException {
    // If the packet is after our last one, put it at the end, otherwise
    // search where to put it
    if (recvdPkts.size() == 0)
        recvdPkts.add(pkt);
    else if (mod.gt(pkt.getSequenceNumber(), recvdPkts.getLast().getSequenceNumber()))
        recvdPkts.addLast(pkt);
    else {
        // TODO If this is still too slow, we could cut out the iterator
        // creation by creating a custom list class with a resettable
        // iterator...
        ListIterator<SEONPacket> iter = recvdPkts.listIterator();
        while (iter.hasNext()) {
            SEONPacket itPkt = iter.next();
            if (pkt.getSequenceNumber() == itPkt.getSequenceNumber()) {
                if (itPkt.getPayloadSize() == 0) {
                    // This is a data pkt coming after a bare ack - replace it with the data pkt
                    iter.remove();
                    iter.add(pkt);
                }
                // Otherwise this is just a duplicate pkt - ignore
                break;
            } else if (mod.lt(pkt.getSequenceNumber(), itPkt.getSequenceNumber())) {
                // Insert the pkt *before* the current pkt
                iter.previous();
                iter.add(pkt);
                break;
            }
        }
    }
}
From source file: com.emc.ecs.sync.target.EcsS3Target.java

@Override
public void filter(SyncObject obj) {
    try {
        // skip the root of the bucket since it obviously exists
        if ("".equals(rootKey + obj.getRelativePath())) {
            log.debug("Target is bucket root; skipping");
            return;
        }

        // some sync objects lazy-load their metadata (i.e. AtmosSyncObject)
        // since this may be a timed operation, ensure it loads outside of other timed operations
        if (!(obj instanceof EcsS3ObjectVersion) || !((EcsS3ObjectVersion) obj).isDeleteMarker())
            obj.getMetadata();

        // Compute target key
        final String targetKey = getTargetKey(obj);
        obj.setTargetIdentifier(AwsS3Util.fullPath(bucketName, targetKey));

        if (includeVersions) {
            ListIterator<EcsS3ObjectVersion> sourceVersions = s3Source.versionIterator((EcsS3SyncObject) obj);
            ListIterator<EcsS3ObjectVersion> targetVersions = versionIterator(obj);

            boolean newVersions = false, replaceVersions = false;
            if (force) {
                replaceVersions = true;
            } else {
                // special workaround for bug where objects are listed, but they have no versions
                if (sourceVersions.hasNext()) {
                    // check count and etag/delete-marker to compare version chain
                    while (sourceVersions.hasNext()) {
                        EcsS3ObjectVersion sourceVersion = sourceVersions.next();
                        if (targetVersions.hasNext()) {
                            EcsS3ObjectVersion targetVersion = targetVersions.next();
                            if (sourceVersion.isDeleteMarker()) {
                                if (!targetVersion.isDeleteMarker()) replaceVersions = true;
                            } else {
                                if (targetVersion.isDeleteMarker()) replaceVersions = true;
                                else if (!sourceVersion.getETag().equals(targetVersion.getETag()))
                                    replaceVersions = true; // different checksum
                            }
                        } else if (!replaceVersions) { // source has new versions, but existing target versions are ok
                            newVersions = true;
                            sourceVersions.previous(); // back up one
                            putIntermediateVersions(sourceVersions, targetKey); // add any new intermediary versions (current is added below)
                        }
                    }
                    if (targetVersions.hasNext()) replaceVersions = true; // target has more versions
                    if (!newVersions && !replaceVersions) {
                        log.info("Source and target versions are the same. Skipping {}", obj.getRelativePath());
                        return;
                    }
                }
            }

            // something's off; must delete all versions of the object
            if (replaceVersions) {
                log.info("[{}]: version history differs between source and target; re-placing target version history with that from source.",
                        obj.getRelativePath());

                // collect versions in target
                final List<ObjectKey> deleteVersions = new ArrayList<>();
                while (targetVersions.hasNext()) targetVersions.next(); // move cursor to end
                while (targetVersions.hasPrevious()) { // go in reverse order
                    EcsS3ObjectVersion version = targetVersions.previous();
                    deleteVersions.add(new ObjectKey(targetKey, version.getVersionId()));
                }

                // batch delete all versions in target
                log.debug("[{}]: deleting all versions in target", obj.getRelativePath());
                time(new Function<Void>() {
                    @Override
                    public Void call() {
                        s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(deleteVersions));
                        return null;
                    }
                }, OPERATION_DELETE_VERSIONS);

                // replay version history in target
                while (sourceVersions.hasPrevious()) sourceVersions.previous(); // move cursor to beginning
                putIntermediateVersions(sourceVersions, targetKey);
            }
        } else { // normal sync (no versions)
            Date sourceLastModified = obj.getMetadata().getModificationTime();
            long sourceSize = obj.getMetadata().getContentLength();

            // Get target metadata.
            S3ObjectMetadata destMeta = null;
            try {
                destMeta = time(new Function<S3ObjectMetadata>() {
                    @Override
                    public S3ObjectMetadata call() {
                        return s3.getObjectMetadata(bucketName, targetKey);
                    }
                }, OPERATION_GET_METADATA);
            } catch (S3Exception e) {
                if (e.getHttpCode() != 404) {
                    throw new RuntimeException("Failed to check target key '" + targetKey + "' : " + e, e);
                }
            }

            if (!force && obj.getFailureCount() == 0 && destMeta != null) {
                // Check overwrite
                Date destLastModified = destMeta.getLastModified();
                long destSize = destMeta.getContentLength();

                if (destLastModified.equals(sourceLastModified) && sourceSize == destSize) {
                    log.info("Source and target the same. Skipping {}", obj.getRelativePath());
                    return;
                }
                if (destLastModified.after(sourceLastModified)) {
                    log.info("Target newer than source. Skipping {}", obj.getRelativePath());
                    return;
                }
            }
        }

        // at this point we know we are going to write the object
        // Put [current object version]
        if (obj instanceof S3ObjectVersion && ((S3ObjectVersion) obj).isDeleteMarker()) {
            // object has version history, but is currently deleted
            log.debug("[{}]: deleting object in target to replicate delete marker in source.",
                    obj.getRelativePath());
            time(new Function<Void>() {
                @Override
                public Void call() {
                    s3.deleteObject(bucketName, targetKey);
                    return null;
                }
            }, OPERATION_DELETE_OBJECT);
        } else {
            putObject(obj, targetKey);

            // if object has new metadata after the stream (i.e. encryption checksum), we must update S3 again
            if (obj.requiresPostStreamMetadataUpdate()) {
                log.debug("[{}]: updating metadata after sync as required", obj.getRelativePath());
                final CopyObjectRequest cReq = new CopyObjectRequest(bucketName, targetKey, bucketName, targetKey);
                cReq.setObjectMetadata(EcsS3Util.s3MetaFromSyncMeta(obj.getMetadata()));
                time(new Function<Void>() {
                    @Override
                    public Void call() {
                        s3.copyObject(cReq);
                        return null;
                    }
                }, OPERATION_UPDATE_METADATA);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException("Failed to store object: " + e, e);
    }
}
From source file: org.apache.fop.layoutmgr.AbstractBreaker.java

/**
 * Justifies the boxes and returns them as a new KnuthSequence.
 * @param blockList block list to justify
 * @param alg reference to the algorithm instance
 * @param availableBPD the available BPD
 * @return the effective list
 */
private BlockSequence justifyBoxes // CSOK: MethodLength
        (BlockSequence blockList, PageBreakingAlgorithm alg, int availableBPD) {
    int optimalPageCount;
    alg.setConstantLineWidth(availableBPD);
    optimalPageCount = alg.findBreakingPoints(blockList, /*availableBPD,*/ 1, true,
            BreakingAlgorithm.ALL_BREAKS);
    log.debug("PLM> optimalPageCount= " + optimalPageCount);

    ListIterator<KnuthElement> sequenceIterator = blockList.listIterator();
    ListIterator<PageBreakPosition> breakIterator = alg.getPageBreaks().listIterator();
    KnuthElement thisElement = null;
    PageBreakPosition thisBreak;
    int adjustedDiff; // difference already adjusted

    while (breakIterator.hasNext()) {
        thisBreak = breakIterator.next();
        if (log.isDebugEnabled()) {
            log.debug("| first page: break= " + thisBreak.getLeafPos() + " difference= "
                    + thisBreak.difference + " ratio= " + thisBreak.bpdAdjust);
        }
        adjustedDiff = 0;

        // glue and penalty items at the beginning of the page must be ignored:
        // the first element returned by sequenceIterator.next() inside the
        // while loop must be a box
        KnuthElement firstElement;
        while (sequenceIterator.hasNext()) {
            firstElement = sequenceIterator.next();
            if (!firstElement.isBox()) {
                log.debug("PLM> ignoring glue or penalty element "
                        + "at the beginning of the sequence");
                if (firstElement.isGlue()) {
                    ((BlockLevelLayoutManager) firstElement.getLayoutManager())
                            .discardSpace((KnuthGlue) firstElement);
                }
            } else {
                break;
            }
        }
        sequenceIterator.previous();

        // scan the sub-sequence representing a page,
        // collecting information about potential adjustments
        MinOptMax lineNumberMaxAdjustment = MinOptMax.ZERO;
        MinOptMax spaceMaxAdjustment = MinOptMax.ZERO;
        LinkedList<KnuthGlue> blockSpacesList = new LinkedList<KnuthGlue>();
        LinkedList<KnuthGlue> unconfirmedList = new LinkedList<KnuthGlue>();
        LinkedList<KnuthGlue> adjustableLinesList = new LinkedList<KnuthGlue>();
        boolean bBoxSeen = false;
        while (sequenceIterator.hasNext()
                && sequenceIterator.nextIndex() <= thisBreak.getLeafPos()) {
            thisElement = sequenceIterator.next();
            if (thisElement.isGlue()) {
                // glue elements are used to represent adjustable lines
                // and adjustable spaces between blocks
                KnuthGlue thisGlue = (KnuthGlue) thisElement;
                Adjustment adjustment = thisGlue.getAdjustmentClass();
                if (adjustment.equals(Adjustment.SPACE_BEFORE_ADJUSTMENT)
                        || adjustment.equals(Adjustment.SPACE_AFTER_ADJUSTMENT)) {
                    // potential space adjustment
                    // glue items before the first box or after the last one must be ignored
                    unconfirmedList.add(thisGlue);
                } else if (adjustment.equals(Adjustment.LINE_NUMBER_ADJUSTMENT)) {
                    // potential line number adjustment
                    lineNumberMaxAdjustment
                            = lineNumberMaxAdjustment.plusMax(thisElement.getStretch());
                    lineNumberMaxAdjustment
                            = lineNumberMaxAdjustment.minusMin(thisElement.getShrink());
                    adjustableLinesList.add(thisGlue);
                } else if (adjustment.equals(Adjustment.LINE_HEIGHT_ADJUSTMENT)) {
                    // potential line height adjustment
                }
            } else if (thisElement.isBox()) {
                if (!bBoxSeen) {
                    // this is the first box met in this page
                    bBoxSeen = true;
                } else {
                    while (!unconfirmedList.isEmpty()) {
                        // glue items in unconfirmedList were not after the last box
                        // in this page; they must be added to blockSpacesList
                        KnuthGlue blockSpace = unconfirmedList.removeFirst();
                        spaceMaxAdjustment = spaceMaxAdjustment.plusMax(blockSpace.getStretch());
                        spaceMaxAdjustment = spaceMaxAdjustment.minusMin(blockSpace.getShrink());
                        blockSpacesList.add(blockSpace);
                    }
                }
            }
        }
        log.debug("| line number adj= " + lineNumberMaxAdjustment);
        log.debug("| space adj      = " + spaceMaxAdjustment);

        if (thisElement.isPenalty() && thisElement.getWidth() > 0) {
            log.debug("  mandatory variation to the number of lines!");
            ((BlockLevelLayoutManager) thisElement.getLayoutManager())
                    .negotiateBPDAdjustment(thisElement.getWidth(), thisElement);
        }

        if (thisBreak.bpdAdjust != 0
                && (thisBreak.difference > 0 && thisBreak.difference <= spaceMaxAdjustment.getMax())
                || (thisBreak.difference < 0 && thisBreak.difference >= spaceMaxAdjustment.getMin())) {
            // modify only the spaces between blocks
            adjustedDiff += adjustBlockSpaces(blockSpacesList, thisBreak.difference,
                    (thisBreak.difference > 0
                            ? spaceMaxAdjustment.getMax() : -spaceMaxAdjustment.getMin()));
            log.debug("single space: " + (adjustedDiff == thisBreak.difference
                    || thisBreak.bpdAdjust == 0 ? "ok" : "ERROR"));
        } else if (thisBreak.bpdAdjust != 0) {
            adjustedDiff += adjustLineNumbers(adjustableLinesList, thisBreak.difference,
                    (thisBreak.difference > 0
                            ? lineNumberMaxAdjustment.getMax() : -lineNumberMaxAdjustment.getMin()));
            adjustedDiff += adjustBlockSpaces(blockSpacesList, thisBreak.difference - adjustedDiff,
                    ((thisBreak.difference - adjustedDiff) > 0
                            ? spaceMaxAdjustment.getMax() : -spaceMaxAdjustment.getMin()));
            log.debug("lines and space: " + (adjustedDiff == thisBreak.difference
                    || thisBreak.bpdAdjust == 0 ? "ok" : "ERROR"));
        }
    }

    // create a new sequence: the new elements will contain the Positions
    // which will be used in the addAreas() phase
    BlockSequence effectiveList = new BlockSequence(blockList.getStartOn(), blockList.getDisplayAlign());
    effectiveList.addAll(getCurrentChildLM().getChangedKnuthElements(
            blockList.subList(0, blockList.size() - blockList.ignoreAtEnd), /*0,*/ 0));
    //effectiveList.add(new KnuthPenalty(0, -KnuthElement.INFINITE,
    //        false, new Position(this), false));
    effectiveList.endSequence();

    ElementListObserver.observe(effectiveList, "breaker-effective", null);

    alg.getPageBreaks().clear(); //Why this?
    return effectiveList;
}
From source file: com.android.systemui.qs.QSDragPanel.java

public void setTiles(final Collection<QSTile<?>> tilesCollection) {
    // we try to be as efficient as possible here because this can happen while the user
    // is in edit mode, or maybe even while tiles are animating
    // step 1: stop all animations
    // step 2: remove tiles no longer to be used, cache ones that are still valid
    // step 3: remove empty viewpager pages
    // step 4: generate new tiles, re-add cached ones
    if (DEBUG_TILES) {
        Log.i(TAG, "setTiles() called with tiles = [" + tilesCollection + "]");
    }
    if (mLastDragRecord != null && mRecords.indexOf(mLastDragRecord) == -1) {
        // the last removed record might be stored in mLastDragRecord if we just shifted
        // re-add it to the list so we'll clean it up below
        mRecords.add(mLastDragRecord);
        mLastDragRecord = null;
    }

    // step kinda-1
    if (mDraggingRecord != null) {
        // dragging record might be animating back, force it to finished position
        mDraggingRecord.tileView.animate().cancel();
    }

    int currentViewPagerPage = mViewPager.getCurrentItem();
    int removedPages = 0;

    Map<QSTile<?>, DragTileRecord> cachedRecords = new ArrayMap<>();
    ListIterator<TileRecord> iterator = mRecords.listIterator(mRecords.size());

    int recordsRemoved = 0;
    // cleanup current records
    while (iterator.hasPrevious()) { // mRecords
        DragTileRecord dr = (DragTileRecord) iterator.previous();

        // step 1
        dr.tileView.animate().cancel();

        // step 2
        if (tilesCollection.contains(dr.tile)) {
            if (DEBUG_TILES) {
                Log.i(TAG, "caching tile: " + dr.tile);
            }
            cachedRecords.put(dr.tile, dr);
        } else {
            if (dr.page >= 0) {
                if (DEBUG_TILES) {
                    Log.w(TAG, "removed dr.tileView: " + dr.tileView + " from page: " + dr.page
                            + " (dest page: " + dr.destinationPage + ")");
                }
                removeTileView(dr.tileView);
            }
            if (DEBUG_TILES) {
                Log.i(TAG, "removing tile: " + dr.tile);
            }

            // remove record
            iterator.remove();
            recordsRemoved++;

            dr.page = -1;
            dr.destinationPage = -1;
        }
    }

    // at this point cachedRecords should have all retained tiles, no new or old tiles
    int delta = tilesCollection.size() - cachedRecords.size() - recordsRemoved;
    if (DEBUG_TILES) {
        Log.i(TAG, "record map delta: " + delta);
    }

    // step 3
    final Iterator<QSPage> pageIterator = mPages.iterator();
    while (pageIterator.hasNext()) {
        final QSPage page = pageIterator.next();
        final int viewpagerIndex = page.getPageIndex() + (mEditing ? 1 : 0);
        final int childCount = page.getChildCount();
        if (DEBUG_TILES) {
            Log.d(TAG, "page " + viewpagerIndex + " has " + childCount);
        }
        if (page.getPageIndex() >= getCurrentMaxPageCount() - 1) {
            if (DEBUG_TILES) {
                Log.d(TAG, "page : " + page + " has " + childCount + " children");
            }
            if (childCount == 0) {
                removedPages++;

                page.removeAllViews();
                mPagerAdapter.startUpdate(mViewPager);
                mPagerAdapter.destroyItem(mViewPager, viewpagerIndex, page);
                mPagerAdapter.finishUpdate(mViewPager);
                mPagerAdapter.notifyDataSetChanged();
            }
        }
    }

    if (removedPages > 0) {
        // even though we explicitly destroy old pages, without this call,
        // the viewpager doesn't seem to want to pick up the fact that we have less pages
        // and allows "empty" scrolls to the right where there is no page.
        if (DEBUG_TILES) {
            Log.d(TAG, "re-setting adapter, page: " + currentViewPagerPage);
        }
        mViewPager.setAdapter(mPagerAdapter);
        mViewPager.setCurrentItem(Math.min(currentViewPagerPage, mPagerAdapter.getCount()), false);
        mPagerAdapter.notifyDataSetChanged();
    }

    // step 4
    mRecords.ensureCapacity(tilesCollection.size());
    int runningCount = 0;

    final Iterator<QSTile<?>> newTileIterator = tilesCollection.iterator();
    while (newTileIterator.hasNext()) {
        QSTile<?> tile = newTileIterator.next();
        if (tile instanceof CustomQSTile) {
            if (((CustomQSTile) tile).isUserRemoved()
                    || ((CustomQSTile) tile).getTile() == null) {
                // tile not published yet
                continue;
            }
        }
        final int tileDestPage = getPagesForCount(runningCount + 1) - 1;

        if (DEBUG_TILES) {
            Log.d(TAG, "tile at : " + runningCount + ": " + tile + " to dest page: " + tileDestPage);
        }
        DragTileRecord record;
        if (!cachedRecords.containsKey(tile)) {
            if (DEBUG_TILES) {
                Log.d(TAG, "tile at: " + runningCount + " not cached, adding it to records");
            }
            record = makeRecord(tile);
            record.destinationPage = tileDestPage;
            mRecords.add(runningCount, record);
            mPagerAdapter.notifyDataSetChanged();
        } else {
            record = cachedRecords.get(tile);
            if (DEBUG_TILES) {
                Log.d(TAG, "tile at : " + runningCount + ": cached, restoring: " + record);
            }
            mPages.get(record.page).removeView(record.tileView);

            record.page = -1;
            record.destinationPage = tileDestPage;

            mRecords.remove(record);
            mRecords.add(runningCount, record);
            mPagerAdapter.notifyDataSetChanged();
        }
        if (record.page == -1) {
            // add the view
            mPages.get(record.destinationPage).addView(record.tileView);
            record.page = record.destinationPage;
            if (DEBUG_TILES) {
                Log.d(TAG, "added view " + record);
            }
        }
        runningCount++;
    }

    if (isShowingDetail()) {
        mDetail.bringToFront();
    }
    mPagerAdapter.notifyDataSetChanged();

    refreshAllTiles();
    requestLayout();
}
From source file: org.trnltk.experiment.morphology.ambiguity.DataDiffUtil.java

/**
 * Reduce the number of edits by eliminating semantically trivial equalities.
 *
 * @param diffs LinkedList of Diff objects.
 */
public void diff_cleanupSemantic(LinkedList<Diff<T>> diffs) {
    if (diffs.isEmpty()) {
        return;
    }
    boolean changes = false;
    Stack<Diff<T>> equalities = new Stack<Diff<T>>(); // Stack of equalities.
    List<T> lastequality = null; // Always equal to equalities.lastElement().text
    ListIterator<Diff<T>> pointer = diffs.listIterator();
    // Number of characters that changed prior to the equality.
    int length_insertions1 = 0;
    int length_deletions1 = 0;
    // Number of characters that changed after the equality.
    int length_insertions2 = 0;
    int length_deletions2 = 0;
    Diff<T> thisDiff = pointer.next();
    while (thisDiff != null) {
        if (thisDiff.operation == Operation.EQUAL) {
            // Equality found.
            equalities.push(thisDiff);
            length_insertions1 = length_insertions2;
            length_deletions1 = length_deletions2;
            length_insertions2 = 0;
            length_deletions2 = 0;
            lastequality = thisDiff.text;
        } else {
            // An insertion or deletion.
            if (thisDiff.operation == Operation.INSERT) {
                length_insertions2 += thisDiff.text.size();
            } else {
                length_deletions2 += thisDiff.text.size();
            }
            // Eliminate an equality that is smaller or equal to the edits on both
            // sides of it.
            if (lastequality != null
                    && (lastequality.size() <= Math.max(length_insertions1, length_deletions1))
                    && (lastequality.size() <= Math.max(length_insertions2, length_deletions2))) {
                //System.out.println("Splitting: '" + lastequality + "'");
                // Walk back to offending equality.
                while (thisDiff != equalities.lastElement()) {
                    thisDiff = pointer.previous();
                }
                pointer.next();

                // Replace equality with a delete.
                pointer.set(new Diff<T>(Operation.DELETE, lastequality));
                // Insert a corresponding insert.
                pointer.add(new Diff<T>(Operation.INSERT, lastequality));

                equalities.pop(); // Throw away the equality we just deleted.
                if (!equalities.empty()) {
                    // Throw away the previous equality (it needs to be reevaluated).
                    equalities.pop();
                }
                if (equalities.empty()) {
                    // There are no previous equalities, walk back to the start.
                    while (pointer.hasPrevious()) {
                        pointer.previous();
                    }
                } else {
                    // There is a safe equality we can fall back to.
                    thisDiff = equalities.lastElement();
                    while (thisDiff != pointer.previous()) {
                        // Intentionally empty loop.
                    }
                }

                length_insertions1 = 0; // Reset the counters.
                length_insertions2 = 0;
                length_deletions1 = 0;
                length_deletions2 = 0;
                lastequality = null;
                changes = true;
            }
        }
        thisDiff = pointer.hasNext() ? pointer.next() : null;
    }

    // Normalize the diff.
    if (changes) {
        diff_cleanupMerge(diffs);
    }
    diff_cleanupSemanticLossless(diffs);

    // Find any overlaps between deletions and insertions.
    // e.g: <del>abcxxx</del><ins>xxxdef</ins>
    //   -> <del>abc</del>xxx<ins>def</ins>
    // e.g: <del>xxxabc</del><ins>defxxx</ins>
    //   -> <ins>def</ins>xxx<del>abc</del>
    // Only extract an overlap if it is as big as the edit ahead or behind it.
    pointer = diffs.listIterator();
    Diff<T> prevDiff = null;
    thisDiff = null;
    if (pointer.hasNext()) {
        prevDiff = pointer.next();
        if (pointer.hasNext()) {
            thisDiff = pointer.next();
        }
    }
    while (thisDiff != null) {
        if (prevDiff.operation == Operation.DELETE && thisDiff.operation == Operation.INSERT) {
            List<T> deletion = prevDiff.text;
            List<T> insertion = thisDiff.text;
            int overlap_length1 = this.diff_commonOverlap(deletion, insertion);
            int overlap_length2 = this.diff_commonOverlap(insertion, deletion);
            if (overlap_length1 >= overlap_length2) {
                if (overlap_length1 >= deletion.size() / 2.0
                        || overlap_length1 >= insertion.size() / 2.0) {
                    // Overlap found. Insert an equality and trim the surrounding edits.
                    pointer.previous();
                    pointer.add(new Diff<T>(Operation.EQUAL, insertion.subList(0, overlap_length1)));
                    prevDiff.text = deletion.subList(0, deletion.size() - overlap_length1);
                    thisDiff.text = insertion.subList(overlap_length1, insertion.size());
                    // pointer.add inserts the element before the cursor, so there is
                    // no need to step past the new element.
                }
            } else {
                if (overlap_length2 >= deletion.size() / 2.0
                        || overlap_length2 >= insertion.size() / 2.0) {
                    // Reverse overlap found.
                    // Insert an equality and swap and trim the surrounding edits.
                    pointer.previous();
                    pointer.add(new Diff<T>(Operation.EQUAL, deletion.subList(0, overlap_length2)));
                    prevDiff.operation = Operation.INSERT;
                    prevDiff.text = insertion.subList(0, insertion.size() - overlap_length2);
                    thisDiff.operation = Operation.DELETE;
                    thisDiff.text = deletion.subList(overlap_length2, deletion.size());
                    // pointer.add inserts the element before the cursor, so there is
                    // no need to step past the new element.
                }
            }
            thisDiff = pointer.hasNext() ? pointer.next() : null;
        }
        prevDiff = thisDiff;
        thisDiff = pointer.hasNext() ? pointer.next() : null;
    }
}
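As the comments in diff_cleanupSemantic note, ListIterator.add() inserts the new element before the cursor, so no extra step is needed to move past it, and an immediate previous() returns the element just added. A tiny self-contained sketch of that behavior, with hypothetical contents:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

public class AddCursorDemo {
    public static void main(String[] args) {
        List<String> list = new ArrayList<>(Arrays.asList("x", "z"));
        ListIterator<String> it = list.listIterator();
        it.next();   // cursor now sits between "x" and "z"
        it.add("y"); // inserted before the cursor: list is now [x, y, z]
        System.out.println(it.previous()); // prints "y" - the element just added
        System.out.println(list);          // [x, y, z]
    }
}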
From source file: org.kuali.kfs.module.bc.document.service.impl.BudgetDocumentServiceImpl.java

/**
 * Reloads benefits target accounting lines. Usually called right after an annual benefits
 * calculation, when the display needs updating with a fresh copy from the database. All old
 * row versions are removed and database row versions are inserted in the list in the
 * correct order.
 *
 * @param bcDoc
 */
@Transactional
protected void reloadBenefitsLines(BudgetConstructionDocument bcDoc) {
    // get list of potential fringe objects to use as an in query param
    Map<String, Object> fieldValues = new HashMap<String, Object>();
    fieldValues.put(KFSPropertyConstants.UNIVERSITY_FISCAL_YEAR, bcDoc.getUniversityFiscalYear());
    fieldValues.put(KFSPropertyConstants.CHART_OF_ACCOUNTS_CODE, bcDoc.getChartOfAccountsCode());
    List<LaborLedgerBenefitsCalculation> benefitsCalculation = kualiModuleService
            .getResponsibleModuleService(LaborLedgerBenefitsCalculation.class)
            .getExternalizableBusinessObjectsList(LaborLedgerBenefitsCalculation.class, fieldValues);

    List<String> fringeObjects = new ArrayList<String>();
    for (LaborLedgerBenefitsCalculation element : benefitsCalculation) {
        fringeObjects.add(element.getPositionFringeBenefitObjectCode());
    }

    List<PendingBudgetConstructionGeneralLedger> dbPBGLFringeLines = budgetConstructionDao
            .getDocumentPBGLFringeLines(bcDoc.getDocumentNumber(), fringeObjects);
    List<PendingBudgetConstructionGeneralLedger> docPBGLExpLines = bcDoc
            .getPendingBudgetConstructionGeneralLedgerExpenditureLines();

    // holds the request sums of removed and added records; used to adjust the document
    // expenditure request total
    KualiInteger docRequestTotals = KualiInteger.ZERO;
    KualiInteger dbRequestTotals = KualiInteger.ZERO;

    // remove the current set of fringe lines
    ListIterator docLines = docPBGLExpLines.listIterator();
    while (docLines.hasNext()) {
        PendingBudgetConstructionGeneralLedger docLine = (PendingBudgetConstructionGeneralLedger) docLines.next();
        if (fringeObjects.contains(docLine.getFinancialObjectCode())) {
            docRequestTotals = docRequestTotals.add(docLine.getAccountLineAnnualBalanceAmount());
            docLines.remove();
        }
    }

    // add the db set of fringe lines, if any
    if (dbPBGLFringeLines != null && !dbPBGLFringeLines.isEmpty()) {
        if (docPBGLExpLines == null || docPBGLExpLines.isEmpty()) {
            docPBGLExpLines.addAll(dbPBGLFringeLines);
        } else {
            ListIterator dbLines = dbPBGLFringeLines.listIterator();
            docLines = docPBGLExpLines.listIterator();
            PendingBudgetConstructionGeneralLedger dbLine = (PendingBudgetConstructionGeneralLedger) dbLines.next();
            PendingBudgetConstructionGeneralLedger docLine = (PendingBudgetConstructionGeneralLedger) docLines.next();
            boolean dbDone = false;
            boolean docDone = false;
            while (!dbDone) {
                if (docDone || docLine.getFinancialObjectCode()
                        .compareToIgnoreCase(dbLine.getFinancialObjectCode()) > 0) {
                    if (!docDone) {
                        docLine = (PendingBudgetConstructionGeneralLedger) docLines.previous();
                    }
                    dbRequestTotals = dbRequestTotals.add(dbLine.getAccountLineAnnualBalanceAmount());
                    dbLine.setPersistedAccountLineAnnualBalanceAmount(dbLine.getAccountLineAnnualBalanceAmount());
                    this.populatePBGLLine(dbLine);
                    docLines.add(dbLine);
                    if (!docDone) {
                        docLine = (PendingBudgetConstructionGeneralLedger) docLines.next();
                    }
                    if (dbLines.hasNext()) {
                        dbLine = (PendingBudgetConstructionGeneralLedger) dbLines.next();
                    } else {
                        dbDone = true;
                    }
                } else {
                    if (docLines.hasNext()) {
                        docLine = (PendingBudgetConstructionGeneralLedger) docLines.next();
                    } else {
                        docDone = true;
                    }
                }
            }
        }
    }

    // adjust the request total for the removed and added recs
    bcDoc.setExpenditureAccountLineAnnualBalanceAmountTotal(
            bcDoc.getExpenditureAccountLineAnnualBalanceAmountTotal()
                    .add(dbRequestTotals.subtract(docRequestTotals)));
}