List of usage examples for java.lang.InterruptedException
public InterruptedException()

Constructs an InterruptedException with no detail message.

From source file:org.micromanager.plugins.magellan.imagedisplay.DisplayOverlayer.java
private void addConvexHull(Overlay overlay) throws InterruptedException {
    //draw convex hull
    Vector2D[] hullPoints = display_.getCurrentSurface().getConvexHullPoints();
    LongPoint lastPoint = null, firstPoint = null;
    for (Vector2D v : hullPoints) {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        //convert to image coords
        LongPoint p = display_.imageCoordsFromStageCoords(v.getX(), v.getY());
        if (lastPoint != null) {
            Line l = new Line(p.x_, p.y_, lastPoint.x_, lastPoint.y_);
            l.setStrokeColor(CONVEX_HULL_COLOR);
            overlay.add(l);
        } else {
            firstPoint = p;
        }
        lastPoint = p;
    }
    //draw last connection
    Line l = new Line(firstPoint.x_, firstPoint.y_, lastPoint.x_, lastPoint.y_);
    l.setStrokeColor(CONVEX_HULL_COLOR);
    overlay.add(l);
}
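The example above shows the idiom that recurs throughout this page: a long-running loop polls Thread.interrupted() and converts the flag into a checked InterruptedException so the caller can abandon the work cleanly. Note that Thread.interrupted() clears the interrupt status, which is exactly why rethrowing as an exception is appropriate here. A minimal self-contained sketch of that pattern (the task and item names are hypothetical):

import java.util.List;

public class CancellableWork {
    // Processes items one at a time; aborts if the worker thread is interrupted.
    static void processAll(List<String> items) throws InterruptedException {
        for (String item : items) {
            // Thread.interrupted() clears the flag, so we surface it as an exception
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            System.out.println("processing " + item);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        processAll(List.of("a", "b", "c"));
    }
}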
From source file:org.rhq.enterprise.server.plugin.pc.content.ContentProviderManager.java
private SyncTracker updateSyncStatus(SyncTracker tracker, ContentSyncStatus status) throws InterruptedException {
    RepoManagerLocal repoManager = LookupUtil.getRepoManagerLocal();
    SubjectManagerLocal subjMgr = LookupUtil.getSubjectManager();
    Subject overlord = subjMgr.getOverlord();

    int repoId = tracker.getRepoId();
    RepoSyncResults cancelCheck = repoManager.getMostRecentSyncResults(overlord, repoId);
    if (cancelCheck.getStatus() == ContentSyncStatus.CANCELLING) {
        throw new InterruptedException();
    }

    RepoSyncResults results = tracker.getRepoSyncResults();
    results.setStatus(status);
    results = repoManager.mergeRepoSyncResults(results);
    tracker.setRepoSyncResults(results);
    return tracker;
}
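Here the cancellation signal is not the thread's interrupt flag at all: a CANCELLING status read from the database is translated into an InterruptedException. The same idea works with any out-of-band cancel signal; a minimal sketch using an in-memory flag (all names hypothetical):

import java.util.concurrent.atomic.AtomicBoolean;

public class FlagCancellation {
    private final AtomicBoolean cancelRequested = new AtomicBoolean(false);

    public void requestCancel() {
        cancelRequested.set(true);
    }

    // One unit of long-running work that honors an external cancel signal.
    public void step() throws InterruptedException {
        if (cancelRequested.get()) {
            // Translate the external signal into the standard checked exception
            throw new InterruptedException();
        }
        // ... do one unit of work ...
    }

    public static void main(String[] args) {
        FlagCancellation task = new FlagCancellation();
        try {
            task.step();          // runs normally
            task.requestCancel();
            task.step();          // now throws
        } catch (InterruptedException e) {
            System.out.println("work cancelled");
        }
    }
}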
From source file:org.micromanager.plugins.magellan.surfacesandregions.SurfaceInterpolator.java
private void fitXYPositionsToConvexHull(double overlap) throws InterruptedException {
    int fullTileWidth = JavaLayerImageConstructor.getInstance().getImageWidth();
    int fullTileHeight = JavaLayerImageConstructor.getInstance().getImageHeight();
    int overlapX = (int) (JavaLayerImageConstructor.getInstance().getImageWidth() * overlap / 100);
    int overlapY = (int) (JavaLayerImageConstructor.getInstance().getImageHeight() * overlap / 100);
    int tileWidthMinusOverlap = fullTileWidth - overlapX;
    int tileHeightMinusOverlap = fullTileHeight - overlapY;
    int pixelPadding = (int) (xyPadding_um_ / Magellan.getCore().getPixelSizeUm());
    numRows_ = (int) Math.ceil((boundYPixelMax_ - boundYPixelMin_ + pixelPadding) / (double) tileHeightMinusOverlap);
    numCols_ = (int) Math.ceil((boundXPixelMax_ - boundXPixelMin_ + pixelPadding) / (double) tileWidthMinusOverlap);

    //take center of bounding box and create grid
    int pixelCenterX = boundXPixelMin_ + (boundXPixelMax_ - boundXPixelMin_) / 2;
    int pixelCenterY = boundYPixelMin_ + (boundYPixelMax_ - boundYPixelMin_) / 2;

    AffineTransform transform = AffineUtils.getAffineTransform(getCurrentPixelSizeConfig(), 0, 0);
    ArrayList<XYStagePosition> positions = new ArrayList<XYStagePosition>();
    Point2D.Double gridCenterStageCoords = new Point2D.Double();
    transform.transform(new Point2D.Double(pixelCenterX, pixelCenterY), gridCenterStageCoords);
    gridCenterStageCoords.x += convexHullVertices_[0].getX();
    gridCenterStageCoords.y += convexHullVertices_[0].getY();
    //set affine transform translation relative to grid center
    double[] transformMaxtrix = new double[6];
    transform.getMatrix(transformMaxtrix);
    transformMaxtrix[4] = gridCenterStageCoords.x;
    transformMaxtrix[5] = gridCenterStageCoords.y;
    //create new transform with translation applied
    transform = new AffineTransform(transformMaxtrix);
    //add all positions of rectangle around convex hull
    for (int col = 0; col < numCols_; col++) {
        double xPixelOffset = (col - (numCols_ - 1) / 2.0) * (tileWidthMinusOverlap);
        for (int row = 0; row < numRows_; row++) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            double yPixelOffset = (row - (numRows_ - 1) / 2.0) * (tileHeightMinusOverlap);
            Point2D.Double pixelPos = new Point2D.Double(xPixelOffset, yPixelOffset);
            Point2D.Double stagePos = new Point2D.Double();
            transform.transform(pixelPos, stagePos);
            AffineTransform posTransform = AffineUtils.getAffineTransform(getCurrentPixelSizeConfig(),
                    stagePos.x, stagePos.y);
            positions.add(new XYStagePosition(stagePos, tileWidthMinusOverlap, tileHeightMinusOverlap,
                    fullTileWidth, fullTileHeight, row, col, posTransform));
        }
    }
    //delete position squares (+padding) that do not overlap convex hull
    for (int i = positions.size() - 1; i >= 0; i--) {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        XYStagePosition pos = positions.get(i);
        //create square region corresponding to stage pos
        Region<Euclidean2D> square = getStagePositionRegion(pos);
        //if convex hull and position have no intersection, delete
        Region<Euclidean2D> intersection = regionFacotry_.intersection(square, convexHullRegion_);
        if (intersection.isEmpty()) {
            positions.remove(i);
        }
        square.getBoundarySize();
    }
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }
    synchronized (xyPositionLock_) {
        xyPositions_ = positions;
        xyPositionLock_.notifyAll();
    }
    //let manager know new params calculated
    manager_.updateSurfaceTableAndCombos();
}
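Besides the interruption checks in its loops, this method ends by publishing the computed positions inside a synchronized block and waking any waiters with notifyAll(); consumers presumably block in wait() on the same lock, and wait() throws InterruptedException itself if the waiting thread is interrupted. A minimal sketch of that handoff under those assumptions (names hypothetical):

import java.util.ArrayList;
import java.util.List;

public class ResultHandoff {
    private final Object lock = new Object();
    private List<String> result; // guarded by lock

    // Producer: publish the result and wake any waiting consumers.
    void publish(List<String> computed) {
        synchronized (lock) {
            result = new ArrayList<>(computed);
            lock.notifyAll();
        }
    }

    // Consumer: block until a result exists; wait() throws
    // InterruptedException if this thread is interrupted while parked.
    List<String> await() throws InterruptedException {
        synchronized (lock) {
            while (result == null) {
                lock.wait();
            }
            return result;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ResultHandoff handoff = new ResultHandoff();
        new Thread(() -> handoff.publish(List.of("tile A", "tile B"))).start();
        System.out.println(handoff.await());
    }
}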
From source file:org.micromanager.plugins.magellan.imagedisplay.DisplayOverlayer.java
private void addStagePositions(Overlay overlay, boolean above) throws InterruptedException {
    double zPosition = zoomableStack_.getZCoordinateOfDisplayedSlice(display_.getVisibleSliceIndex());
    //this will block until interpolation detailed enough to show stage positions
    ArrayList<XYStagePosition> positionsAtSlice = display_.getCurrentSurface().getXYPositonsAtSlice(zPosition, above);
    for (XYStagePosition pos : positionsAtSlice) {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        Point2D.Double[] corners = pos.getDisplayedTileCorners();
        LongPoint corner1 = display_.imageCoordsFromStageCoords(corners[0].x, corners[0].y);
        LongPoint corner2 = display_.imageCoordsFromStageCoords(corners[1].x, corners[1].y);
        LongPoint corner3 = display_.imageCoordsFromStageCoords(corners[2].x, corners[2].y);
        LongPoint corner4 = display_.imageCoordsFromStageCoords(corners[3].x, corners[3].y);
        //add lines connecting 4 corners
        Line l1 = new Line(corner1.x_, corner1.y_, corner2.x_, corner2.y_);
        Line l2 = new Line(corner2.x_, corner2.y_, corner3.x_, corner3.y_);
        Line l3 = new Line(corner3.x_, corner3.y_, corner4.x_, corner4.y_);
        Line l4 = new Line(corner4.x_, corner4.y_, corner1.x_, corner1.y_);
        l1.setStrokeColor(Color.red);
        l2.setStrokeColor(Color.red);
        l3.setStrokeColor(Color.red);
        l4.setStrokeColor(Color.red);
        overlay.add(l1);
        overlay.add(l2);
        overlay.add(l3);
        overlay.add(l4);
    }
}
From source file:org.micromanager.plugins.magellan.imagedisplay.DisplayOverlayer.java
private void renderSurfaceOverlay() throws InterruptedException {
    //start out with 10 interpolation points across the whole image
    int displayPixPerInterpPoint = Math.max(display_.getImagePlus().getWidth(),
            display_.getImagePlus().getHeight()) / INITIAL_NUM_INTERPOLATION_DIVISIONS;
    //keep redrawing until surface fully interpolated
    final Overlay startingOverlay = createBackgroundOverlay();
    addInterpPoints(display_.getCurrentSurface(), startingOverlay);
    if (showConvexHull_) {
        addConvexHull(startingOverlay);
    }

    while (true) {
        final Overlay surfOverlay = new Overlay();
        //add all objects from starting overlay rather than recalculating them each time
        for (int i = 0; i < startingOverlay.size(); i++) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            surfOverlay.add(startingOverlay.get(i));
        }
        SingleResolutionInterpolation interp = display_.getCurrentSurface().waitForCurentInterpolation();
        //wait until surface is interpolated at sufficient resolution to draw
        while (displayPixPerInterpPoint * zoomableStack_.getDownsampleFactor() < interp.getPixelsPerInterpPoint()) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            display_.getCurrentSurface().waitForHigherResolutionInterpolation();
            interp = display_.getCurrentSurface().waitForCurentInterpolation();
        }
        if (showStagePositionsAbove_ || showStagePositionsBelow_) {
            //these could conceivably change as function of interpolation detail
            addStagePositions(surfOverlay, showStagePositionsAbove_);
        }
        //add surface interpolation
        addSurfaceInterpolation(surfOverlay, interp, displayPixPerInterpPoint);
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        SwingUtilities.invokeLater(new Runnable() {
            @Override
            public void run() {
                canvas_.setOverlay(surfOverlay);
            }
        });
        if (displayPixPerInterpPoint == 1
                || displayPixPerInterpPoint * zoomableStack_.getDownsampleFactor() <= SurfaceInterpolator.MIN_PIXELS_PER_INTERP_POINT) {
            //finished
            return;
        }
        displayPixPerInterpPoint /= 2;
    }
}
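This renderer illustrates a second pattern: a background thread refines a result in passes, checks for interruption before each publish, and hands the finished overlay to the Swing Event Dispatch Thread via SwingUtilities.invokeLater. A minimal sketch of that structure (the pass count and printed output are hypothetical stand-ins for the overlay work):

import javax.swing.SwingUtilities;

public class ProgressiveRenderer {
    // Refines a result in passes on a worker thread, pushing each pass to the
    // Event Dispatch Thread. An interrupt between passes aborts the render.
    static void renderPasses(int passes) throws InterruptedException {
        for (int pass = 0; pass < passes; pass++) {
            final String result = "pass " + pass; // stand-in for a rendered overlay
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            // Publish on the EDT; the worker keeps refining in the meantime
            SwingUtilities.invokeLater(() -> System.out.println("displaying " + result));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        renderPasses(4);
    }
}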
From source file:fr.gael.dhus.sync.impl.ODataProductSynchronizer.java
/**
 * Retrieves new/updated products.
 * @return how many products have been retrieved.
 */
private int getNewProducts() throws InterruptedException {
    int res = 0;
    try {
        // Makes the query parameters
        Map<String, String> query_param = new HashMap<>();

        String lup_s = EdmSimpleTypeKind.DateTime.getEdmSimpleTypeInstance().valueToString(lastCreated,
                EdmLiteralKind.URI, null);
        // 'GreaterEqual' because of products with the same IngestionDate
        String filter = "IngestionDate ge " + lup_s;

        // Appends custom $filter parameter
        if (filterParam != null) {
            filter += " and (" + filterParam + ")";
        }

        query_param.put("$filter", filter);
        query_param.put("$top", String.valueOf(pageSize));
        query_param.put("$orderby", "IngestionDate");

        // Executes the query
        long delta = System.currentTimeMillis();
        ODataFeed pdf = client.readFeed(sourceCollection + "/Products", query_param);
        logODataPerf("Products", System.currentTimeMillis() - delta);

        // For each entry, creates a DataBase Object
        for (ODataEntry pdt : pdf.getEntries()) {
            Map<String, Object> props = pdt.getProperties();

            // Checks if a product with the same UUID already exists
            // (`UUID` and `PATH` have unique constraints), PATH references the UUID
            String uuid = (String) props.get("Id");
            if (PRODUCT_SERVICE.systemGetProduct(uuid) != null) {
                // FIXME: might not be the same product
                this.lastCreated = (((GregorianCalendar) props.get("IngestionDate")).getTime());
                this.dateChanged = true;
                continue;
            }

            // Makes the product resource path
            String pdt_p = "/Products('" + uuid + "')";

            Product product = new Product();
            product.setUuid(uuid);

            // Reads the properties
            product.setIdentifier((String) props.get("Name"));
            product.setIngestionDate(((GregorianCalendar) props.get("IngestionDate")).getTime());
            product.setCreated(((GregorianCalendar) props.get("CreationDate")).getTime());
            product.setFootPrint((String) props.get("ContentGeometry"));
            product.setProcessed(Boolean.TRUE);
            product.setSize((Long) props.get("ContentLength"));

            // Reads the ContentDate complex type
            Map contentDate = (Map) props.get("ContentDate");
            product.setContentStart(((GregorianCalendar) contentDate.get("Start")).getTime());
            product.setContentEnd(((GregorianCalendar) contentDate.get("End")).getTime());

            // Sets the origin to the remote URI
            product.setOrigin(client.getServiceRoot() + pdt_p + "/$value");
            product.setPath(new URL(pdt.getMetadata().getId() + "/$value"));

            // Sets the download path to LocalPath (if LocalPaths are exposed)
            if (this.remoteIncoming != null && !this.copyProduct) {
                String path = (String) props.get("LocalPath");
                if (path != null && !path.isEmpty()) {
                    Map<String, String> checksum = (Map) props.get("Checksum");
                    Product.Download d = new Product.Download();
                    d.setPath(Paths.get(this.remoteIncoming, path).toString());
                    d.setSize(product.getSize());
                    d.setType((String) props.get("ContentType"));
                    d.setChecksums(Collections.singletonMap(checksum.get(V1Model.ALGORITHM),
                            checksum.get(V1Model.VALUE)));
                    product.setDownload(d);

                    File f = new File(d.getPath());
                    if (!f.exists()) {
                        // The incoming path is probably false
                        // Throws an exception to notify the admin about this issue
                        throw new RuntimeException("ODataSynchronizer: Local file '" + path
                                + "' not found in remote incoming '" + this.remoteIncoming + '\'');
                    }
                    product.setPath(new URL("file://" + d.getPath()));
                } else {
                    throw new RuntimeException("RemoteIncoming is set"
                            + " but the LocalPath property is missing in remote products");
                }
            }

            // Retrieves the Product Class
            delta = System.currentTimeMillis();
            ODataEntry pdt_class_e = client.readEntry(pdt_p + "/Class", null);
            logODataPerf(pdt_p + "/Class", System.currentTimeMillis() - delta);
            Map<String, Object> pdt_class_pm = pdt_class_e.getProperties();
            String pdt_class = (String) pdt_class_pm.get("Uri");
            product.setItemClass(pdt_class);

            // Retrieves Metadata Indexes (aka Attributes on OData)
            delta = System.currentTimeMillis();
            ODataFeed mif = client.readFeed(pdt_p + "/Attributes", null);
            logODataPerf(pdt_p + "/Attributes", System.currentTimeMillis() - delta);
            List<MetadataIndex> mi_l = new ArrayList<>(mif.getEntries().size());
            for (ODataEntry mie : mif.getEntries()) {
                props = mie.getProperties();
                MetadataIndex mi = new MetadataIndex();
                String mi_name = (String) props.get("Name");
                mi.setName(mi_name);
                mi.setType((String) props.get("ContentType"));
                mi.setValue((String) props.get("Value"));
                MetadataType mt = METADATA_TYPE_SERVICE.getMetadataTypeByName(pdt_class, mi_name);
                if (mt != null) {
                    mi.setCategory(mt.getCategory());
                    if (mt.getSolrField() != null) {
                        mi.setQueryable(mt.getSolrField().getName());
                    }
                } else if (mi_name.equals("Identifier")) {
                    mi.setCategory("");
                    mi.setQueryable("identifier");
                } else if (mi_name.equals("Ingestion Date")) {
                    mi.setCategory("product");
                    mi.setQueryable("ingestionDate");
                } else {
                    mi.setCategory("");
                }
                mi_l.add(mi);
            }
            product.setIndexes(mi_l);

            // Retrieves subProducts
            delta = System.currentTimeMillis();
            ODataFeed subp = client.readFeed(pdt_p + "/Products", null);
            logODataPerf(pdt_p + "/Products", System.currentTimeMillis() - delta);
            for (ODataEntry subpe : subp.getEntries()) {
                String id = (String) subpe.getProperties().get("Id");
                Long content_len = (Long) subpe.getProperties().get("ContentLength");
                String path = (String) subpe.getProperties().get("LocalPath");
                if (this.remoteIncoming != null && !this.copyProduct && path != null && !path.isEmpty()) {
                    path = Paths.get(this.remoteIncoming, path).toString();
                } else {
                    path = client.getServiceRoot() + pdt_p + "/Products('"
                            + subpe.getProperties().get("Id") + "')/$value";
                }
                // Retrieves the Quicklook
                if (id.equals("Quicklook")) {
                    product.setQuicklookSize(content_len);
                    product.setQuicklookPath(path);
                }
                // Retrieves the Thumbnail
                else if (id.equals("Thumbnail")) {
                    product.setThumbnailSize(content_len);
                    product.setThumbnailPath(path);
                }
            }

            // `processed` must be set to TRUE
            product.setProcessed(Boolean.TRUE);

            // Downloads the product if required
            if (this.copyProduct) {
                downloadProduct(product);
            }

            // Stores `product` in the database
            product = PRODUCT_SERVICE.addProduct(product);
            product.setIndexes(mi_l); // DELME lazy loading not working atm ...

            // Sets the target collection both in the DB and Solr
            if (this.targetCollection != null) {
                try {
                    COLLECTION_SERVICE.systemAddProduct(this.targetCollection, product.getId(), false);
                } catch (HibernateException e) {
                    LOGGER.error("Synchronizer#" + getId() + " Failed to set collection#"
                            + this.targetCollection + " for product " + product.getIdentifier(), e);
                    // Reverting ...
                    PRODUCT_SERVICE.systemDeleteProduct(product.getId());
                    throw e;
                } catch (Exception e) {
                    LOGGER.error("Synchronizer#" + getId() + " Failed to update product "
                            + product.getIdentifier() + " in Solr's index", e);
                }
            }

            // Stores `product` in the index
            try {
                delta = System.currentTimeMillis();
                SEARCH_SERVICE.index(product);
                LOGGER.debug("Synchronizer#" + getId() + " indexed product " + product.getIdentifier()
                        + " in " + (System.currentTimeMillis() - delta) + "ms");
            } catch (Exception e) {
                // Solr errors are not considered fatal
                LOGGER.error("Synchronizer#" + getId() + " Failed to index product "
                        + product.getIdentifier() + " in Solr's index", e);
            }

            this.lastCreated = product.getIngestionDate();
            this.dateChanged = true;

            LOGGER.info("Synchronizer#" + getId() + " Product " + product.getIdentifier() + " ("
                    + product.getSize() + " bytes compressed) " + "successfully synchronized from "
                    + this.client.getServiceRoot());

            res++;

            // Checks if we have to abandon the current pass
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
        }
    } catch (IOException | ODataException ex) {
        LOGGER.error("OData failure", ex);
    } finally {
        // Saves the ingestionDate of the last created Product
        this.syncConf.setConfig("last_created", String.valueOf(this.lastCreated.getTime()));
    }

    return res;
}
From source file:org.rhq.modules.plugins.wildfly10.BaseServerComponent.java
private boolean waitForServerToStart() throws InterruptedException {
    boolean up = false;
    while (!up) {
        Operation op = new ReadAttribute(new Address(), "release-version");
        try {
            Result res = getASConnection().execute(op);
            if (res.isSuccess()) { // If op succeeds, server is not down
                up = true;
            }
        } catch (Exception e) {
            //do absolutely nothing
            //if an exception is thrown that means the server is still down, so consider this
            //a single failed attempt, equivalent to res.isSuccess == false
        }

        if (!up) {
            if (context.getComponentInvocationContext().isInterrupted()) {
                // Operation canceled or timed out
                throw new InterruptedException();
            }
            Thread.sleep(SECONDS.toMillis(1));
        }
    }
    return true;
}
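This polling loop has two interruption paths: the explicit isInterrupted() check on the invocation context, and Thread.sleep(), which throws InterruptedException on its own if the thread is interrupted while sleeping. A minimal sketch relying only on the second path (the readiness check is a hypothetical stand-in for the management-API call):

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class PollUntilReady {
    // Polls a readiness check once per second. Thread.sleep() throws
    // InterruptedException if the thread is interrupted while sleeping,
    // so no explicit flag check is needed in this sketch.
    static void waitUntil(BooleanSupplier isReady) throws InterruptedException {
        while (!isReady.getAsBoolean()) {
            TimeUnit.SECONDS.sleep(1);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        long deadline = System.currentTimeMillis() + 3000;
        waitUntil(() -> System.currentTimeMillis() > deadline);
        System.out.println("ready");
    }
}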
From source file:org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor.java
protected void handleIterator(final Context context, final Iterator itty, final Graph graph)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        onTraversalSuccess(graph, context);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while. this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize. Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval. as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    onError(graph, context);
                    break;
                }

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (itty.hasNext())
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will rollback given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        onTraversalSuccess(graph, context);

                        // exit the result iteration loop as there are no more results left. using this external control
                        // because of the above commit. some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!itty.hasNext())
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred. in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }
        stopWatch.unsplit();
    }

    stopWatch.stop();
}
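Distilled to its control flow, this handler checks the interrupt flag on every pass, aggregates results into fixed-size batches, and enforces an overall deadline on top of the per-pass interruption checks. A stripped-down sketch of that loop shape under those assumptions (batch size, deadline, and the printed "flush" are hypothetical stand-ins for Gremlin Server's settings and frame writes):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.TimeoutException;

public class BatchingLoop {
    // Drains an iterator into fixed-size batches, aborting on interrupt or deadline.
    static void drain(Iterator<Integer> items, int batchSize, long timeoutMillis)
            throws InterruptedException, TimeoutException {
        final long start = System.currentTimeMillis();
        List<Integer> batch = new ArrayList<>(batchSize);
        while (items.hasNext()) {
            if (Thread.interrupted())
                throw new InterruptedException();
            batch.add(items.next());
            if (batch.size() == batchSize || !items.hasNext()) {
                System.out.println("flushing " + batch); // stand-in for writing a frame
                batch = new ArrayList<>(batchSize);
            }
            if (System.currentTimeMillis() - start > timeoutMillis)
                throw new TimeoutException("response took too long to serialize");
        }
    }

    public static void main(String[] args) throws Exception {
        drain(List.of(1, 2, 3, 4, 5).iterator(), 2, 1000);
    }
}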
From source file:com.cloudbees.jenkins.plugins.bitbucket.server.client.BitbucketServerAPIClient.java
private <V> List<V> getResources(UriTemplate template, Class<? extends PagedApiResponse<V>> clazz)
        throws IOException, InterruptedException {
    List<V> resources = new ArrayList<>();

    PagedApiResponse<V> page;
    Integer pageNumber = 0;
    Integer limit = DEFAULT_PAGE_LIMIT;
    do {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        String url = template //
                .set("start", pageNumber) //
                .set("limit", limit) //
                .expand();
        String response = getRequest(url);
        try {
            page = JsonParser.toJava(response, clazz);
        } catch (IOException e) {
            throw new IOException("I/O error when parsing response from URL: " + url, e);
        }
        resources.addAll(page.getValues());

        limit = page.getLimit();
        pageNumber = page.getNextPageStart();
    } while (!page.isLastPage());

    return resources;
}
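Here the interrupt check sits at the top of a pagination loop, so a cancelled scan stops between network round-trips rather than mid-request. A generic sketch of the same shape (the page-fetching function and termination rule are hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.function.IntFunction;

public class PagedFetch {
    // Collects all pages, checking the interrupt flag before each (potentially
    // slow) page request.
    static List<String> fetchAll(IntFunction<List<String>> fetchPage) throws InterruptedException {
        List<String> all = new ArrayList<>();
        int pageNumber = 0;
        List<String> page;
        do {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            page = fetchPage.apply(pageNumber++);
            all.addAll(page);
        } while (!page.isEmpty());
        return all;
    }

    public static void main(String[] args) throws InterruptedException {
        // Three pages of fake data, then an empty page terminates the scan
        System.out.println(fetchAll(n -> n < 3 ? List.of("item" + n) : List.of()));
    }
}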
From source file:fr.gael.dhus.sync.impl.ODataProductSynchronizer.java
@Override
public boolean synchronize() throws InterruptedException {
    int retrieved = 0, updated = 0, deleted = 0;

    LOGGER.info("Synchronizer#" + getId() + " started");
    try {
        retrieved = getNewProducts();
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        updated = getUpdatedProducts();
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        deleted = getDeletedProducts();
    } catch (LockAcquisitionException | CannotAcquireLockException e) {
        throw new InterruptedException(e.getMessage());
    } finally {
        // Logs a summary of what it has done this session
        StringBuilder sb = new StringBuilder("Synchronizer#");
        sb.append(getId()).append(" done: ");
        sb.append(retrieved).append(" new Products, ");
        sb.append(updated).append(" updated Products, ");
        sb.append(deleted).append(" deleted Products");
        sb.append(" from ").append(this.client.getServiceRoot());
        LOGGER.info(sb.toString());

        // Writes the database only if there is a modification
        if (this.dateChanged) {
            SYNC_SERVICE.saveSynchronizer(this);
            this.dateChanged = false;
        }
    }
    return retrieved < pageSize && updated < pageSize && deleted < pageSize;
}
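Unlike the earlier examples, this one uses the other constructor, InterruptedException(String), to carry the original exception's message when translating a lock-acquisition failure into an interruption. A minimal sketch of that translation (the lock-failure exception here is a hypothetical stand-in for the Hibernate/Spring classes used above):

public class ExceptionTranslation {
    // Hypothetical stand-in for a framework-specific locking failure
    static class LockAcquisitionException extends RuntimeException {
        LockAcquisitionException(String message) {
            super(message);
        }
    }

    static void doSynchronizedWork() throws InterruptedException {
        try {
            throw new LockAcquisitionException("could not acquire row lock");
        } catch (LockAcquisitionException e) {
            // InterruptedException has no (Throwable) constructor, so only the
            // message is preserved this way; initCause(e) keeps the full chain.
            InterruptedException ie = new InterruptedException(e.getMessage());
            ie.initCause(e);
            throw ie;
        }
    }

    public static void main(String[] args) {
        try {
            doSynchronizedWork();
        } catch (InterruptedException ie) {
            System.out.println("interrupted: " + ie.getMessage());
        }
    }
}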