List of usage examples for org.apache.commons.lang.time.StopWatch: the StopWatch() constructor
public StopWatch()
Constructor; the new stopwatch is created in the unstarted state.
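A minimal, self-contained sketch of the constructor and the basic start/stop lifecycle before the project examples below (the sleep stands in for whatever work is being timed):

import org.apache.commons.lang.time.StopWatch;

public class StopWatchBasics {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch(); // the new stopwatch is not yet started
        watch.start();                     // begin timing
        Thread.sleep(250);                 // placeholder for the work being measured
        watch.stop();                      // freeze the elapsed time
        System.out.println("elapsed ms: " + watch.getTime()); // milliseconds between start() and stop()
        System.out.println(watch.toString());                 // formatted, e.g. "0:00:00.250"
    }
}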
From source file:org.apache.rocketmq.jms.integration.listener.SimpleTextListenerTest.java
@Test
public void testListener() throws Exception {
    jmsTemplate.convertAndSend(DESTINATION, "first");

    StopWatch watch = new StopWatch();
    watch.start();

    int count = 1;
    while (simpleTextListener.getReceivedMsg().size() != count) {
        Thread.sleep(1000);
        log.info("Waiting for receiving {} messages sent to {} topic,now has received {}", count, DESTINATION,
                simpleTextListener.getReceivedMsg().size());
        if (watch.getTime() > 1000 * 10) {
            assertFalse(true);
        }
    }

    assertTrue(true);
}
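The test above shows a common StopWatch pattern: getTime() is read on a still-running watch to bound a polling loop. A stripped-down sketch of just that pattern, with a hypothetical condition() standing in for the assertion being polled:

StopWatch watch = new StopWatch();
watch.start();
// poll until the condition holds, giving up after roughly ten seconds of wall-clock time;
// getTime() can be called while the watch is still running
while (!condition()) {
    Thread.sleep(1000);
    if (watch.getTime() > 10 * 1000L) {
        throw new AssertionError("condition not met within 10 seconds");
    }
}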
From source file:org.apache.sling.hc.core.impl.executor.HealthCheckExecutorImpl.java
/**
 * Execute a set of health checks
 */
private List<HealthCheckExecutionResult> execute(final ServiceReference[] healthCheckReferences,
        HealthCheckExecutionOptions options) {
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    final List<HealthCheckExecutionResult> results = new ArrayList<HealthCheckExecutionResult>();
    final List<HealthCheckMetadata> healthCheckDescriptors = getHealthCheckMetadata(healthCheckReferences);

    createResultsForDescriptors(healthCheckDescriptors, results, options);

    stopWatch.stop();
    if (logger.isDebugEnabled()) {
        logger.debug("Time consumed for all checks: {}", msHumanReadable(stopWatch.getTime()));
    }

    // sort result
    Collections.sort(results, new Comparator<HealthCheckExecutionResult>() {
        @Override
        public int compare(final HealthCheckExecutionResult arg0, final HealthCheckExecutionResult arg1) {
            return ((ExecutionResult) arg0).compareTo((ExecutionResult) arg1);
        }
    });

    return results;
}
From source file:org.apache.sling.hc.core.impl.executor.HealthCheckExecutorImpl.java
/**
 * Wait for the futures until the timeout is reached
 */
private void waitForFuturesRespectingTimeout(final List<HealthCheckFuture> futuresForResultOfThisCall,
        HealthCheckExecutionOptions options) {
    final StopWatch callExcutionTimeStopWatch = new StopWatch();
    callExcutionTimeStopWatch.start();
    boolean allFuturesDone;

    long effectiveTimeout = this.timeoutInMs;
    if (options != null && options.getOverrideGlobalTimeout() > 0) {
        effectiveTimeout = options.getOverrideGlobalTimeout();
    }

    if (futuresForResultOfThisCall.isEmpty()) {
        return; // nothing to wait for (usually because of cached results)
    }

    do {
        try {
            synchronized (stillRunningFutures) {
                stillRunningFutures.wait(50); // wait for notifications of callbacks of HealthCheckFutures
            }
        } catch (final InterruptedException ie) {
            logger.warn("Unexpected InterruptedException while waiting for healthCheckContributors", ie);
        }

        allFuturesDone = true;
        for (final HealthCheckFuture healthCheckFuture : futuresForResultOfThisCall) {
            allFuturesDone &= healthCheckFuture.isDone();
        }
    } while (!allFuturesDone && callExcutionTimeStopWatch.getTime() < effectiveTimeout);
}
From source file:org.apache.sling.hc.core.impl.executor.HealthCheckFuture.java
HealthCheckFuture(final HealthCheckMetadata metadata, final BundleContext bundleContext,
        final Callback callback) {
    super(new Callable<ExecutionResult>() {
        @Override
        public ExecutionResult call() throws Exception {
            Thread.currentThread().setName("HealthCheck " + metadata.getTitle());
            LOG.debug("Starting check {}", metadata);

            final StopWatch stopWatch = new StopWatch();
            stopWatch.start();
            Result resultFromHealthCheck = null;
            ExecutionResult executionResult = null;

            final HealthCheck healthCheck = (HealthCheck) bundleContext
                    .getService(metadata.getServiceReference());

            try {
                if (healthCheck != null) {
                    resultFromHealthCheck = healthCheck.execute();
                } else {
                    throw new IllegalStateException("Service for " + metadata + " is gone");
                }
            } catch (final Exception e) {
                resultFromHealthCheck = new Result(Result.Status.CRITICAL,
                        "Exception during execution of '" + metadata.getName() + "': " + e, e);
            } finally {
                // unget service ref
                bundleContext.ungetService(metadata.getServiceReference());

                // update result with information about this run
                stopWatch.stop();
                long elapsedTime = stopWatch.getTime();
                if (resultFromHealthCheck != null) {
                    // wrap the result in an execution result
                    executionResult = new ExecutionResult(metadata, resultFromHealthCheck, elapsedTime);
                }
                LOG.debug("Time consumed for {}: {}", metadata, msHumanReadable(elapsedTime));
            }

            callback.finished(executionResult);
            Thread.currentThread().setName("HealthCheck idle");
            return executionResult;
        }
    });
    this.createdTime = new Date();
    this.metadata = metadata;
}
From source file:org.apache.solr.handler.component.RatiosComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException {
    try {
        HashMap<String, Long> timers = new HashMap<String, Long>();

        if (rb.doRatios) {
            SolrParams params = rb.req.getParams();

            // in ratios the facet field is always the dimension field
            String dimension = params.get(RatiosParams.RATIOS_DIMENSION);
            String measure = params.get(RatiosParams.RATIOS_MEASURE);
            Double min = params.getDouble(RatiosParams.RATIOS_MIN, 0);
            Double max = params.getDouble(RatiosParams.RATIOS_MAX, 1);
            boolean debug = params.getBool(RatiosParams.RATIOS_DEBUG, false);
            boolean rows = params.getBool(RatiosParams.RATIOS_ROWS, false);

            HashMap<String, String[]> fieldFacets = new HashMap<String, String[]>();
            fieldFacets.put(measure, new String[] { dimension });

            SolrIndexSearcher searcher = rb.req.getSearcher();

            String defType = params.get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);
            QParser q1 = QParser.getParser(
                    params.get("q") + " AND (" + params.get(RatiosParams.RATIOS_Q1) + ")", defType, rb.req);
            QParser q2 = QParser.getParser(
                    params.get("q") + " AND (" + params.get(RatiosParams.RATIOS_Q2) + ")", defType, rb.req);

            StopWatch stopwatch = new StopWatch();
            stopwatch.start();
            DocSet set1 = searcher.getDocSet(q1.getQuery());
            stopwatch.stop();
            timers.put("q1.ms", stopwatch.getTime());
            stopwatch.reset();

            stopwatch.start();
            DocSet set2 = searcher.getDocSet(q2.getQuery());
            stopwatch.stop();
            timers.put("q2.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== stats for 1st
            stopwatch.start();
            ModifiableSolrParams xp = new ModifiableSolrParams();
            xp.add(StatsParams.STATS_FIELD, measure);
            xp.add(StatsParams.STATS_FACET, dimension);
            xp.add(ShardParams.IS_SHARD, String.valueOf(params.getBool(ShardParams.IS_SHARD, false)));

            SimpleStats stats1 = new SimpleStats(rb.req, set1, xp);

            // TODO implement according to SOLR standard
            NamedList<?> map1 = stats1.getFieldCacheStats(measure, new String[] { dimension });
            if (map1 == null || map1.size() <= 0) {
                // empty do nothing
                return;
            }
            Map<String, Double> matrix1 = new HashMap<String, Double>(); // TODO map1.get(dimension);
            stopwatch.stop();
            timers.put("q1.stats.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== stats for 2nd
            stopwatch.start();
            SimpleStats stats2 = new SimpleStats(rb.req, set2, xp);
            NamedList<?> map2 = stats2.getFieldCacheStats(measure, new String[] { dimension });
            if (map2 == null || map2.size() <= 0) {
                // empty do nothing
                return;
            }
            Map<String, Double> matrix2 = new HashMap<String, Double>(); // TODO map2.get(dimension);
            stopwatch.stop();
            timers.put("q2.stats.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== ratios
            stopwatch.start();
            OpenBitSet ratios = new OpenBitSet(); // TODO filter(matrix1, matrix2, min, max);
            stopwatch.stop();
            timers.put("ratio.ms", stopwatch.getTime());
            stopwatch.reset();

            // ====== done do payload extraction
            NamedList<Object> payload = new NamedList<Object>();

            if (debug) {
                // timer information
                NamedList<Object> performance = new NamedList<Object>();
                for (String key : timers.keySet()) {
                    performance.add(key, timers.get(key));
                }
                payload.add("debug", performance);
            }

            payload.add("count", ratios.cardinality());
            payload.add("union", set1.unionSize(set2));
            payload.add("intersection", set1.intersectionSize(set2));

            NamedList<Object> query1 = new NamedList<Object>();
            query1.add("rows", set1.size());
            query1.add("dimensions", matrix1.size());
            if (rows) {
                query1.add("results", toNamedList(matrix1));
            }

            NamedList<Object> query2 = new NamedList<Object>();
            query2.add("rows", set2.size());
            query2.add("dimensions", matrix2.size());
            if (rows) {
                query2.add("results", toNamedList(matrix2));
            }

            NamedList<Object> breakdown = new NamedList<Object>();
            breakdown.add("query1", query1);
            breakdown.add("query2", query2);
            payload.add("breakdown", breakdown);

            // TODO - output ratio bitset to hex for UX to do client side join
            // byte[] bytes = HexUtil.convertToGzipCompressedByte(ratios.getBits());
            // String x = javax.xml.bind.DatatypeConverter.printBase64Binary(bytes);
            // payload.add("base64", x);

            rb.rsp.add(RatiosParams.RATIOS, payload);
        }
    } catch (ParseException e) {
        throw new RuntimeException(e);
    }
}
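The RatiosComponent above reuses a single StopWatch across several phases by cycling stop(), getTime() and reset(); reset() is required before the next start() because commons-lang does not allow restarting a stopped watch. A minimal sketch of that reuse pattern, with hypothetical phaseOne()/phaseTwo() placeholders:

Map<String, Long> timers = new HashMap<String, Long>();
StopWatch stopwatch = new StopWatch();

stopwatch.start();
phaseOne();                                   // hypothetical first unit of work
stopwatch.stop();
timers.put("phase1.ms", stopwatch.getTime()); // elapsed millis for phase one
stopwatch.reset();                            // back to the unstarted state so start() is legal again

stopwatch.start();
phaseTwo();                                   // hypothetical second unit of work
stopwatch.stop();
timers.put("phase2.ms", stopwatch.getTime());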
From source file:org.apache.spark.simr.Simr.java
public void startWorker() throws IOException {
    StopWatch sw = new StopWatch();
    sw.start();
    UrlCoresTuple uc = getMasterURL();
    sw.stop();
    if (uc == null) {
        log.warn(String.format("getMasterURL timed out in startWorker after %s", sw.toString()));
        return;
    }

    int uniqueId = context.getTaskAttemptID().getTaskID().getId();
    int maxCores = uc.cores;
    String masterUrl = uc.url;

    String[] exList = new String[] { masterUrl, Integer.toString(uniqueId), getLocalIP(),
            Integer.toString(maxCores) };

    redirectOutput("worker" + uniqueId);
    org.apache.spark.executor.CoarseGrainedExecutorBackend.main(exList);
}
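This example reports the elapsed time through StopWatch.toString(), which formats the duration as hours:minutes:seconds.millis. A tiny illustration, with the sleep standing in for the timed call:

StopWatch sw = new StopWatch();
sw.start();
Thread.sleep(1234);                 // placeholder for the call being timed
sw.stop();
System.out.println("took " + sw);   // toString() prints e.g. "0:00:01.234"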
From source file:org.apache.tinkerpop.gremlin.server.handler.IteratorHandler.java
@Override
public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise)
        throws Exception {
    if (msg instanceof Pair) {
        try {
            final Pair pair = (Pair) msg;
            final Iterator itty = (Iterator) pair.getValue1();
            final RequestMessage requestMessage = (RequestMessage) pair.getValue0();

            // the batch size can be overridden by the request
            final int resultIterationBatchSize = (Integer) requestMessage.optionalArgs(Tokens.ARGS_BATCH_SIZE)
                    .orElse(settings.resultIterationBatchSize);

            // timer for the total serialization time
            final StopWatch stopWatch = new StopWatch();

            final EventExecutorGroup executorService = ctx.executor();
            final Future<?> iteration = executorService.submit((Callable<Void>) () -> {
                logger.debug("Preparing to iterate results from - {} - in thread [{}]", requestMessage,
                        Thread.currentThread().getName());

                stopWatch.start();
                List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);
                while (itty.hasNext()) {
                    aggregate.add(itty.next());

                    // send back a page of results if batch size is met or if it's the end of the results being
                    // iterated
                    if (aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                        final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                                : ResponseStatusCode.SUCCESS;
                        ctx.writeAndFlush(
                                ResponseMessage.build(requestMessage).code(code).result(aggregate).create());
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    }

                    stopWatch.split();
                    if (stopWatch.getSplitTime() > settings.serializedResponseTimeout)
                        throw new TimeoutException(
                                "Serialization of the entire response exceeded the serializeResponseTimeout setting");
                    stopWatch.unsplit();
                }

                return null;
            });

            iteration.addListener(f -> {
                stopWatch.stop();

                if (!f.isSuccess()) {
                    final String errorMessage = String.format(
                            "Response iteration and serialization exceeded the configured threshold for request [%s] - %s",
                            msg, f.cause().getMessage());
                    logger.warn(errorMessage);
                    ctx.writeAndFlush(
                            ResponseMessage.build(requestMessage).code(ResponseStatusCode.SERVER_ERROR_TIMEOUT)
                                    .statusMessage(errorMessage).create());
                }
            });
        } finally {
            ReferenceCountUtil.release(msg);
        }
    } else {
        ctx.write(msg, promise);
    }
}
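The handler above, like the Gremlin Server processors that follow, uses split()/getSplitTime()/unsplit() to read the running total inside a loop without stopping the watch. A reduced sketch of that timeout check, not TinkerPop code; writing of each item is left as a placeholder:

static void writeAll(List<Object> results, long timeoutMs) throws TimeoutException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    for (Object item : results) {
        // ... serialize and write "item" here (placeholder) ...

        stopWatch.split();                          // record the current instant
        if (stopWatch.getSplitTime() > timeoutMs) { // elapsed ms from start() up to the split point
            throw new TimeoutException("serialization exceeded " + timeoutMs + " ms");
        }
        stopWatch.unsplit();                        // discard the split; the watch keeps running
    }
    stopWatch.stop();
}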
From source file:org.apache.tinkerpop.gremlin.server.op.AbstractEvalOpProcessor.java
/**
 * Called by {@link #evalOpInternal} when iterating a result set. Implementers should respect the
 * {@link Settings#serializedResponseTimeout} configuration and break the serialization process if
 * it begins to take too long to do so, throwing a {@link java.util.concurrent.TimeoutException} in such
 * cases.
 *
 * @param context The Gremlin Server {@link Context} object containing settings, request message, etc.
 * @param itty    The result to iterate
 * @throws TimeoutException if the time taken to serialize the entire result set exceeds the allowable time.
 */
protected void handleIterator(final Context context, final Iterator itty)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // sessionless requests are always transaction managed, but in-session requests are configurable.
    final boolean managedTransactionsForRequest = manageTransactions ? true
            : (Boolean) msg.getArgs().getOrDefault(Tokens.ARGS_MANAGE_TRANSACTION, false);

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        if (managedTransactionsForRequest)
            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while. this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();
    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize. Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        if (aggregate.size() < resultIterationBatchSize)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval. as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code);
                } catch (Exception ex) {
                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    if (manageTransactions)
                        attemptRollback(msg, context.getGraphManager(), settings.strictTransactionManagement);
                    break;
                }

                // only need to reset the aggregation list if there's more stuff to write
                if (itty.hasNext())
                    aggregate = new ArrayList<>(resultIterationBatchSize);
                else {
                    // iteration and serialization are both complete which means this finished successfully. note that
                    // errors internal to script eval or timeout will rollback given GremlinServer's global configurations.
                    // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                    // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                    // there are no more items to iterate and serialization is complete
                    if (managedTransactionsForRequest)
                        attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);

                    // exit the result iteration loop as there are no more results left. using this external control
                    // because of the above commit. some graphs may open a new transaction on the call to
                    // hasNext()
                    hasMore = false;
                }

                // the flush is called after the commit has potentially occurred. in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }
        stopWatch.unsplit();
    }

    stopWatch.stop();
}
From source file:org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor.java
/**
 * Provides a generic way of iterating a result set back to the client. Implementers should respect the
 * {@link Settings#serializedResponseTimeout} configuration and break the serialization process if
 * it begins to take too long to do so, throwing a {@link java.util.concurrent.TimeoutException} in such
 * cases.
 *
 * @param context The Gremlin Server {@link Context} object containing settings, request message, etc.
 * @param itty    The result to iterate
 * @throws TimeoutException if the time taken to serialize the entire result set exceeds the allowable time.
 */
protected void handleIterator(final Context context, final Iterator itty)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // sessionless requests are always transaction managed, but in-session requests are configurable.
    final boolean managedTransactionsForRequest = manageTransactions ? true
            : (Boolean) msg.getArgs().getOrDefault(Tokens.ARGS_MANAGE_TRANSACTION, false);

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        if (managedTransactionsForRequest)
            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while. this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();
    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize. Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval. as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    if (managedTransactionsForRequest)
                        attemptRollback(msg, context.getGraphManager(), settings.strictTransactionManagement);
                    break;
                }

                // track whether there is anything left in the iterator because it needs to be accessed after
                // the transaction could be closed - in that case a call to hasNext() could open a new transaction
                // unintentionally
                final boolean moreInIterator = itty.hasNext();

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (moreInIterator)
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will rollback given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        if (managedTransactionsForRequest)
                            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);

                        // exit the result iteration loop as there are no more results left. using this external control
                        // because of the above commit. some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!moreInIterator)
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred. in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }
        stopWatch.unsplit();
    }

    stopWatch.stop();
}
From source file:org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor.java
protected void handleIterator(final Context context, final Iterator itty, final Graph graph)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        onTraversalSuccess(graph, context);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while. this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();
    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize. Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval. as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    onError(graph, context);
                    break;
                }

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (itty.hasNext())
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will rollback given GremlinServer's global configurations.
                        // local errors will get rolled back below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        onTraversalSuccess(graph, context);

                        // exit the result iteration loop as there are no more results left. using this external control
                        // because of the above commit. some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!itty.hasNext())
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred. in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }
        stopWatch.unsplit();
    }

    stopWatch.stop();
}