Example usage for java.util.concurrent.TimeoutException(String message)

Introduction

This page shows example usages of the java.util.concurrent.TimeoutException(String) constructor, collected from open source projects.

Prototype

public TimeoutException(String message) 

Document

Constructs a TimeoutException with the specified detail message.
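
Before the project examples below, a minimal sketch of the constructor in isolation: the detail message passed in is what getMessage() returns when the exception is later caught.

import java.util.concurrent.TimeoutException;

public class TimeoutMessageExample {
    public static void main(String[] args) {
        try {
            // Construct with a detail message describing what timed out.
            throw new TimeoutException("Operation did not complete within 5 seconds");
        } catch (TimeoutException e) {
            System.out.println(e.getMessage()); // prints the detail message
        }
    }
}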

Usage

From source file: org.apache.hadoop.hbase.client.TestFastFailWithoutTestUtil.java

@Test
public void testExceptionsIdentifiedByInterceptor() throws IOException {
    Throwable[] networkexceptions = new Throwable[] { new ConnectException("Mary is unwell"),
            new SocketTimeoutException("Mike is too late"), new ClosedChannelException(),
            new SyncFailedException("Dave is not on the same page"), new TimeoutException("Mike is late again"),
            new EOFException("This is the end... "), new ConnectionClosingException("Its closing") };
    final String INDUCED = "Induced";
    Throwable[] nonNetworkExceptions = new Throwable[] { new IOException("Bob died"),
            new RemoteException("Bob's cousin died", null), new NoSuchMethodError(INDUCED),
            new NullPointerException(INDUCED), new DoNotRetryIOException(INDUCED), new Error(INDUCED) };

    Configuration conf = HBaseConfiguration.create();
    long CLEANUP_TIMEOUT = 0;
    long FAST_FAIL_THRESHOLD = 1000000;
    conf.setBoolean(HConstants.HBASE_CLIENT_FAST_FAIL_MODE_ENABLED, true);
    conf.setLong(HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_MS_DURATION_MS, CLEANUP_TIMEOUT);
    conf.setLong(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, FAST_FAIL_THRESHOLD);
    for (Throwable e : networkexceptions) {
        PreemptiveFastFailInterceptor interceptor = TestFastFailWithoutTestUtil
                .createPreemptiveInterceptor(conf);
        FastFailInterceptorContext context = (FastFailInterceptorContext) interceptor.createEmptyContext();

        RetryingCallable<?> callable = getDummyRetryingCallable(getSomeServerName());
        context.prepare(callable, 0);
        interceptor.intercept(context);
        interceptor.handleFailure(context, e);
        interceptor.updateFailureInfo(context);
        assertTrue("The call shouldn't have been successful if there was a ConnectException",
                context.getCouldNotCommunicateWithServer().booleanValue());
    }
    for (Throwable e : nonNetworkExceptions) {
        try {
            PreemptiveFastFailInterceptor interceptor = TestFastFailWithoutTestUtil
                    .createPreemptiveInterceptor(conf);
            FastFailInterceptorContext context = (FastFailInterceptorContext) interceptor.createEmptyContext();

            RetryingCallable<?> callable = getDummyRetryingCallable(getSomeServerName());
            context.prepare(callable, 0);
            interceptor.intercept(context);
            interceptor.handleFailure(context, e);
            interceptor.updateFailureInfo(context);
            assertFalse("The call shouldn't have been successful if there was a ConnectException",
                    context.getCouldNotCommunicateWithServer().booleanValue());
        } catch (NoSuchMethodError t) {
            assertTrue("Exception not induced", t.getMessage().contains(INDUCED));
        } catch (NullPointerException t) {
            assertTrue("Exception not induced", t.getMessage().contains(INDUCED));
        } catch (DoNotRetryIOException t) {
            assertTrue("Exception not induced", t.getMessage().contains(INDUCED));
        } catch (Error t) {
            assertTrue("Exception not induced", t.getMessage().contains(INDUCED));
        }
    }
}
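
The first array in the test groups the exception types that the interceptor treats as connectivity failures, with TimeoutException among them. A toy classifier in the same spirit, not the interceptor's actual logic (the HBase-specific ConnectionClosingException is omitted to keep the sketch dependency-free):

import java.io.EOFException;
import java.io.SyncFailedException;
import java.net.ConnectException;
import java.net.SocketTimeoutException;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.TimeoutException;

public class NetworkExceptionClassifier {
    // Mirrors the grouping in the test above: these types are treated as
    // "could not communicate with server" failures.
    public static boolean isNetworkException(Throwable t) {
        return t instanceof ConnectException || t instanceof SocketTimeoutException
                || t instanceof ClosedChannelException || t instanceof SyncFailedException
                || t instanceof TimeoutException || t instanceof EOFException;
    }
}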

From source file: com.reactivetechnologies.analytics.core.IncrementalClassifierBean.java

@Override
public void incrementModel(Dataset nextInstance) throws Exception {
    boolean b = queue.offer(nextInstance, 5, TimeUnit.SECONDS);
    if (!b)
        throw new TimeoutException("Unable to offer model even after waiting for 5 secs");
}
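
The method above shows the timed-handoff pattern: BlockingQueue.offer with a timeout returns false instead of throwing, so the caller turns the failed wait into a TimeoutException itself. A minimal sketch of the same pattern, with a hypothetical queue field and item type:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedHandoff {
    // Hypothetical bounded queue; a full queue makes offer(...) wait.
    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(1);

    public void submit(String item) throws InterruptedException, TimeoutException {
        // offer(...) blocks for up to the timeout and returns false on failure
        // instead of throwing, so the timeout must be raised explicitly.
        if (!queue.offer(item, 5, TimeUnit.SECONDS)) {
            throw new TimeoutException("Unable to enqueue item even after waiting for 5 secs");
        }
    }
}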

From source file: org.alfresco.repo.content.transform.AbstractContentTransformer2.java

/**
 * @see org.alfresco.repo.content.transform.ContentTransformer#transform(org.alfresco.service.cmr.repository.ContentReader, org.alfresco.service.cmr.repository.ContentWriter, org.alfresco.service.cmr.repository.TransformationOptions)
 */
public final void transform(ContentReader reader, ContentWriter writer, TransformationOptions options)
        throws ContentIOException {
    try {
        depth.set(depth.get() + 1);

        // begin timing
        long before = System.currentTimeMillis();

        String sourceMimetype = reader.getMimetype();
        String targetMimetype = writer.getMimetype();

        // check options map
        if (options == null) {
            options = new TransformationOptions();
        }

        try {
            if (transformerDebug.isEnabled()) {
                transformerDebug.pushTransform(this, reader.getContentUrl(), sourceMimetype, targetMimetype,
                        reader.getSize(), options);
            }

            // MNT-16381: check the mimetype of the file supplied by the user
            // matches the sourceMimetype of the reader. Intermediate files are
            // not checked.
            strictMimetypeCheck(reader, options, sourceMimetype);

            // Check the transformability
            checkTransformable(reader, writer, options);

            // Pass on any limits to the reader
            setReaderLimits(reader, writer, options);

            // Transform
            // MNT-12238: CLONE - CLONE - Upload of PPTX causes very high memory usage leading to system instability
            // Limiting transformation up to configured amount of milliseconds to avoid very high RAM consumption
            // and OOM during transforming problematic documents
            TransformationOptionLimits limits = getLimits(reader.getMimetype(), writer.getMimetype(), options);

            // Guard against a null limits object before reading the timeout
            long timeoutMs = (null == limits) ? -1 : limits.getTimeoutMs();
            if (!useTimeoutThread || (-1 == timeoutMs)) {
                transformInternal(reader, writer, options);
            } else {
                Future<?> submittedTask = null;
                StreamAwareContentReaderProxy proxiedReader = new StreamAwareContentReaderProxy(reader);
                StreamAwareContentWriterProxy proxiedWriter = new StreamAwareContentWriterProxy(writer);

                try {
                    submittedTask = getExecutorService()
                            .submit(new TransformInternalCallable(proxiedReader, proxiedWriter, options));
                    submittedTask.get(timeoutMs + additionalThreadTimout, TimeUnit.MILLISECONDS);
                } catch (TimeoutException e) {
                    releaseResources(submittedTask, proxiedReader, proxiedWriter);
                    throw new TimeoutException("Transformation failed due to timeout limit");
                } catch (InterruptedException e) {
                    releaseResources(submittedTask, proxiedReader, proxiedWriter);
                    throw new InterruptedException(
                            "Transformation failed, because the thread of the transformation was interrupted");
                } catch (ExecutionException e) {
                    Throwable cause = e.getCause();
                    if (cause instanceof TransformInternalCallableException) {
                        cause = ((TransformInternalCallableException) cause).getCause();
                    }

                    throw cause;
                }
            }

            // record time
            long after = System.currentTimeMillis();
            recordTime(sourceMimetype, targetMimetype, after - before);
        } catch (ContentServiceTransientException cste) {
            // A transient failure has occurred within the content transformer.
            // This should not be interpreted as a failure and therefore we should not
            // update the transformer's average time.
            if (logger.isDebugEnabled()) {
                logger.debug("Transformation has been transiently declined: \n" + "   reader: " + reader + "\n"
                        + "   writer: " + writer + "\n" + "   options: " + options + "\n" + "   transformer: "
                        + this);
            }
            // the finally block below will still perform tidyup. Otherwise we're done.
            // We rethrow the exception
            throw cste;
        } catch (UnsupportedTransformationException e) {
            // Don't record an error or even the time, as this is normal in compound transformations.
            transformerDebug.debug("          Failed", e);
            throw e;
        } catch (Throwable e) {
            // Make sure that this transformation gets set back in terms of time taken.
            // This will ensure that transformers that compete for the same transformation
            // will be prejudiced against transformers that tend to fail
            long after = System.currentTimeMillis();
            recordError(sourceMimetype, targetMimetype, after - before);

            // Ask Tika to detect the document, and report back on whether
            // the current mime type is plausible
            String differentType = getMimetypeService().getMimetypeIfNotMatches(reader.getReader());

            // Report the error
            if (differentType == null) {
                transformerDebug.debug("          Failed", e);
                throw new ContentIOException("Content conversion failed: \n" + "   reader: " + reader + "\n"
                        + "   writer: " + writer + "\n" + "   options: " + options.toString(false) + "\n"
                        + "   limits: " + getLimits(reader, writer, options), e);
            } else {
                transformerDebug.debug("          Failed: Mime type was '" + differentType + "'", e);

                if (retryTransformOnDifferentMimeType) {
                    // MNT-11015 fix.
                    // Set a new reader to refresh the input stream.
                    reader = reader.getReader();
                    // set the actual file MIME type detected by Tika for content reader
                    reader.setMimetype(differentType);

                    // Get the correct transformer for the actual file MIME type and try to
                    // transform the file with it
                    ContentTransformer transformer = this.registry.getTransformer(differentType,
                            reader.getSize(), targetMimetype, options);
                    if (null != transformer) {
                        transformer.transform(reader, writer, options);
                    } else {
                        transformerDebug.debug("          Failed", e);
                        throw new ContentIOException("Content conversion failed: \n" + "   reader: " + reader
                                + "\n" + "   writer: " + writer + "\n" + "   options: "
                                + options.toString(false) + "\n" + "   limits: "
                                + getLimits(reader, writer, options) + "\n" + "   claimed mime type: "
                                + reader.getMimetype() + "\n" + "   detected mime type: " + differentType + "\n"
                                + "   transformer not found" + "\n", e);
                    }
                } else {
                    throw new ContentIOException("Content conversion failed: \n" + "   reader: " + reader + "\n"
                            + "   writer: " + writer + "\n" + "   options: " + options.toString(false) + "\n"
                            + "   limits: " + getLimits(reader, writer, options) + "\n"
                            + "   claimed mime type: " + reader.getMimetype() + "\n" + "   detected mime type: "
                            + differentType, e);
                }
            }
        } finally {
            transformerDebug.popTransform();

            // check that the reader and writer are both closed
            if (reader.isChannelOpen()) {
                logger.error("Content reader not closed by transformer: \n" + "   reader: " + reader + "\n"
                        + "   transformer: " + this);
            }
            if (writer.isChannelOpen()) {
                logger.error("Content writer not closed by transformer: \n" + "   writer: " + writer + "\n"
                        + "   transformer: " + this);
            }
        }

        // done
        if (logger.isDebugEnabled()) {
            logger.debug("Completed transformation: \n" + "   reader: " + reader + "\n" + "   writer: " + writer
                    + "\n" + "   options: " + options + "\n" + "   transformer: " + this);
        }
    } finally {
        depth.set(depth.get() - 1);
    }
}

From source file: com.spotify.helios.client.DefaultRequestDispatcher.java

/**
 * Sets up a connection, retrying on connect failure.
 */
private HttpURLConnection connect(final URI uri, final String method, final byte[] entity,
        final Map<String, List<String>> headers)
        throws URISyntaxException, IOException, TimeoutException, InterruptedException, HeliosException {
    final long deadline = currentTimeMillis() + RETRY_TIMEOUT_MILLIS;
    final int offset = ThreadLocalRandom.current().nextInt();

    while (currentTimeMillis() < deadline) {
        final List<URI> endpoints = endpointSupplier.get();
        if (endpoints.isEmpty()) {
            throw new RuntimeException("failed to resolve master");
        }
        log.debug("endpoint uris are {}", endpoints);

        // Resolve hostname into IPs so client will round-robin and retry for multiple A records.
        // Keep a mapping of IPs to hostnames for TLS verification.
        final List<URI> ipEndpoints = Lists.newArrayList();
        final Map<URI, URI> ipToHostnameUris = Maps.newHashMap();

        for (final URI hnUri : endpoints) {
            try {
                final InetAddress[] ips = InetAddress.getAllByName(hnUri.getHost());
                for (final InetAddress ip : ips) {
                    final URI ipUri = new URI(hnUri.getScheme(), hnUri.getUserInfo(), ip.getHostAddress(),
                            hnUri.getPort(), hnUri.getPath(), hnUri.getQuery(), hnUri.getFragment());
                    ipEndpoints.add(ipUri);
                    ipToHostnameUris.put(ipUri, hnUri);
                }
            } catch (UnknownHostException e) {
                log.warn("Unable to resolve hostname {} into IP address: {}", hnUri.getHost(), e);
            }
        }

        for (int i = 0; i < ipEndpoints.size() && currentTimeMillis() < deadline; i++) {
            final URI ipEndpoint = ipEndpoints.get(positive(offset + i) % ipEndpoints.size());
            final String fullpath = ipEndpoint.getPath() + uri.getPath();

            final String scheme = ipEndpoint.getScheme();
            final String host = ipEndpoint.getHost();
            final int port = ipEndpoint.getPort();
            if (!VALID_PROTOCOLS.contains(scheme) || host == null || port == -1) {
                throw new HeliosException(String.format(
                        "Master endpoints must be of the form \"%s://heliosmaster.domain.net:<port>\"",
                        VALID_PROTOCOLS_STR));
            }

            final URI realUri = new URI(scheme, host + ":" + port, fullpath, uri.getQuery(), null);

            AgentProxy agentProxy = null;
            Deque<Identity> identities = Queues.newArrayDeque();
            try {
                if (scheme.equals("https")) {
                    agentProxy = AgentProxies.newInstance();
                    for (final Identity identity : agentProxy.list()) {
                        if (identity.getPublicKey().getAlgorithm().equals("RSA")) {
                            // only RSA keys will work with our TLS implementation
                            identities.offerLast(identity);
                        }
                    }
                }
            } catch (Exception e) {
                log.warn("Couldn't get identities from ssh-agent", e);
            }

            try {
                do {
                    final Identity identity = identities.poll();

                    try {
                        log.debug("connecting to {}", realUri);

                        final HttpURLConnection connection = connect0(realUri, method, entity, headers,
                                ipToHostnameUris.get(ipEndpoint).getHost(), agentProxy, identity);

                        final int responseCode = connection.getResponseCode();
                        if (((responseCode == HTTP_FORBIDDEN) || (responseCode == HTTP_UNAUTHORIZED))
                                && !identities.isEmpty()) {
                            // there was some sort of security error. if we have any more SSH identities to try,
                            // retry with the next available identity
                            log.debug("retrying with next SSH identity since {} failed", identity.getComment());
                            continue;
                        }

                        return connection;
                    } catch (ConnectException | SocketTimeoutException | UnknownHostException e) {
                        // UnknownHostException happens if we can't resolve hostname into IP address.
                        // UnknownHostException's getMessage method returns just the hostname which is a
                        // useless message, so log the exception class name to provide more info.
                        log.debug(e.toString());
                        // Connecting failed, sleep a bit to avoid hammering and then try another endpoint
                        Thread.sleep(200);
                    }
                } while (!identities.isEmpty()); // loop so a security failure can retry the next identity
            } finally {
                if (agentProxy != null) {
                    agentProxy.close();
                }
            }
        }
        log.warn("Failed to connect, retrying in 5 seconds.");
        Thread.sleep(5000);
    }
    throw new TimeoutException("Timed out connecting to master");
}
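
Stripped of the endpoint resolution and TLS details, the method above follows a common shape: retry inside a wall-clock deadline window and throw a TimeoutException(String) only once the window closes. A minimal sketch of that shape, with a hypothetical tryConnect() standing in for a single attempt:

import java.util.concurrent.TimeoutException;

public class DeadlineRetry {
    // Hypothetical single attempt; returns null on failure.
    private static Object tryConnect() { return null; }

    public static Object connectWithDeadline(long timeoutMillis)
            throws InterruptedException, TimeoutException {
        final long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            Object connection = tryConnect();
            if (connection != null) {
                return connection;
            }
            Thread.sleep(200); // back off before the next attempt
        }
        throw new TimeoutException("Timed out connecting within " + timeoutMillis + " ms");
    }
}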

From source file: no.ntnu.osnap.com.Protocol.java

/**
 * Requests the value of the specified sensor on the remote device. This method blocks
 * until a response arrives or a timeout occurs. Which sensors the remote device supports,
 * how they are implemented, and what integer value each sensor represents are defined by
 * the remote device firmware.
 * @param sensor which pin to get the value from
 * @return the value of the specified sensor
 * @throws TimeoutException if the remote device used too long time to respond
 * @see ConnectionMetadata for the services (such as supported sensor types) the remote device exposes
 */
public final int sensor(int sensor) throws TimeoutException {
    ProtocolInstruction newInstruction = new ProtocolInstruction(OpCode.SENSOR, (byte) sensor, new byte[1]);

    lock();

    waitingForAck = OpCode.SENSOR;

    try {
        sendBytes(newInstruction.getInstructionBytes());
    } catch (IOException ex) {
        System.out.println("Send fail");
    }

    release();

    long time = System.currentTimeMillis();
    while (waitingForAck != null) {
        if (System.currentTimeMillis() - time > TIMEOUT) {
            waitingForAck = null;
            throw new TimeoutException("Timeout");
        }
        try {
            Thread.sleep(100);
        } catch (InterruptedException ex) {
        }
    }

    byte[] content = currentCommand.getContent();

    ackProcessingComplete();

    int sensorValue = (content[0] << 8) + toUnsigned(content[1]);

    return sensorValue;
}
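
The wait above is a busy-wait on a shared acknowledgement flag with a manual timeout check. A condensed sketch of the same pattern, with a hypothetical volatile flag and timeout constant:

import java.util.concurrent.TimeoutException;

public class AckWaiter {
    private static final long TIMEOUT_MS = 2000; // hypothetical timeout constant
    private volatile boolean ackReceived;

    public void awaitAck() throws TimeoutException {
        long start = System.currentTimeMillis();
        while (!ackReceived) {
            if (System.currentTimeMillis() - start > TIMEOUT_MS) {
                throw new TimeoutException("No acknowledgement within " + TIMEOUT_MS + " ms");
            }
            try {
                Thread.sleep(100); // poll interval
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt(); // preserve interrupt status
            }
        }
    }
}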

From source file: org.apache.axis2.jaxws.client.async.AsyncResponse.java

public Object get(long timeout, TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
    if (cancelled) {
        throw new CancellationException(Messages.getMessage("getErr"));
    }

    // Wait for the response to come back
    if (log.isDebugEnabled()) {
        log.debug("Waiting for async response delivery with time out.");
        log.debug("timeout = " + timeout);
        log.debug("units   = " + unit);
    }

    // latch.await will only block if its count is > 0
    latch.await(timeout, unit);

    if (savedException != null) {
        throw savedException;
    }

    // If the response still hasn't been returned, then we've timed out
    // and must throw a TimeoutException
    if (latch.getCount() > 0) {
        throw new TimeoutException(Messages.getMessage("getErr1"));
    }

    return responseObject;
}
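
The getCount() check above detects the timeout after the fact; the boolean returned by CountDownLatch.await expresses the same outcome directly. A minimal sketch, assuming a single-count latch that another thread releases when the response arrives:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class LatchWaitExample {
    private final CountDownLatch done = new CountDownLatch(1);
    private volatile Object response;

    public Object awaitResponse(long timeout, TimeUnit unit)
            throws InterruptedException, TimeoutException {
        // await(...) returns false if the timeout elapsed before countDown().
        if (!done.await(timeout, unit)) {
            throw new TimeoutException("No response within " + timeout + " " + unit);
        }
        return response;
    }
}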

From source file: org.apache.marmotta.platform.sparql.services.sparql.SparqlServiceImpl.java

@Override
@Deprecated
public void query(final QueryLanguage queryLanguage, final String query, final QueryResultWriter writer,
        final int timeoutInSeconds)
        throws MarmottaException, MalformedQueryException, QueryEvaluationException, TimeoutException {
    log.debug("executing SPARQL query:\n{}", query);
    Future<Boolean> future = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            long start = System.currentTimeMillis();
            try {
                RepositoryConnection connection = sesameService.getConnection();
                try {
                    connection.begin();
                    Query sparqlQuery = connection.prepareQuery(queryLanguage, query,
                            configurationService.getBaseUri());

                    if (sparqlQuery instanceof TupleQuery) {
                        query((TupleQuery) sparqlQuery, (TupleQueryResultWriter) writer);
                    } else if (sparqlQuery instanceof BooleanQuery) {
                        query((BooleanQuery) sparqlQuery, (BooleanQueryResultWriter) writer);
                    } else if (sparqlQuery instanceof GraphQuery) {
                        query((GraphQuery) sparqlQuery, ((SPARQLGraphResultWriter) writer).getOutputStream(),
                                ((SPARQLGraphResultWriter) writer).getFormat());
                    } else {
                        connection.rollback();
                        throw new InvalidArgumentException(
                                "SPARQL query type " + sparqlQuery.getClass() + " not supported!");
                    }

                    connection.commit();
                } catch (Exception ex) {
                    connection.rollback();
                    throw ex;
                } finally {
                    connection.close();
                }
            } catch (RepositoryException e) {
                log.error("error while getting repository connection: {}", e);
                throw new MarmottaException("error while getting repository connection", e);
            } catch (QueryEvaluationException e) {
                log.error("error while evaluating query: {}", e.getMessage());
                throw new MarmottaException("error while writing query result in format ", e);
            }

            log.debug("SPARQL execution took {}ms", System.currentTimeMillis() - start);

            return Boolean.TRUE;
        }
    });

    try {
        future.get(timeoutInSeconds, TimeUnit.SECONDS);
    } catch (InterruptedException | TimeoutException e) {
        log.info("SPARQL query execution aborted due to timeout");
        future.cancel(true);
        throw new TimeoutException("SPARQL query execution aborted due to timeout (" + timeoutInSeconds + "s)");
    } catch (ExecutionException e) {
        log.info("SPARQL query execution aborted due to exception");
        log.debug("exception details", e);
        if (e.getCause() instanceof MarmottaException) {
            throw (MarmottaException) e.getCause();
        } else if (e.getCause() instanceof MalformedQueryException) {
            throw (MalformedQueryException) e.getCause();
        } else {
            throw new MarmottaException("unknown exception while evaluating SPARQL query", e.getCause());
        }
    }
}
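
Apart from the SPARQL plumbing, the timeout handling above reduces to three moves: bound the task with Future.get, cancel it on timeout so the worker is interrupted, and rethrow a TimeoutException whose message names the limit. A condensed sketch of that pattern, with a hypothetical Callable task:

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedTask {
    public static <T> T runWithTimeout(Callable<T> task, int timeoutSeconds)
            throws InterruptedException, ExecutionException, TimeoutException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<T> future = executor.submit(task);
        try {
            return future.get(timeoutSeconds, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            future.cancel(true); // interrupt the worker so it can stop early
            throw new TimeoutException("Task aborted due to timeout (" + timeoutSeconds + "s)");
        } finally {
            executor.shutdown();
        }
    }
}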

From source file: pt.lsts.neptus.comm.iridium.RockBlockIridiumMessenger.java

public static Future<Boolean> rockBlockIsReachable() {
    return new Future<Boolean>() {
        Boolean result = null;
        boolean canceled = false;
        long start = System.currentTimeMillis();
        {

            if (System.currentTimeMillis() - lastSuccess < 15000) {
                result = true;
            }

            try {
                URL url = new URL("https://secure.rock7mobile.com/rockblock");
                int len = url.openConnection().getContentLength();
                if (len > 0)
                    lastSuccess = System.currentTimeMillis();
                result = len > 0;
            } catch (Exception e) {
                NeptusLog.pub().error(e);
                result = false;
            }
        }

        @Override
        public Boolean get() throws InterruptedException, ExecutionException {
            while (result == null) {
                Thread.sleep(100);
            }
            return result;
        }

        @Override
        public boolean cancel(boolean mayInterruptIfRunning) {
            canceled = true;
            return false;
        }

        @Override
        public Boolean get(long timeout, TimeUnit unit)
                throws InterruptedException, ExecutionException, TimeoutException {
            while (result == null) {
                Thread.sleep(100);
                if (System.currentTimeMillis() - start > unit.toMillis(timeout))
                    throw new TimeoutException("Time out while connecting");
            }
            return result;
        }

        @Override
        public boolean isCancelled() {
            return canceled;
        }

        @Override
        public boolean isDone() {
            return result != null;
        }
    };
}

From source file: org.springframework.amqp.rabbit.core.RabbitAdminTests.java

@Test
public void testIgnoreDeclarationExeptionsTimeout() throws Exception {
    com.rabbitmq.client.ConnectionFactory rabbitConnectionFactory = mock(
            com.rabbitmq.client.ConnectionFactory.class);
    TimeoutException toBeThrown = new TimeoutException("test");
    doThrow(toBeThrown).when(rabbitConnectionFactory).newConnection(any(ExecutorService.class), anyString());
    CachingConnectionFactory ccf = new CachingConnectionFactory(rabbitConnectionFactory);
    RabbitAdmin admin = new RabbitAdmin(ccf);
    List<DeclarationExceptionEvent> events = new ArrayList<DeclarationExceptionEvent>();
    admin.setApplicationEventPublisher(new EventPublisher(events));
    admin.setIgnoreDeclarationExceptions(true);
    admin.declareQueue(new AnonymousQueue());
    admin.declareQueue();
    admin.declareExchange(new DirectExchange("foo"));
    admin.declareBinding(new Binding("foo", DestinationType.QUEUE, "bar", "baz", null));
    assertThat(events.size(), equalTo(4));
    assertThat(events.get(0).getSource(), sameInstance(admin));
    assertThat(events.get(0).getDeclarable(), instanceOf(AnonymousQueue.class));
    assertSame(toBeThrown, events.get(0).getThrowable().getCause());
    assertNull(events.get(1).getDeclarable());
    assertSame(toBeThrown, events.get(1).getThrowable().getCause());
    assertThat(events.get(2).getDeclarable(), instanceOf(DirectExchange.class));
    assertSame(toBeThrown, events.get(2).getThrowable().getCause());
    assertThat(events.get(3).getDeclarable(), instanceOf(Binding.class));
    assertSame(toBeThrown, events.get(3).getThrowable().getCause());

    assertSame(events.get(3), admin.getLastDeclarationExceptionEvent());
}
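
Mockito only allows stubbing a checked exception that the target method declares, and com.rabbitmq.client.ConnectionFactory.newConnection declares TimeoutException, which is why the doThrow above is valid. A minimal sketch of the same stubbing against a hypothetical interface:

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import java.util.concurrent.TimeoutException;

public class StubbingSketch {
    // Hypothetical collaborator whose method declares the checked exception.
    interface Connector {
        void connect() throws TimeoutException;
    }

    public static Connector failingConnector() throws TimeoutException {
        Connector connector = mock(Connector.class);
        doThrow(new TimeoutException("test")).when(connector).connect();
        return connector;
    }
}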

From source file: org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor.java

/**
 * Provides a generic way of iterating a result set back to the client. Implementers should respect the
 * {@link Settings#serializedResponseTimeout} configuration and break the serialization process if
 * it begins to take too long to do so, throwing a {@link java.util.concurrent.TimeoutException} in such
 * cases.
 *
 * @param context The Gremlin Server {@link Context} object containing settings, request message, etc.
 * @param itty The result to iterate
 * @throws TimeoutException if the time taken to serialize the entire result set exceeds the allowable time.
 */
protected void handleIterator(final Context context, final Iterator itty)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // sessionless requests are always transaction managed, but in-session requests are configurable.
    final boolean managedTransactionsForRequest = manageTransactions ? true
            : (Boolean) msg.getArgs().getOrDefault(Tokens.ARGS_MANAGE_TRANSACTION, false);

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        if (managedTransactionsForRequest)
            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while.  this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();

    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize.  Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval.  as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    if (managedTransactionsForRequest)
                        attemptRollback(msg, context.getGraphManager(), settings.strictTransactionManagement);
                    break;
                }

                // track whether there is anything left in the iterator because it needs to be accessed after
                // the transaction could be closed - in that case a call to hasNext() could open a new transaction
                // unintentionally
                final boolean moreInIterator = itty.hasNext();

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (moreInIterator)
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully. note that
                        // errors internal to script eval or timeout will rollback given GremlinServer's global configurations.
                        // local errors will get rolledback below because the exceptions aren't thrown in those cases to be
                        // caught by the GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        if (managedTransactionsForRequest)
                            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);

                        // exit the result iteration loop as there are no more results left.  using this external control
                        // because of the above commit.  some graphs may open a new transaction on the call to
                        // hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!moreInIterator)
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred.  in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this point
                // should have completely detached objects from the transaction (i.e. serialization has occurred)
                // so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }

        stopWatch.unsplit();
    }

    stopWatch.stop();
}
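
The split/unsplit calls above accumulate the total elapsed serialization time across loop iterations before comparing it to the configured budget. The same guard can be sketched with plain System.nanoTime(), with a hypothetical per-item work step:

import java.util.Iterator;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SerializationGuard {
    public static void iterateWithBudget(Iterator<?> results, long budgetMillis)
            throws TimeoutException {
        final long start = System.nanoTime();
        while (results.hasNext()) {
            results.next(); // hypothetical per-item serialization work would go here
            long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
            if (budgetMillis > 0 && elapsed > budgetMillis) {
                throw new TimeoutException(
                        "Serialization of the entire response exceeded " + budgetMillis + " ms");
            }
        }
    }
}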