Example usage for com.rabbitmq.client Channel basicAck

List of usage examples for com.rabbitmq.client Channel basicAck

Introduction

On this page you can find example usage for com.rabbitmq.client Channel basicAck.

Prototype

void basicAck(long deliveryTag, boolean multiple) throws IOException;

Document

Acknowledge one or several received messages.
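
As a quick illustration of the prototype above, here is a minimal, self-contained sketch of manual acknowledgement with basicAck. The broker host ("localhost"), the queue name ("task_queue"), and the queue flags are assumptions made for this sketch only, not values taken from the examples below.

import java.io.IOException;

import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;

public class BasicAckExample {

    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost"); // assumption: broker runs locally

        Connection connection = factory.newConnection();
        final Channel channel = connection.createChannel();

        // assumption: a durable, non-exclusive, non-auto-delete queue named "task_queue"
        channel.queueDeclare("task_queue", true, false, false, null);

        // autoAck = false, so every delivery must be acknowledged explicitly
        channel.basicConsume("task_queue", false, new DefaultConsumer(channel) {
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope,
                    AMQP.BasicProperties properties, byte[] body) throws IOException {
                // ... process the message body here ...

                // multiple = false: acknowledge only this delivery tag
                channel.basicAck(envelope.getDeliveryTag(), false);
            }
        });
    }
}

The second argument controls batching: false acknowledges only the given delivery tag, while true acknowledges every unacknowledged delivery on the channel up to and including that tag, as several of the examples below do.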

Usage

From source file:info.pancancer.arch3.worker.WorkerRunnable.java

License:Open Source License

@Override
public void run() {

    int max = maxRuns;

    try {
        // the VM UUID
        log.info(" WORKER VM UUID provided as: '" + vmUuid + "'");
        // write to
        // TODO: Add some sort of "local debug" mode so that developers working on their local
        // workstation can declare the queue if it doesn't exist. Normally, the results queue is
        // created by the Coordinator.
        resultsChannel = Utilities.setupExchange(settings, this.resultsQueueName);

        while (max > 0 || this.endless) {
            log.debug(max + " remaining jobs will be executed");
            log.info(" WORKER IS PREPARING TO PULL JOB FROM QUEUE " + this.jobQueueName);

            if (!endless) {
                max--;
            }

            // The jobChannel needs to be created inside the loop because it is also closed inside the loop;
            // closing it on every iteration prevents pre-fetching.
            Channel jobChannel = Utilities.setupQueue(settings, this.jobQueueName);
            if (jobChannel == null) {
                throw new NullPointerException("jobChannel is null for queue: " + this.jobQueueName
                        + ". Something bad must have happened while trying to set up the queue connections. Please ensure that your configuration is correct.");
            }
            QueueingConsumer consumer = new QueueingConsumer(jobChannel);
            jobChannel.basicConsume(this.jobQueueName, false, consumer);

            QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            log.info(vmUuid + "  received " + delivery.getEnvelope().toString());
            if (delivery.getBody() != null) {
                String message = new String(delivery.getBody(), StandardCharsets.UTF_8);
                if (message.trim().length() > 0) {

                    log.info(" [x] Received JOBS REQUEST '" + message + "' @ " + vmUuid);

                    Job job = new Job().fromJSON(message);

                    Status status = new Status(vmUuid, job.getUuid(), StatusState.RUNNING,
                            Utilities.JOB_MESSAGE_TYPE, "job is starting", this.networkAddress);
                    status.setStderr("");
                    status.setStdout("");
                    String statusJSON = status.toJSON();

                    log.info(" WORKER LAUNCHING JOB");

                    // greedy acknowledge, it will be easier to deal with lost jobs than zombie workers in hostile OpenStack
                    // environments
                    log.info(vmUuid + " acknowledges " + delivery.getEnvelope().toString());
                    jobChannel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
                    // we need to close the channel IMMEDIATELY to complete the ACK.
                    jobChannel.close();
                    // Close the connection object as well, or the main thread may not exit because of still-open-and-in-use resources.
                    jobChannel.getConnection().close();

                    WorkflowResult workflowResult = new WorkflowResult();
                    if (testMode) {
                        workflowResult.setWorkflowStdout("everything is awesome");
                        workflowResult.setExitCode(0);
                    } else {
                        String seqwareEngine = settings.getString(Constants.WORKER_SEQWARE_ENGINE,
                                Constants.SEQWARE_WHITESTAR_ENGINE);
                        String seqwareSettingsFile = settings.getString(Constants.WORKER_SEQWARE_SETTINGS_FILE);
                        String dockerImage = settings.getString(Constants.WORKER_SEQWARE_DOCKER_IMAGE_NAME);
                        workflowResult = launchJob(statusJSON, job, seqwareEngine, seqwareSettingsFile,
                                dockerImage);
                    }

                    status = new Status(vmUuid, job.getUuid(),
                            workflowResult.getExitCode() == 0 ? StatusState.SUCCESS : StatusState.FAILED,
                            Utilities.JOB_MESSAGE_TYPE, "job is finished", networkAddress);
                    status.setStderr(workflowResult.getWorkflowStdErr());
                    status.setStdout(workflowResult.getWorkflowStdout());
                    statusJSON = status.toJSON();

                    log.info(" WORKER FINISHING JOB");

                    finishJob(statusJSON);
                } else {
                    log.info(NO_MESSAGE_FROM_QUEUE_MESSAGE);
                }
                // we need to close the channel *conditionally*
                if (jobChannel.isOpen()) {
                    jobChannel.close();
                }
                // Close the connection object as well, or the main thread may not exit because of still-open-and-in-use resources.
                if (jobChannel.getConnection().isOpen()) {
                    jobChannel.getConnection().close();
                }
            } else {
                log.info(NO_MESSAGE_FROM_QUEUE_MESSAGE);
            }

            if (endless) {
                log.info("attempting to reset workspace");
                DefaultExecutor executor = new DefaultExecutor();
                DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
                // attempt a cleanup
                CommandLine cli = new CommandLine("sudo");
                List<String> args = new ArrayList<>(Arrays.asList("rm", "-rf", "/datastore/*"));
                cli.addArguments(args.toArray(new String[args.size()]));
                executor.execute(cli, resultHandler);
                // Use the result handler for non-blocking call, so this way we should be able to get updates of
                // stdout and stderr while the command is running.
                resultHandler.waitFor();
                log.info("exit code for cleanup: " + resultHandler.getExitValue());
            }
        }
        log.info(" \n\n\nWORKER FOR VM UUID HAS FINISHED!!!: '" + vmUuid + "'\n\n");

        // This is needed when multiple threads are reading from the same queue;
        // otherwise you end up with unacknowledged messages that cannot be delivered to other workers.
        if (resultsChannel != null && resultsChannel.isOpen()) {
            resultsChannel.close();
            resultsChannel.getConnection().close();
        }
        if (resultsChannel != null) {
            log.debug("result channel open: " + resultsChannel.isOpen());
            log.debug("result channel connection open: " + resultsChannel.getConnection().isOpen());
        }
    } catch (Exception ex) {
        log.error(ex.getMessage(), ex);
    }
}

From source file:io.druid.firehose.rabbitmq.RabbitMQFirehoseFactory.java

License:Apache License

@Override
public Firehose connect(StringInputRowParser firehoseParser) throws IOException {
    final StringInputRowParser stringParser = firehoseParser;

    ConnectionOptions lyraOptions = new ConnectionOptions(this.connectionFactory);
    Config lyraConfig = new Config().withRecoveryPolicy(new RetryPolicy().withMaxRetries(config.getMaxRetries())
            .withRetryInterval(Duration.seconds(config.getRetryIntervalSeconds()))
            .withMaxDuration(Duration.seconds(config.getMaxDurationSeconds())));

    String queue = config.getQueue();
    String exchange = config.getExchange();
    String routingKey = config.getRoutingKey();

    boolean durable = config.isDurable();
    boolean exclusive = config.isExclusive();
    boolean autoDelete = config.isAutoDelete();

    final Connection connection = Connections.create(lyraOptions, lyraConfig);

    connection.addShutdownListener(new ShutdownListener() {
        @Override
        public void shutdownCompleted(ShutdownSignalException cause) {
            log.warn(cause, "Connection closed!");
        }
    });

    final Channel channel = connection.createChannel();
    channel.queueDeclare(queue, durable, exclusive, autoDelete, null);
    channel.queueBind(queue, exchange, routingKey);
    channel.addShutdownListener(new ShutdownListener() {
        @Override
        public void shutdownCompleted(ShutdownSignalException cause) {
            log.warn(cause, "Channel closed!");
        }
    });

    // We create a QueueingConsumer that will not auto-acknowledge messages since that
    // happens on commit().
    final QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(queue, false, consumer);

    return new Firehose() {
        /**
         * Storing the latest delivery as a member variable should be safe since this will only be run
         * by a single thread.
         */
        private Delivery delivery;

        /**
         * Store the latest delivery tag to be able to commit (acknowledge) the message delivery up to
         * and including this tag. See commit() for more detail.
         */
        private long lastDeliveryTag;

        @Override
        public boolean hasMore() {
            delivery = null;
            try {
                // Wait for the next delivery. This will block until something is available.
                delivery = consumer.nextDelivery();
                if (delivery != null) {
                    lastDeliveryTag = delivery.getEnvelope().getDeliveryTag();
                    // If delivery is non-null, we report that there is something more to process.
                    return true;
                }
            } catch (InterruptedException e) {
                // A little unclear on how we should handle this.

                // At any rate, we're in an unknown state now so let's log something and return false.
                log.wtf(e, "Got interrupted while waiting for next delivery. Doubt this should ever happen.");
            }

            // This means that delivery is null or we caught the exception above so we report that we have
            // nothing more to process.
            return false;
        }

        @Override
        public InputRow nextRow() {
            if (delivery == null) {
                //Just making sure.
                log.wtf("I have nothing in delivery. Method hasMore() should have returned false.");
                return null;
            }

            return stringParser.parse(StringUtils.fromUtf8(delivery.getBody()));
        }

        @Override
        public Runnable commit() {
            // This method will be called from the same thread that calls the other methods of
            // this Firehose. However, the returned Runnable will be called by a different thread.
            //
            // It should be (thread) safe to copy the lastDeliveryTag like we do below and then
            // acknowledge values up to and including that value.
            return new Runnable() {
                // Store (copy) the last delivery tag to "become" thread safe.
                final long deliveryTag = lastDeliveryTag;

                @Override
                public void run() {
                    try {
                        log.info("Acknowledging delivery of messages up to tag: " + deliveryTag);

                        // Acknowledge all messages up to and including the stored delivery tag.
                        channel.basicAck(deliveryTag, true);
                    } catch (IOException e) {
                        log.error(e, "Unable to acknowledge message reception to message queue.");
                    }
                }
            };
        }

        @Override
        public void close() throws IOException {
            log.info("Closing connection to RabbitMQ");
            channel.close();
            connection.close();
        }
    };
}

From source file:io.druid.segment.realtime.firehose.RabbitMQFirehoseFactory.java

License:Open Source License

@Override
public Firehose connect() throws IOException {
    String queue = config.getQueue();
    String exchange = config.getExchange();
    String routingKey = config.getRoutingKey();

    boolean durable = config.isDurable();
    boolean exclusive = config.isExclusive();
    boolean autoDelete = config.isAutoDelete();

    final Connection connection = connectionFactory.newConnection();
    connection.addShutdownListener(new ShutdownListener() {
        @Override
        public void shutdownCompleted(ShutdownSignalException cause) {
            log.warn(cause, "Connection closed!");
            //FUTURE: we could try to re-establish the connection here. Not done in this version though.
        }
    });

    final Channel channel = connection.createChannel();
    channel.queueDeclare(queue, durable, exclusive, autoDelete, null);
    channel.queueBind(queue, exchange, routingKey);
    channel.addShutdownListener(new ShutdownListener() {
        @Override
        public void shutdownCompleted(ShutdownSignalException cause) {
            log.warn(cause, "Channel closed!");
            //FUTURE: we could try to re-establish the connection here. Not done in this version though.
        }
    });

    // We create a QueueingConsumer that will not auto-acknowledge messages since that
    // happens on commit().
    final QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(queue, false, consumer);

    return new Firehose() {
        /**
         * Storing the latest delivery as a member variable should be safe since this will only be run
         * by a single thread.
         */
        private QueueingConsumer.Delivery delivery;

        /**
         * Store the latest delivery tag to be able to commit (acknowledge) the message delivery up to
         * and including this tag. See commit() for more detail.
         */
        private long lastDeliveryTag;

        @Override
        public boolean hasMore() {
            delivery = null;
            try {
                // Wait for the next delivery. This will block until something is available.
                delivery = consumer.nextDelivery();
                if (delivery != null) {
                    lastDeliveryTag = delivery.getEnvelope().getDeliveryTag();
                    // If delivery is non-null, we report that there is something more to process.
                    return true;
                }
            } catch (InterruptedException e) {
                // A little unclear on how we should handle this.

                // At any rate, we're in an unknown state now so let's log something and return false.
                log.wtf(e, "Got interrupted while waiting for next delivery. Doubt this should ever happen.");
            }

            // This means that delivery is null or we caught the exception above so we report that we have
            // nothing more to process.
            return false;
        }

        @Override
        public InputRow nextRow() {
            if (delivery == null) {
                //Just making sure.
                log.wtf("I have nothing in delivery. Method hasMore() should have returned false.");
                return null;
            }

            return parser.parse(new String(delivery.getBody()));
        }

        @Override
        public Runnable commit() {
            // This method will be called from the same thread that calls the other methods of
            // this Firehose. However, the returned Runnable will be called by a different thread.
            //
            // It should be (thread) safe to copy the lastDeliveryTag like we do below and then
            // acknowledge values up to and including that value.
            return new Runnable() {
                // Store (copy) the last delivery tag to "become" thread safe.
                final long deliveryTag = lastDeliveryTag;

                @Override
                public void run() {
                    try {
                        log.info("Acknowledging delivery of messages up to tag: " + deliveryTag);

                        // Acknowledge all messages up to and including the stored delivery tag.
                        channel.basicAck(deliveryTag, true);
                    } catch (IOException e) {
                        log.error(e, "Unable to acknowledge message reception to message queue.");
                    }
                }
            };
        }

        @Override
        public void close() throws IOException {
            log.info("Closing connection to RabbitMQ");
            channel.close();
            connection.close();
        }
    };
}

From source file:io.qdb.server.input.RabbitMQInputHandler.java

License:Apache License

@Override
public void start(final Sink sink) throws Exception {
    this.sink = sink;
    final Channel c = ensureChannel();
    c.basicConsume(queue, autoAck, "qdb:" + inputPath, new DefaultConsumer(c) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties,
                byte[] body) throws IOException {
            boolean ok = false;
            try {
                sink.append(envelope.getRoutingKey(), body);
                if (!autoAck)
                    c.basicAck(envelope.getDeliveryTag(), false);
                ok = true;
            } catch (Exception e) {
                sink.error(e);
            } finally {
                if (!ok) {
                    try {
                        // todo should probably sit on the message for a bit before nacking to prevent storm
                        c.basicNack(envelope.getDeliveryTag(), false, true);
                    } catch (IOException e) {
                        log.debug("Error nacking message: " + e, e);
                    }
                }
            }
        }
    });
}

From source file:javarpc_server.JavaRPC_Server.java

/**
 * @param args the command line arguments
 * @throws java.io.IOException
 * @throws java.lang.InterruptedException
 */
public static void main(String[] args) throws IOException, InterruptedException {
    // TODO code application logic here

    ConnectionFactory factory = new ConnectionFactory();
    System.out.println(factory.getUsername() + " " + factory.getPassword());
    factory.setHost("localhost");

    Connection connection = factory.newConnection();
    Channel channel = connection.createChannel();

    channel.queueDeclare(RPC_QUEUE_NAME, false, false, false, null);

    channel.basicQos(1);

    QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(RPC_QUEUE_NAME, false, consumer);

    System.out.println(" [x] Awaiting RPC requests");

    while (true) {
        QueueingConsumer.Delivery delivery = consumer.nextDelivery();

        BasicProperties props = delivery.getProperties();
        BasicProperties replyProps = new BasicProperties.Builder().correlationId(props.getCorrelationId())
                .build();

        String message = new String(delivery.getBody());

        System.out.println(" [.] convert(" + message + ")");
        String response = "" + convert(message);

        channel.basicPublish("", props.getReplyTo(), replyProps, response.getBytes());

        channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
    }
}

From source file:localdomain.localhost.RabbitMQClient.java

License:Apache License

@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    try {
        ConnectionFactory factory = new ConnectionFactory();
        String messages = "";

        String uri = System.getProperty("CLOUDAMQP_URL");
        factory.setUri(uri);

        Connection connection = factory.newConnection();
        Channel channel = connection.createChannel();

        channel.queueDeclare(QUEUE_NAME, true, false, false, null);

        QueueingConsumer consumer = new QueueingConsumer(channel);

        boolean autoACK = false;
        channel.basicConsume(QUEUE_NAME, autoACK, consumer);

        System.out.println(" [*] Waiting 100ms for a message");
        QueueingConsumer.Delivery delivery = consumer.nextDelivery(100);

        while (delivery != null) {
            String message = new String(delivery.getBody());

            System.out.println(" [x] Received '" + message + "'");

            channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);

            messages = message + " <br/> " + messages;
            delivery = consumer.nextDelivery(100);
        }
        request.setAttribute("messages", messages);
        request.getRequestDispatcher("/index.jsp").forward(request, response);

        channel.close();
        connection.close();

    } catch (Exception e) {
        request.setAttribute("throwable", e);
        request.getRequestDispatcher("/index.jsp").forward(request, response);
    }

}

From source file:mapas.Mapas.java

public static void main(String[] args) throws Exception {
    ConnectionFactory factory = new ConnectionFactory();
    factory.setHost("localhost");
    factory.setPassword("test");
    factory.setUsername("test");
    final Connection connection = factory.newConnection();

    final Channel channel = connection.createChannel();

    channel.queueDeclare(TASK_QUEUE_NAME, true, false, false, null);
    System.out.println(" [*] Waiting for messages. To exit press CTRL+C");

    channel.basicQos(1);

    final Consumer consumer = new DefaultConsumer(channel) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties,
                byte[] body) throws IOException {
            String message = new String(body, "UTF-8");

            System.out.println(" [x] Received '" + message + "'");
            try {
                doWork(message);
            } catch (Exception e) {
                System.out.println(e.getMessage());
            }
            System.out.println(" [x] Done");
            channel.basicAck(envelope.getDeliveryTag(), false);

        }
    };
    channel.basicConsume(TASK_QUEUE_NAME, false, consumer);
}

From source file:ms.dew.core.cluster.spi.rabbit.RabbitClusterMQ.java

License:Apache License

private DefaultConsumer getDefaultConsumer(Channel channel, String flag, String exchange, String routingKey,
        String queueName, Consumer<String> consumer) {
    return new DefaultConsumer(channel) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties,
                byte[] body) throws IOException {
            setMQHeader(flag, properties.getHeaders());
            String message = new String(body, StandardCharsets.UTF_8);
            Object funResult = receiveBeforeFun.invoke(exchange, routingKey, queueName, properties);
            try {
                consumer.accept(message);
            } catch (RuntimeException e) {
                receiveErrorFun.invoke(e, funResult);
                throw e;
            } finally {
                channel.basicAck(envelope.getDeliveryTag(), false);
                receiveFinishFun.invoke(funResult);
            }
        }
    };
}

From source file:mx.bigdata.utils.amqp.AMQPClientHelperImpl.java

License:Apache License

public byte[] getBodyAndAck(Channel channel, QueueingConsumer consumer) throws Exception {
    QueueingConsumer.Delivery delivery = consumer.nextDelivery();
    byte[] body = delivery.getBody();
    long deliveryTag = delivery.getEnvelope().getDeliveryTag();
    channel.basicAck(deliveryTag, true);
    return body;
}

From source file:mx.bigdata.utils.amqp.AMQPClientHelperImpl.java

License:Apache License

public void ack(Channel channel, QueueingConsumer.Delivery delivery) throws Exception {
    long deliveryTag = delivery.getEnvelope().getDeliveryTag();
    channel.basicAck(deliveryTag, true);
}