List of usage examples for com.rabbitmq.client.Channel#basicAck
void basicAck(long deliveryTag, boolean multiple) throws IOException;
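Before the project examples below, here is a minimal, self-contained sketch of the call in context, written against the callback-based consumer API of the newer Java client (5.x). The host, queue name, and class name are illustrative assumptions only, not taken from any of the examples that follow; most of the examples below use the older QueueingConsumer API instead.

import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.DeliverCallback;

public class BasicAckSketch {
    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost"); // assumption: broker reachable on localhost
        try (Connection connection = factory.newConnection();
             Channel channel = connection.createChannel()) {
            // Hypothetical queue used only for this sketch.
            channel.queueDeclare("example-queue", false, false, false, null);
            DeliverCallback deliverCallback = (consumerTag, delivery) -> {
                // ... process delivery.getBody() here ...
                // Acknowledge just this one message (multiple = false).
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            };
            // autoAck = false, so the broker keeps the message until basicAck is called above.
            channel.basicConsume("example-queue", false, deliverCallback, consumerTag -> { });
            Thread.sleep(5000); // keep the consumer alive briefly for the sketch
        }
    }
}

Passing true as the second argument instead acknowledges every unacknowledged delivery on the channel up to and including the given tag; that is how the batch-oriented examples below (Druid, Flume) commit a whole batch with a single basicAck call.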
From source file: org.apache.druid.firehose.rabbitmq.RabbitMQFirehoseFactory.java
License: Apache License
@Override
public Firehose connect(final InputRowParser<ByteBuffer> firehoseParser, File temporaryDirectory) throws IOException {
    ConnectionOptions lyraOptions = new ConnectionOptions(this.connectionFactory);
    Config lyraConfig = new Config().withRecoveryPolicy(new RetryPolicy()
            .withMaxRetries(config.getMaxRetries())
            .withRetryInterval(Duration.seconds(config.getRetryIntervalSeconds()))
            .withMaxDuration(Duration.seconds(config.getMaxDurationSeconds())));

    String queue = config.getQueue();
    String exchange = config.getExchange();
    String routingKey = config.getRoutingKey();

    boolean durable = config.isDurable();
    boolean exclusive = config.isExclusive();
    boolean autoDelete = config.isAutoDelete();

    final Connection connection = Connections.create(lyraOptions, lyraConfig);
    connection.addShutdownListener(new ShutdownListener() {
        @Override
        public void shutdownCompleted(ShutdownSignalException cause) {
            log.warn(cause, "Connection closed!");
        }
    });

    final Channel channel = connection.createChannel();
    channel.queueDeclare(queue, durable, exclusive, autoDelete, null);
    channel.queueBind(queue, exchange, routingKey);
    channel.addShutdownListener(new ShutdownListener() {
        @Override
        public void shutdownCompleted(ShutdownSignalException cause) {
            log.warn(cause, "Channel closed!");
        }
    });

    // We create a QueueingConsumer that will not auto-acknowledge messages since that
    // happens on commit().
    final QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(queue, false, consumer);

    return new Firehose() {
        /**
         * Storing the latest row as a member variable should be safe since this will only be run
         * by a single thread.
         */
        private InputRow nextRow;

        /**
         * Store the latest delivery tag to be able to commit (acknowledge) the message delivery up to
         * and including this tag. See commit() for more detail.
         */
        private long lastDeliveryTag;

        private Iterator<InputRow> nextIterator = Collections.emptyIterator();

        @Override
        public boolean hasMore() {
            nextRow = null;
            try {
                if (nextIterator.hasNext()) {
                    nextRow = nextIterator.next();
                    return true;
                }
                // Wait for the next delivery. This will block until something is available.
                final Delivery delivery = consumer.nextDelivery();
                if (delivery != null) {
                    lastDeliveryTag = delivery.getEnvelope().getDeliveryTag();
                    nextIterator = firehoseParser.parseBatch(ByteBuffer.wrap(delivery.getBody())).iterator();
                    if (nextIterator.hasNext()) {
                        nextRow = nextIterator.next();
                        // If delivery is non-null, we report that there is something more to process.
                        return true;
                    }
                }
            } catch (InterruptedException e) {
                // A little unclear on how we should handle this.
                // At any rate, we're in an unknown state now so let's log something and return false.
                log.wtf(e, "Got interrupted while waiting for next delivery. Doubt this should ever happen.");
            }
            // This means that delivery is null or we caught the exception above so we report that we have
            // nothing more to process.
            return false;
        }

        @Nullable
        @Override
        public InputRow nextRow() {
            if (nextRow == null) {
                // Just making sure.
                log.wtf("I have nothing in delivery. Method hasMore() should have returned false.");
                return null;
            }
            return nextRow;
        }

        @Override
        public Runnable commit() {
            // This method will be called from the same thread that calls the other methods of
            // this Firehose. However, the returned Runnable will be called by a different thread.
            //
            // It should be (thread) safe to copy the lastDeliveryTag like we do below and then
            // acknowledge values up to and including that value.
            return new Runnable() {
                // Store (copy) the last delivery tag to "become" thread safe.
                final long deliveryTag = lastDeliveryTag;

                @Override
                public void run() {
                    try {
                        log.info("Acknowledging delivery of messages up to tag: " + deliveryTag);
                        // Acknowledge all messages up to and including the stored delivery tag.
                        channel.basicAck(deliveryTag, true);
                    } catch (IOException e) {
                        log.error(e, "Unable to acknowledge message reception to message queue.");
                    }
                }
            };
        }

        @Override
        public void close() throws IOException {
            log.info("Closing connection to RabbitMQ");
            channel.close();
            connection.close();
        }
    };
}
From source file: org.apache.flume.amqp.AmqpConsumer.java
License: Apache License
protected void deliverBatch(Channel channel, List<QueueingConsumer.Delivery> batch) throws IOException {
    batchDeliveryListener.onBatchDelivery(batch);
    if (!autoAck) {
        int lastItemIndex = batch.size() - 1;
        channel.basicAck(batch.get(lastItemIndex).getEnvelope().getDeliveryTag(), true);
    }
}
From source file: org.apache.niolex.rabbit.rpc.RPCServer.java
License: Apache License
public static void main(String[] argv) {
    Connection connection = null;
    Channel channel = null;
    try {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost");

        connection = factory.newConnection();
        channel = connection.createChannel();

        channel.queueDeclare(RPC_QUEUE_NAME, false, false, false, null);
        channel.basicQos(1);

        QueueingConsumer consumer = new QueueingConsumer(channel);
        channel.basicConsume(RPC_QUEUE_NAME, false, consumer);

        System.out.println(" [x] Awaiting RPC requests");

        while (true) {
            String response = null;

            QueueingConsumer.Delivery delivery = consumer.nextDelivery();

            BasicProperties props = delivery.getProperties();
            BasicProperties replyProps = new BasicProperties.Builder()
                    .correlationId(props.getCorrelationId())
                    .build();

            try {
                String message = new String(delivery.getBody(), "UTF-8");
                int n = Integer.parseInt(message);

                System.out.println(" [.] fib(" + message + ")");
                response = "" + fib(n);
            } catch (Exception e) {
                System.out.println(" [.] " + e.toString());
                response = "";
            } finally {
                channel.basicPublish("", props.getReplyTo(), replyProps, response.getBytes("UTF-8"));
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ignore) {
            }
        }
    }
}
From source file: org.ballerinalang.messaging.rabbitmq.nativeimpl.message.BasicAck.java
License: Open Source License
@Override
public void execute(Context context) {
    @SuppressWarnings(RabbitMQConstants.UNCHECKED)
    BMap<String, BValue> messageObject = (BMap<String, BValue>) context.getRefArgument(0);
    Channel channel = RabbitMQUtils.getNativeObject(messageObject, RabbitMQConstants.CHANNEL_NATIVE_OBJECT,
            Channel.class, context);
    long deliveryTag = (long) messageObject.getNativeData(RabbitMQConstants.DELIVERY_TAG);
    boolean multiple = context.getBooleanArgument(0);
    boolean multipleAck = ChannelUtils.validateMultipleAcknowledgements(messageObject);
    boolean ackMode = ChannelUtils.validateAckMode(messageObject);
    if (!multipleAck && ackMode) {
        try {
            channel.basicAck(deliveryTag, multiple);
            messageObject.addNativeData(RabbitMQConstants.MESSAGE_ACK_STATUS, true);
        } catch (IOException exception) {
            LOGGER.error("Error occurred while positively acknowledging the message", exception);
            RabbitMQUtils.returnError("Error occurred while positively acknowledging the message: ", context,
                    exception);
        } catch (AlreadyClosedException exception) {
            LOGGER.error(RabbitMQConstants.CHANNEL_CLOSED_ERROR, exception);
            RabbitMQUtils.returnError(RabbitMQConstants.CHANNEL_CLOSED_ERROR, context, exception);
        }
    } else if (multipleAck && ackMode) {
        RabbitMQUtils.returnError(RabbitMQConstants.MULTIPLE_ACK_ERROR, context,
                new RabbitMQConnectorException(RabbitMQConstants.MULTIPLE_ACK_ERROR));
    } else {
        RabbitMQUtils.returnError(RabbitMQConstants.ACK_MODE_ERROR, context,
                new RabbitMQConnectorException(RabbitMQConstants.ACK_MODE_ERROR));
    }
}
From source file: org.graylog2.inputs.amqp.AMQPConsumer.java
License: Open Source License
public Consumer createConsumer(final Channel channel) {
    return new DefaultConsumer(channel) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties,
                byte[] body) throws IOException {
            try {
                // The duplication here is a bit unfortunate. Improve by having a Processor Interface.
                switch (queueConfig.getInputType()) {
                case GELF:
                    GELFMessage gelf = new GELFMessage(body);
                    try {
                        gelfProcessor.messageReceived(gelf);
                    } catch (BufferOutOfCapacityException e) {
                        LOG.warn("ProcessBufferProcessor is out of capacity. Requeuing message!");
                        channel.basicReject(envelope.getDeliveryTag(), true);
                        reQueuedMessages.mark();
                        return;
                    }
                    handledGELFMessages.mark();
                    break;
                case SYSLOG:
                    try {
                        syslogProcessor.messageReceived(new String(body), connection.getAddress());
                    } catch (BufferOutOfCapacityException e) {
                        LOG.warn("ProcessBufferProcessor is out of capacity. Requeuing message!");
                        channel.basicReject(envelope.getDeliveryTag(), true);
                        reQueuedMessages.mark();
                        return;
                    }
                    handledSyslogMessages.mark();
                    break;
                }

                channel.basicAck(envelope.getDeliveryTag(), false);
                handledMessages.mark();
            } catch (Exception e) {
                LOG.error("Could not handle message from AMQP.", e);
            }
        }
    };
}
From source file: org.graylog2.messagehandlers.amqp.AMQPSubscriberThread.java
License: Open Source License
/**
 * Run the thread. Runs forever!
 */
@Override
public void run() {
    while (true) {
        Connection connection = null;
        Channel channel = null;

        QueueingConsumer consumer = new QueueingConsumer(channel);

        try {
            connection = broker.getConnection();
            channel = connection.createChannel();
            channel.basicConsume(this.queue.getName(), false, consumer);
            LOG.info("Successfully connected to queue '" + this.queue.getName() + "'");
        } catch (Exception e) {
            LOG.error("AMQP queue '" + this.queue.getName()
                    + "': Could not connect to AMQP broker or channel (Make sure that "
                    + "the queue exists. Retrying in " + SLEEP_INTERVAL + " seconds. (" + e.getMessage() + ")");

            // Retry after waiting for SLEEP_INTERVAL seconds.
            try {
                Thread.sleep(SLEEP_INTERVAL * 1000);
            } catch (InterruptedException foo) {
            }
            continue;
        }

        while (true) {
            try {
                QueueingConsumer.Delivery delivery;
                try {
                    delivery = consumer.nextDelivery();
                } catch (InterruptedException ie) {
                    continue;
                }

                // Handle the message. (Store in MongoDB etc)
                try {
                    handleMessage(delivery.getBody());
                } catch (Exception e) {
                    LOG.error("Could not handle AMQP message: " + e.toString());
                }

                try {
                    channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
                } catch (IOException e) {
                    LOG.error("Could not ack AMQP message: " + e.toString());
                }
            } catch (Exception e) {
                // Error while receiving. i.e. when AMQP broker breaks down.
                LOG.error("AMQP queue '" + this.queue.getName()
                        + "': Error while subscribed (rebuilding connection "
                        + "in " + SLEEP_INTERVAL + " seconds. (" + e.getMessage() + ")");

                // Better close connection stuff if it is still active.
                try {
                    channel.close();
                    connection.close();
                } catch (IOException ex) {
                    // I don't care.
                } catch (AlreadyClosedException ex) {
                    // I don't care.
                }

                // Retry after waiting for SLEEP_INTERVAL seconds.
                try {
                    Thread.sleep(SLEEP_INTERVAL * 1000);
                } catch (InterruptedException foo) {
                }
                break;
            }
        }
    }
}
From source file: org.hp.samples.ProcessMessage.java
License: Open Source License
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    response.setContentType("text/plain");
    response.setStatus(200);
    PrintWriter writer = response.getWriter();
    writer.println("Here's your message:");

    // Pull out the RABBITMQ_URL environment variable
    String uri = System.getenv("RABBITMQ_URL");
    ConnectionFactory factory = new ConnectionFactory();
    try {
        factory.setUri(uri);
    } catch (KeyManagementException e) {
        e.printStackTrace();
    } catch (NoSuchAlgorithmException e) {
        e.printStackTrace();
    } catch (URISyntaxException e) {
        e.printStackTrace();
    }

    Connection connection = factory.newConnection();
    Channel channel = connection.createChannel();

    // Create the queue
    channel.queueDeclare("hello", false, false, false, null);

    String routingKey = "thekey";
    String exchangeName = "exchange";

    // Declare an exchange and bind it to the queue
    channel.exchangeDeclare(exchangeName, "direct", true);
    channel.queueBind("hello", exchangeName, routingKey);

    // Grab the message from the HTML form and publish it to the queue
    String message = request.getParameter("message");
    channel.basicPublish(exchangeName, routingKey, null, message.getBytes());
    writer.println(" Message sent to queue '" + message + "'");

    boolean autoAck = false;

    // Get the response message
    GetResponse responseMsg = channel.basicGet("hello", autoAck);

    if (responseMsg == null) {
        // No message retrieved.
    } else {
        byte[] body = responseMsg.getBody();
        // Since getBody() returns a byte array, convert to a string for
        // the user.
        String bodyString = new String(body);
        long deliveryTag = responseMsg.getEnvelope().getDeliveryTag();
        writer.println("Message received: " + bodyString);

        // Acknowledge that we received the message so that the queue
        // removes the message so that it's not sent to us again.
        channel.basicAck(deliveryTag, false);
    }
    writer.close();
}
From source file: org.mazerunner.core.messaging.Worker.java
License: Apache License
public void doMain(String[] args) throws Exception {
    CmdLineParser parser = new CmdLineParser(this);

    // if you have a wider console, you could increase the value;
    // here 80 is also the default
    parser.setUsageWidth(80);

    try {
        // parse the arguments.
        parser.parseArgument(args);

        if (sparkMaster == "" || hadoopHdfs == "")
            throw new CmdLineException(parser, "Options required: --hadoop.hdfs <url>, --spark.master <url>");

        ConfigurationLoader.getInstance().setHadoopHdfsUri(hadoopHdfs);
        ConfigurationLoader.getInstance().setSparkHost(sparkMaster);
        ConfigurationLoader.getInstance().setAppName(sparkAppName);
        ConfigurationLoader.getInstance().setExecutorMemory(sparkExecutorMemory);
        ConfigurationLoader.getInstance().setDriverHost(driverHost);
        ConfigurationLoader.getInstance().setRabbitmqNodename(rabbitMqHost);
    } catch (CmdLineException e) {
        // if there's a problem in the command line,
        // you'll get this exception. this will report
        // an error message.
        System.err.println(e.getMessage());
        System.err.println("java -cp $CLASSPATH [<spark-config-options>] <main-class> [<mazerunner-args>]");
        // print the list of available options
        parser.printUsage(System.err);
        System.err.println();

        // print option sample. This is useful some time
        System.err.println(" Example: java -cp $CLASSPATH org.mazerunner.core.messaging.Worker"
                + parser.printExample(ALL));

        return;
    }

    ConnectionFactory factory = new ConnectionFactory();
    factory.setHost(ConfigurationLoader.getInstance().getRabbitmqNodename());
    Connection connection = factory.newConnection();
    Channel channel = connection.createChannel();

    channel.queueDeclare(TASK_QUEUE_NAME, true, false, false, null);
    channel.basicQos(20);

    // Initialize spark context
    GraphProcessor.initializeSparkContext();

    QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(TASK_QUEUE_NAME, false, consumer);

    System.out.println(" [*] Waiting for messages. To exit press CTRL+C");

    while (true) {
        QueueingConsumer.Delivery delivery = consumer.nextDelivery();
        String message = new String(delivery.getBody());

        System.out.println(" [x] Received '" + message + "'");

        // Deserialize message
        Gson gson = new Gson();
        ProcessorMessage processorMessage = gson.fromJson(message, ProcessorMessage.class);

        // Run PageRank
        GraphProcessor.processEdgeList(processorMessage);

        System.out.println(" [x] Done '" + message + "'");

        channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
    }
}
From source file: org.mule.transport.amqp.AmqpConnector.java
License: Open Source License
public void ackMessageIfNecessary(final Channel channel, final AmqpMessage amqpMessage) throws IOException {
    if (getAckMode() == AckMode.MULE_AUTO) {
        channel.basicAck(amqpMessage.getEnvelope().getDeliveryTag(), false);

        if (logger.isDebugEnabled()) {
            logger.debug("Mule acknowledged message: " + amqpMessage + " on channel: " + channel);
        }
    }
}
From source file: org.mule.transport.amqp.AmqpMessageAcknowledger.java
License: Open Source License
public static void ack(final MuleMessage message, final boolean multiple) throws SessionException {
    final Long deliveryTag = message.getInboundProperty(AmqpConstants.DELIVERY_TAG);
    if (deliveryTag == null) {
        LOG.warn("Missing " + AmqpConstants.DELIVERY_TAG
                + " inbound property, impossible to ack message: " + message);
        return;
    }

    final Channel channel = AmqpConnector.getChannelFromMessage(message);
    if (channel == null) {
        throw new SessionException(MessageFactory.createStaticMessage("No " + AmqpConstants.CHANNEL
                + " session property found, impossible to ack message: " + message));
    }

    try {
        channel.basicAck(deliveryTag, multiple);
    } catch (final IOException ioe) {
        throw new SessionException(
                MessageFactory.createStaticMessage(
                        "Failed to ack message w/deliveryTag: " + deliveryTag + " on channel: " + channel),
                ioe);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Manually acknowledged message w/deliveryTag: " + deliveryTag + " on channel: " + channel);
    }
}