Example usage for com.rabbitmq.client Channel basicAck

Introduction

On this page you can find usage examples for com.rabbitmq.client Channel basicAck.

Prototype

void basicAck(long deliveryTag, boolean multiple) throws IOException;

Document

Acknowledge one or several received messages.
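
For orientation, here is a minimal self-contained sketch of the same manual-acknowledgement pattern using the current client API (a DeliverCallback lambda instead of the QueueingConsumer class used by several examples below, which is deprecated and removed in newer client versions). The host name and queue name are placeholders, not taken from any example on this page.

import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.DeliverCallback;

public class BasicAckSketch {

    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost"); // placeholder host

        try (Connection connection = factory.newConnection();
             Channel channel = connection.createChannel()) {

            channel.queueDeclare("example-queue", true, false, false, null); // placeholder queue

            DeliverCallback onDelivery = (consumerTag, delivery) -> {
                String message = new String(delivery.getBody(), "UTF-8");
                System.out.println(" [x] Received '" + message + "'");
                // acknowledge exactly this delivery (multiple = false)
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            };

            // autoAck = false, so the broker keeps the message until the basicAck above is called
            channel.basicConsume("example-queue", false, onDelivery, consumerTag -> { });

            Thread.sleep(10_000); // keep the connection open long enough to receive something
        }
    }
}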

Usage

From source file:edu.iit.rabbitmq.Receive.java

public String getMessage() throws Exception {
    Channel channel = getChannel();
    QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(QUEUENAME, false, consumer); // autoAck must be false when acknowledging manually with basicAck
    QueueingConsumer.Delivery delivery = consumer.nextDelivery();
    channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
    String message = new String(delivery.getBody(), "UTF-8");
    return message;
}
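
All of the examples on this page pass multiple = false, acknowledging one delivery at a time. The sketch below (host and queue names are again placeholders) pulls several messages with basicGet without acknowledging them individually, then confirms them all with one cumulative basicAck, passing multiple = true on the last delivery tag.

import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.GetResponse;

public class CumulativeAckSketch {

    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost"); // placeholder host

        try (Connection connection = factory.newConnection();
             Channel channel = connection.createChannel()) {

            channel.queueDeclare("batch-queue", true, false, false, null); // placeholder queue

            long lastTag = -1;
            int fetched = 0;

            // pull up to 10 messages without acknowledging any of them yet
            for (int i = 0; i < 10; i++) {
                GetResponse response = channel.basicGet("batch-queue", false); // autoAck = false
                if (response == null) {
                    break; // queue is empty
                }
                lastTag = response.getEnvelope().getDeliveryTag();
                fetched++;
            }

            if (fetched > 0) {
                // multiple = true acknowledges lastTag and every earlier unacked tag in one call
                channel.basicAck(lastTag, true);
            }
        }
    }
}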

From source file:edu.jhu.pha.vospace.jobs.JobsProcessorQueuedImpl.java

License:Apache License

@Override
public void run() {
    final JobsProcessor parentProc = this;
    QueueConnector.goAMQP("submitJob", new QueueConnector.AMQPWorker<Boolean>() {
        @Override
        public Boolean go(com.rabbitmq.client.Connection conn, com.rabbitmq.client.Channel channel)
                throws IOException {

            channel.exchangeDeclare(conf.getString("transfers.exchange.submited"), "topic", true);

            channel.queueDeclare(conf.getString("transfers.queue.submited.server_initialised"), true, false,
                    false, null);

            channel.queueBind(conf.getString("transfers.queue.submited.server_initialised"),
                    conf.getString("transfers.exchange.submited"),
                    "direction." + JobDescription.DIRECTION.PUSHFROMVOSPACE);
            channel.queueBind(conf.getString("transfers.queue.submited.server_initialised"),
                    conf.getString("transfers.exchange.submited"),
                    "direction." + JobDescription.DIRECTION.PULLTOVOSPACE);
            channel.queueBind(conf.getString("transfers.queue.submited.server_initialised"),
                    conf.getString("transfers.exchange.submited"),
                    "direction." + JobDescription.DIRECTION.LOCAL);

            QueueingConsumer consumer = new QueueingConsumer(channel);
            channel.basicConsume(conf.getString("transfers.queue.submited.server_initialised"), false,
                    consumer);

            try {
                // The main cycle to process the jobs
                while (!jobsThread.isInterrupted()) {

                    for (Iterator<Future<STATE>> it = workers.iterator(); it.hasNext();) {
                        Future<STATE> next = it.next();
                        if (next.isDone()) {
                            it.remove();
                            logger.debug("Job " + next + " is removed from the workers.");
                        }
                    }

                    if (workers.size() >= jobsPoolSize) {
                        logger.debug("Waiting for a jobs pool, size: " + workers.size());
                        synchronized (JobsProcessor.class) {
                            JobsProcessor.class.wait();
                        }
                        logger.debug("End waiting for a jobs pool, size: " + workers.size());
                    } else {
                        logger.debug("Waiting for a job");
                        QueueingConsumer.Delivery delivery = consumer.nextDelivery();

                        // Job JSON notation
                        JobDescription job = (new ObjectMapper()).readValue(delivery.getBody(), 0,
                                delivery.getBody().length, JobDescription.class);

                        logger.debug("There's a submited job! " + job.getId());

                        TransferThread thread = new TransferThread(job, parentProc);

                        Future<STATE> future = service.submit(thread);

                        workers.add(future);
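                        // ack only after the job has been handed to the executor; if the server stops before this point, the unacked message is redelivered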
                        channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
                    }
                }
            } catch (InterruptedException ex) {
                // server restarted
            }

            return true;
        }
    });

}

From source file:edu.jhu.pha.vospace.process.NodeProcessor.java

License:Apache License

@Override
public void run() {
    QueueConnector.goAMQP("nodesProcessor", new QueueConnector.AMQPWorker<Boolean>() {
        @Override
        public Boolean go(com.rabbitmq.client.Connection conn, com.rabbitmq.client.Channel channel)
                throws IOException {

            channel.exchangeDeclare(conf.getString("process.exchange.nodeprocess"), "fanout", true);
            channel.exchangeDeclare(conf.getString("vospace.exchange.nodechanged"), "fanout", false);

            channel.queueDeclare(conf.getString("process.queue.nodeprocess"), true, false, false, null);

            channel.queueBind(conf.getString("process.queue.nodeprocess"),
                    conf.getString("process.exchange.nodeprocess"), "");

            QueueingConsumer consumer = new QueueingConsumer(channel);
            channel.basicConsume(conf.getString("process.queue.nodeprocess"), false, consumer);

            while (!Thread.currentThread().isInterrupted()) {

                Node node = null;

                try {
                    QueueingConsumer.Delivery delivery = consumer.nextDelivery();

                    Map<String, Object> nodeData = (new ObjectMapper()).readValue(delivery.getBody(), 0,
                            delivery.getBody().length, new TypeReference<HashMap<String, Object>>() {
                            });

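                    // ack immediately after decoding the payload; processing failures below go to processError() and do not trigger broker redelivery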
                    channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
                    node = NodeFactory.getNode(new VospaceId((String) nodeData.get("uri")),
                            (String) nodeData.get("owner"));

                    logger.debug("Node changed: " + nodeData.get("uri") + " " + nodeData.get("owner") + " "
                            + node.getType());

                    switch (node.getType()) {
                    case DATA_NODE:
                    case STRUCTURED_DATA_NODE:
                    case UNSTRUCTURED_DATA_NODE: {
                        TikaInputStream inp = null;
                        try {
                            Metadata detectTikaMeta = new Metadata();
                            detectTikaMeta.set(Metadata.RESOURCE_NAME_KEY,
                                    node.getUri().getNodePath().getNodeName());
                            inp = TikaInputStream.get(node.exportData());
                            //MediaType type = new DefaultDetector().detect(inp, nodeTikaMeta);
                            List<Detector> list = new ArrayList<Detector>();
                            list.add(new SimulationDetector());
                            list.add(new DefaultDetector());
                            Detector detector = new CompositeDetector(list);

                            MediaType type = detector.detect(inp, detectTikaMeta);
                            node.getNodeInfo().setContentType(type.toString());
                            node.getMetastore().storeInfo(node.getUri(), node.getNodeInfo());

                            JsonNode credentials = UserHelper.getProcessorCredentials(node.getOwner());

                            boolean makeStructured = false;

                            List<String> externalLinks = new ArrayList<String>();
                            for (ProcessorConfig processorConf : ProcessingFactory.getInstance()
                                    .getProcessorConfigsForNode(node, credentials)) {
                                Metadata nodeTikaMeta = new Metadata();
                                nodeTikaMeta.set(TikaCoreProperties.SOURCE, node.getUri().toString());
                                nodeTikaMeta.set("owner", (String) nodeData.get("owner"));
                                nodeTikaMeta.set(TikaCoreProperties.TITLE,
                                        node.getUri().getNodePath().getNodeName());
                                nodeTikaMeta.add(TikaCoreProperties.METADATA_DATE, dateFormat
                                        .format(Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTime()));

                                nodeTikaMeta.set(Metadata.CONTENT_LOCATION,
                                        ((DataNode) node).getHttpDownloadLink().toASCIIString());
                                nodeTikaMeta.set(Metadata.CONTENT_TYPE, type.toString());

                                AbstractParser parser;
                                TikaConfig config = TikaConfig.getDefaultConfig();
                                if (processorConf.getTikaConfig() != null) {
                                    config = new TikaConfig(
                                            getClass().getResourceAsStream(processorConf.getTikaConfig()));
                                }

                                parser = new CompositeParser(config.getMediaTypeRegistry(), config.getParser());

                                Processor processor = Processor.fromProcessorConfig(processorConf);

                                InputStream str = null;
                                try {
                                    str = TikaInputStream.get(node.exportData());
                                    parser.parse(str, processor.getContentHandler(), nodeTikaMeta,
                                            new ParseContext());
                                } finally {
                                    try {
                                        str.close();
                                    } catch (Exception ex) {
                                    }
                                }

                                // now do out-of-tika processing of results
                                try {
                                    processor.processNodeMeta(nodeTikaMeta,
                                            credentials.get(processorConf.getId()));
                                    logger.debug("Processing of " + node.getUri().toString() + " is finished.");

                                } catch (Exception e) {
                                    logger.error("Error processing the node. " + e.getMessage());
                                    e.printStackTrace();
                                    processError(node, e);
                                }

                                String[] links = nodeTikaMeta.getValues("EXTERNAL_LINKS");

                                if (null != links && links.length > 0) {
                                    externalLinks.addAll(Arrays.asList(links));
                                }

                                MediaType curType = MediaType.parse(nodeTikaMeta.get(Metadata.CONTENT_TYPE));
                                if (MIME_REGISTRY.isSpecializationOf(curType, type)) {
                                    type = curType;
                                    logger.debug("Media type reassigned to " + type.toString() + " by "
                                            + processorConf.getId());
                                }

                                String nodeTypeStr = nodeTikaMeta.get("NodeType");
                                if (null != nodeTypeStr
                                        && NodeType.valueOf(nodeTypeStr).equals(NodeType.STRUCTURED_DATA_NODE))
                                    makeStructured = true;
                            }

                            node.makeNodeStructured(makeStructured);

                            Map<String, String> properties = new HashMap<String, String>();
                            properties.put(PROCESSING_PROPERTY, "done");
                            if (externalLinks.size() > 0) {
                                properties.put(EXTERNAL_LINK_PROPERTY, StringUtils.join(externalLinks, ' '));
                            }
                            node.getNodeInfo().setContentType(type.toString());

                            node.getMetastore().updateUserProperties(node.getUri(), properties);
                            node.getMetastore().storeInfo(node.getUri(), node.getNodeInfo());

                            logger.debug("Updated node " + node.getUri().toString() + " to "
                                    + node.getNodeInfo().getContentType() + " and "
                                    + node.getNodeInfo().getSize());

                            // update node's container size metadata
                            try {
                                ContainerNode contNode = (ContainerNode) NodeFactory.getNode(
                                        new VospaceId(
                                                new NodePath(node.getUri().getNodePath().getContainerName())),
                                        node.getOwner());
                                node.getStorage().updateNodeInfo(contNode.getUri().getNodePath(),
                                        contNode.getNodeInfo());
                                node.getMetastore().storeInfo(contNode.getUri(), contNode.getNodeInfo());
                            } catch (URISyntaxException e) {
                                logger.error("Updating root node size failed: " + e.getMessage());
                            }

                            try {
                                nodeData.put("container",
                                        node.getUri().getNodePath().getParentPath().getNodeStoragePath());
                                byte[] jobSer = (new ObjectMapper()).writeValueAsBytes(nodeData);
                                channel.basicPublish(conf.getString("vospace.exchange.nodechanged"), "", null,
                                        jobSer);
                            } catch (IOException e) {
                                logger.error(e);
                            }
                        } catch (TikaException ex) {
                            logger.error("Error parsing the node " + node.getUri().toString() + ": "
                                    + ex.getMessage());
                            processError(node, ex);
                            ex.printStackTrace();
                        } catch (SAXException ex) {
                            logger.error("Error SAX parsing the node " + node.getUri().toString() + ": "
                                    + ex.getMessage());
                            processError(node, ex);
                        } catch (IOException ex) {
                            logger.error("Error reading the node " + node.getUri().toString() + ": "
                                    + ex.getMessage());
                            processError(node, ex);
                        } finally {
                            try {
                                inp.close();
                            } catch (Exception ex2) {
                            }
                        }

                        break;
                    }
                    case CONTAINER_NODE: {
                        //                           DbPoolServlet.goSql("Processing nodes",
                        //                               "select * from nodes where owner = ?)",
                        //                                 new SqlWorker<Boolean>() {
                        //                                     @Override
                        //                                     public Boolean go(java.sql.Connection conn, java.sql.PreparedStatement stmt) throws SQLException {
                        //                                        stmt.setString(1, node.getOwner());
                        //                                         /*ResultSet resSet = stmt.executeQuery();
                        //                                         while(resSet.next()) {
                        //                                            String uriStr = resSet.getString(1);
                        //                                            String username = resSet.getString(2);
                        //                                            
                        //                                            try {
                        //                                               VospaceId uri = new VospaceId(uriStr);
                        //                                               
                        //                                               Node newNode = NodeFactory.getInstance().getNode(uri, username);
                        //                                               newNode.remove();
                        //                                            } catch(Exception ex) {
                        //                                               ex.printStackTrace();
                        //                                            }
                        //                                         }*/
                        //                                        return true;
                        //                                     }
                        //                                 }
                        //                        
                        //                            );
                        break;
                    }
                    default: {
                        break;
                    }
                    }

                } catch (InterruptedException ex) {
                    logger.error("Sleeping interrupted. " + ex.getMessage());
                    processError(node, ex);
                } catch (IOException ex) {
                    ex.printStackTrace();
                    logger.error("Error reading the changed node JSON: " + ex.getMessage());
                    processError(node, ex);
                } catch (URISyntaxException ex) {
                    logger.error("Error parsing VospaceId from changed node JSON: " + ex.getMessage());
                    processError(node, ex);
                }
            }

            return true;
        }
    });

}

From source file:es.devcircus.rabbitmq_examples.rabbitmq_rpc.RPCServer.java

License:Open Source License

/**
 *
 * @param argv
 */
public static void main(String[] argv) {
    Connection connection = null;
    Channel channel = null;
    try {
        ConnectionFactory factory = new ConnectionFactory();
        //            factory.setHost("localhost");
        factory.setHost("192.168.0.202");
        //            factory.setPort(5671);

        //            factory.useSslProtocol();

        connection = factory.newConnection();
        channel = connection.createChannel();

        channel.queueDeclare(RPC_QUEUE_NAME, false, false, false, null);

        channel.basicQos(1);

        QueueingConsumer consumer = new QueueingConsumer(channel);
        channel.basicConsume(RPC_QUEUE_NAME, false, consumer);

        System.out.println(" [x] Awaiting RPC requests");

        while (true) {
            String response = null;

            QueueingConsumer.Delivery delivery = consumer.nextDelivery();

            BasicProperties props = delivery.getProperties();
            BasicProperties replyProps = new BasicProperties.Builder().correlationId(props.getCorrelationId())
                    .build();

            try {
                String message = new String(delivery.getBody(), "UTF-8");
                int n = Integer.parseInt(message);

                System.out.println(" [.] fib(" + message + ")");
                response = "" + fib(n);
            } catch (Exception e) {
                System.out.println(" [.] " + e.toString());
                response = "";
            } finally {
                channel.basicPublish("", props.getReplyTo(), replyProps, response.getBytes("UTF-8"));

                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }
        }
    } catch (IOException | InterruptedException | ShutdownSignalException | ConsumerCancelledException e) {
        System.out.println(e.toString());
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ignore) {
            }
        }
    }
}

From source file:es.devcircus.rabbitmq_examples.rabbitmq_work_queues.Worker.java

License:Open Source License

/**
 *
 * @param argv
 * @throws Exception 
 */
public static void main(String[] argv) throws Exception {

    ConnectionFactory factory = new ConnectionFactory();
    factory.setHost("localhost");
    Connection connection = factory.newConnection();
    Channel channel = connection.createChannel();

    channel.queueDeclare(TASK_QUEUE_NAME, true, false, false, null);
    System.out.println(" [*] Waiting for messages. To exit press CTRL+C");

    channel.basicQos(1);

    QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume(TASK_QUEUE_NAME, false, consumer);

    while (true) {
        QueueingConsumer.Delivery delivery = consumer.nextDelivery();
        String message = new String(delivery.getBody());

        System.out.println(" [x] Received '" + message + "'");
        doWork(message);
        System.out.println(" [x] Done");

        channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
    }
}

From source file:gash.router.server.MessageServer.java

License:Apache License

public void createQueue() throws IOException, ShutdownSignalException, ConsumerCancelledException,
        InterruptedException, SQLException {
    ConnectionFactory factory = new ConnectionFactory();
    factory.setHost("localhost");
    Connection connection = factory.newConnection();
    Channel channel = connection.createChannel();
    channel.queueDeclare("inbound_queue", false, false, false, null);
    channel.basicQos(1);
    postgre = new PostgreSQL(url, username, password, dbname, ssl);
    QueueingConsumer consumer = new QueueingConsumer(channel);
    channel.basicConsume("inbound_queue", false, consumer);
    //       Map<String, byte[]> map = new HashMap<String, byte[]>();
    while (true) {
        QueueingConsumer.Delivery delivery = consumer.nextDelivery();
        BasicProperties props = delivery.getProperties();
        String request = props.getType();
        System.out.println(request);

        if (request != null) {
            if (request.equals("get")) {
                String key = new String(delivery.getBody());
                BasicProperties replyProps = new BasicProperties.Builder()
                        .correlationId(props.getCorrelationId()).build();
                byte[] image = postgre.get(key);
                channel.basicPublish("", props.getReplyTo(), replyProps, image);
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }

            if (request.equals("put")) {
                byte[] image = delivery.getBody();
                postgre.put(props.getUserId(), image);
                BasicProperties replyProps = new BasicProperties.Builder()
                        .correlationId(props.getCorrelationId()).build();

                channel.basicPublish("", props.getReplyTo(), replyProps, image);
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }
            if (request.equals("post")) {
                System.out.println("Message Server");
                String key = postgre.post(delivery.getBody());
                BasicProperties replyProps = new BasicProperties.Builder()
                        .correlationId(props.getCorrelationId()).build();

                channel.basicPublish("", props.getReplyTo(), replyProps, key.getBytes());
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }
            if (request.equals("delete")) {
                String key = new String(delivery.getBody());
                postgre.delete(key);
            }

        }
    }
}

From source file:genqa.ExportRabbitMQVerifier.java

License:Open Source License

private Consumer createConsumer(final Channel channel) {
    return new DefaultConsumer(channel) {
        @Override
        public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties,
                byte[] body) throws IOException {
            long deliveryTag = envelope.getDeliveryTag();

            String row[] = ExportOnServerVerifier.RoughCSVTokenizer.tokenize(new String(body, Charsets.UTF_8));
            if (row.length != 29) {
                return;
            }
            ExportOnServerVerifier.ValidationErr err = null;
            try {
                err = ExportOnServerVerifier.verifyRow(row);
            } catch (ExportOnServerVerifier.ValidationErr validationErr) {
                validationErr.printStackTrace();
            }
            if (err != null) {
                System.out.println("ERROR in validation: " + err.toString());
            }

            if (++m_verifiedRows % VALIDATION_REPORT_INTERVAL == 0) {
                System.out.println("Verified " + m_verifiedRows + " rows.");
            }

            channel.basicAck(deliveryTag, false);
        }
    };
}

From source file:in.cs654.chariot.ashva.AshvaServer.java

License:Open Source License

public static void main(String[] args) {
    Connection connection = null;
    Channel channel;
    try {
        final ConnectionFactory factory = new ConnectionFactory();
        factory.setHost(HOST_IP_ADDR);
        connection = factory.newConnection();
        channel = connection.createChannel();
        channel.queueDeclare(RPC_QUEUE_NAME, false, false, false, null);
        channel.basicQos(1);

        final QueueingConsumer consumer = new QueueingConsumer(channel);
        channel.basicConsume(RPC_QUEUE_NAME, false, consumer);
        LOGGER.info("Ashva Server started. Waiting for requests...");

        AshvaHelper.joinOrStartChariotPool();
        AshvaHelper.startHeartbeat();

        while (true) {
            final QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            BasicResponse response = new BasicResponse();
            BasicRequest request = new BasicRequest();
            final BasicProperties props = delivery.getProperties();
            final BasicProperties replyProps = new BasicProperties.Builder()
                    .correlationId(props.getCorrelationId()).build();
            try {
                final DatumReader<BasicRequest> avroReader = new SpecificDatumReader<BasicRequest>(
                        BasicRequest.class);
                decoder = DecoderFactory.get().binaryDecoder(delivery.getBody(), decoder);
                request = avroReader.read(request, decoder);
                response = AshvaProcessor.process(request);

            } catch (Exception e) {
                e.printStackTrace();
                LOGGER.severe("Error in handling request: " + e.getMessage());
                response = ResponseFactory.getErrorResponse(request);

            } finally {
                baos.reset();
                final DatumWriter<BasicResponse> avroWriter = new SpecificDatumWriter<BasicResponse>(
                        BasicResponse.class);
                encoder = EncoderFactory.get().binaryEncoder(baos, encoder);
                avroWriter.write(response, encoder);
                encoder.flush();
                LOGGER.info("Responding to request id " + request.getRequestId() + " " + response.getStatus());
                channel.basicPublish("", props.getReplyTo(), replyProps, baos.toByteArray());
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        LOGGER.severe("Error in RPC server: " + e.getMessage());
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ignore) {
            }
        }
    }
}

From source file:in.cs654.chariot.prashti.PrashtiServer.java

License:Open Source License

public static void main(String[] args) {
    Connection connection = null;
    Channel channel;
    try {
        final ConnectionFactory factory = new ConnectionFactory();
        factory.setHost(HOST_IP_ADDR);
        connection = factory.newConnection();
        channel = connection.createChannel();
        channel.queueDeclare(RPC_QUEUE_NAME, false, false, false, null);
        channel.basicQos(1);

        final QueueingConsumer consumer = new QueueingConsumer(channel);
        channel.basicConsume(RPC_QUEUE_NAME, false, consumer);
        LOGGER.info("Prashti Server started. Waiting for requests...");

        final List<Prashti> prashtiList = D2Client.getOnlinePrashtiServers();
        final String ipAddr = CommonUtils.getIPAddress();
        prashtiList.add(new Prashti(ipAddr));
        LOGGER.info("Notifying D2 to set Prashti Server IP Address");
        D2Client.setPrashtiServers(prashtiList);

        while (true) {
            final QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            BasicResponse response = new BasicResponse();
            BasicRequest request = new BasicRequest();
            final BasicProperties props = delivery.getProperties();
            final BasicProperties replyProps = new BasicProperties.Builder()
                    .correlationId(props.getCorrelationId()).build();
            try {
                final DatumReader<BasicRequest> avroReader = new SpecificDatumReader<BasicRequest>(
                        BasicRequest.class);
                decoder = DecoderFactory.get().binaryDecoder(delivery.getBody(), decoder);
                request = avroReader.read(request, decoder);
                response = RequestProcessor.process(request);

            } catch (Exception e) {
                e.printStackTrace();
                LOGGER.severe("Error in handling request: " + e.getMessage());
                response = ResponseFactory.getErrorResponse(request);

            } finally {
                baos.reset();
                final DatumWriter<BasicResponse> avroWriter = new SpecificDatumWriter<BasicResponse>(
                        BasicResponse.class);
                encoder = EncoderFactory.get().binaryEncoder(baos, encoder);
                avroWriter.write(response, encoder);
                encoder.flush();
                LOGGER.info("Responding to request id " + request.getRequestId() + " " + response.getStatus());
                channel.basicPublish("", props.getReplyTo(), replyProps, baos.toByteArray());
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        LOGGER.severe("Error in RPC server: " + e.getMessage());
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ignore) {
            }
        }
    }
}

From source file:in.cs654.chariot.turagraksa.ZooKeeperServer.java

License:Open Source License

public static void main(String[] args) {
    Connection connection = null;
    Channel channel;
    try {
        final ConnectionFactory factory = new ConnectionFactory();
        factory.setHost(HOST_IP_ADDR);
        connection = factory.newConnection();
        channel = connection.createChannel();
        channel.queueDeclare(RPC_QUEUE_NAME, false, false, false, null);
        channel.basicQos(1);

        final QueueingConsumer consumer = new QueueingConsumer(channel);
        channel.basicConsume(RPC_QUEUE_NAME, false, consumer);
        LOGGER.info("ZooKeeper Server started. Waiting for requests...");
        ZooKeeper.startPingEcho();

        while (true) {
            final QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            BasicResponse response = new BasicResponse();
            BasicRequest request = new BasicRequest();
            final AMQP.BasicProperties props = delivery.getProperties();
            final AMQP.BasicProperties replyProps = new AMQP.BasicProperties.Builder()
                    .correlationId(props.getCorrelationId()).build();
            try {
                final DatumReader<BasicRequest> avroReader = new SpecificDatumReader<BasicRequest>(
                        BasicRequest.class);
                decoder = DecoderFactory.get().binaryDecoder(delivery.getBody(), decoder);
                request = avroReader.read(request, decoder);
                response = ZooKeeperProcessor.process(request);

            } catch (Exception e) {
                e.printStackTrace();
                LOGGER.severe("Error in handling request: " + e.getMessage());
                response = ResponseFactory.getErrorResponse(request);

            } finally {
                baos.reset();
                final DatumWriter<BasicResponse> avroWriter = new SpecificDatumWriter<BasicResponse>(
                        BasicResponse.class);
                encoder = EncoderFactory.get().binaryEncoder(baos, encoder);
                avroWriter.write(response, encoder);
                encoder.flush();
                channel.basicPublish("", props.getReplyTo(), replyProps, baos.toByteArray());
                channel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        LOGGER.severe("Error in RPC server: " + e.getMessage());
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ignore) {
            }
        }
    }
}