List of usage examples for com.rabbitmq.client.Channel.getConnection()
Connection getConnection();
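Most of the examples below call getConnection() to reach back from a Channel to its owning Connection, usually to close or abort the connection once work on the channel is done. A minimal sketch of that pattern follows, assuming a local broker with default credentials; the host and queue name are placeholders, not taken from any of the projects listed here.

import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;

public class GetConnectionSketch {
    public static void main(String[] args) throws Exception {
        ConnectionFactory factory = new ConnectionFactory();
        factory.setHost("localhost"); // assumed broker host

        Connection connection = factory.newConnection();
        Channel channel = connection.createChannel();
        try {
            // hypothetical throwaway queue, just to give the channel something to do
            channel.queueDeclare("demo-queue", false, false, true, null);
        } finally {
            // Reach back to the owning connection via the channel, as the examples below do,
            // then close the channel before its connection.
            Connection owner = channel.getConnection();
            if (channel.isOpen()) {
                channel.close();
            }
            if (owner.isOpen()) {
                owner.close();
            }
        }
    }
}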
From source file:amqp.AmqpClient.java
License:Apache License
/**
 * Tries to close the channel ignoring any {@link java.io.IOException}s that may occur while doing so.
 *
 * @param channel channel to close
 */
protected void closeChannelSilently(Channel channel) {
    if (channel != null) {
        try {
            channel.close();
        } catch (IOException e) {
            LOG.warn("Problem closing down channel", e);
        } catch (AlreadyClosedException e) {
            LOG.debug("Channel was already closed");
        } catch (ShutdownSignalException e) {
            // we can ignore this since we are shutting down
            LOG.debug("Got a shutdown signal while closing channel", e);
        }
        closeConnectionSilently(channel.getConnection());
    }
}
From source file:com.navercorp.pinpoint.plugin.rabbitmq.client.interceptor.RabbitMQConsumerDispatchInterceptor.java
License:Apache License
private Trace createTrace(Object target, Object[] args) {
    final Channel channel = ((ChannelGetter) target)._$PINPOINT$_getChannel();
    if (channel == null) {
        logger.debug("channel is null, skipping trace");
        return null;
    }
    final Connection connection = channel.getConnection();
    if (connection == null) {
        logger.debug("connection is null, skipping trace");
        return null;
    }
    Envelope envelope = (Envelope) args[2];
    String exchange = envelope.getExchange();
    if (RabbitMQClientPluginConfig.isExchangeExcluded(exchange, excludeExchangeFilter)) {
        if (isDebug) {
            logger.debug("exchange {} is excluded", exchange);
        }
        return null;
    }
    // args[3] may be null
    AMQP.BasicProperties properties = (AMQP.BasicProperties) args[3];
    Map<String, Object> headers = getHeadersFromBasicProperties(properties);
    // If this transaction is not traceable, mark as disabled.
    if (headers.get(RabbitMQClientConstants.META_SAMPLED) != null) {
        return traceContext.disableSampling();
    }
    final TraceId traceId = populateTraceIdFromRequest(headers);
    // If there's no transaction id, a new transaction begins here.
    final Trace trace = traceId == null ? traceContext.newTraceObject() : traceContext.continueTraceObject(traceId);
    if (trace.canSampled()) {
        final SpanRecorder recorder = trace.getSpanRecorder();
        recordRootSpan(recorder, connection, envelope, headers);
    }
    return trace;
}
From source file:info.pancancer.arch3.reporting.Arch3ReportImpl.java
License:Open Source License
@Override
public Map<String, Status> getLastStatus() {
    String queueName = settings.getString(Constants.RABBIT_QUEUE_NAME);
    final String resultQueueName = queueName + "_results";
    String resultsQueue = null;
    Channel resultsChannel = null;
    synchronized (Arch3ReportImpl.this) {
        try {
            // read from
            resultsChannel = Utilities.setupExchange(settings, resultQueueName);
            // this declares a queue exchange where multiple consumers get the same message:
            // https://www.rabbitmq.com/tutorials/tutorial-three-java.html
            resultsQueue = Utilities.setupQueueOnExchange(resultsChannel, queueName, "SlackReportBot");
            resultsChannel.queueBind(resultsQueue, resultQueueName, "");
            QueueingConsumer resultsConsumer = new QueueingConsumer(resultsChannel);
            resultsChannel.basicConsume(resultsQueue, false, resultsConsumer);
            int messagesToCache = db.getJobs(JobState.RUNNING).size();
            Map<String, Status> cache = new TreeMap<>();
            int loop = 0;
            do {
                loop++;
                QueueingConsumer.Delivery delivery = resultsConsumer.nextDelivery(Base.FIVE_SECOND_IN_MILLISECONDS);
                if (delivery == null) {
                    continue;
                }
                String messageFromQueue = new String(delivery.getBody(), StandardCharsets.UTF_8);
                // now parse it as JSONObj
                Status status = new Status().fromJSON(messageFromQueue);
                cache.put(status.getIpAddress(), status);
            } while (loop < LOOP_LIMIT && cache.size() < messagesToCache);
            return cache;
        } catch (IOException | ShutdownSignalException | InterruptedException | TimeoutException
                | ConsumerCancelledException ex) {
            throw new RuntimeException(ex);
        } finally {
            try {
                if (resultsQueue != null && resultsChannel != null) {
                    resultsChannel.queueDelete(resultsQueue);
                }
                if (resultsChannel != null) {
                    resultsChannel.close();
                    resultsChannel.getConnection().close();
                }
            } catch (IOException | TimeoutException ex) {
                System.err.println("Could not close channel");
            }
        }
    }
}
From source file:info.pancancer.arch3.worker.WorkerHeartbeat.java
License:Open Source License
@Override
public void run() {
    Channel reportingChannel = null;
    try {
        try {
            reportingChannel = Utilities.setupExchange(settings, this.queueName);
        } catch (IOException | TimeoutException | AlreadyClosedException e) {
            LOG.error("Exception caught! Queue channel could not be opened, waiting. Exception is: " + e.getMessage(), e);
            // retry after a minute, do not die simply because the launcher is unavailable, it may come back
            Thread.sleep(Base.ONE_MINUTE_IN_MILLISECONDS);
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        LOG.info("Caught interrupt signal, heartbeat shutting down.", e);
        return;
    }

    LOG.info("starting heartbeat thread, will send heartbeat message every " + secondsDelay + " seconds.");

    while (!Thread.interrupted()) {
        // byte[] stdOut = this.getMessageBody().getBytes(StandardCharsets.UTF_8);
        try {
            try {
                Status heartbeatStatus = new Status();
                heartbeatStatus.setJobUuid(this.jobUuid);
                heartbeatStatus.setMessage("job is running; IP address: " + networkID);
                heartbeatStatus.setState(StatusState.RUNNING);
                heartbeatStatus.setType(Utilities.JOB_MESSAGE_TYPE);
                heartbeatStatus.setVmUuid(this.vmUuid);
                heartbeatStatus.setIpAddress(networkID);

                // String stdOut = this.statusSource.getStdOut();
                Lock lock = new ReentrantLock();
                lock.lock();
                String stdOut = this.statusSource.getStdOut(stdoutSnipSize);
                String stdErr = this.statusSource.getStdErr();
                lock.unlock();

                heartbeatStatus.setStdout(stdOut);
                heartbeatStatus.setStderr(stdErr);
                String heartBeatMessage = heartbeatStatus.toJSON();
                LOG.debug("Sending heartbeat message to " + queueName + ", with body: " + heartBeatMessage);
                reportingChannel.basicPublish(queueName, queueName, MessageProperties.PERSISTENT_TEXT_PLAIN,
                        heartBeatMessage.getBytes(StandardCharsets.UTF_8));
                reportingChannel.waitForConfirms();

                Thread.sleep((long) (secondsDelay * MILLISECONDS_PER_SECOND));
            } catch (IOException | AlreadyClosedException e) {
                LOG.error("IOException caught! Message may not have been published. Exception is: " + e.getMessage(), e);
                // retry after a minute, do not die simply because the launcher is unavailable, it may come back
                Thread.sleep(Base.ONE_MINUTE_IN_MILLISECONDS);
            }
        } catch (InterruptedException e) {
            LOG.info("Heartbeat shutting down.");
            if (reportingChannel.getConnection().isOpen()) {
                try {
                    reportingChannel.getConnection().close();
                } catch (IOException e1) {
                    LOG.error("Error closing reportingChannel connection: " + e1.getMessage(), e1);
                }
            }
            if (reportingChannel.isOpen()) {
                try {
                    reportingChannel.close();
                } catch (IOException e1) {
                    LOG.error("Error (IOException) closing reportingChannel: " + e1.getMessage(), e1);
                } catch (TimeoutException e1) {
                    LOG.error("Error (TimeoutException) closing reportingChannel: " + e1.getMessage(), e1);
                }
            }
            LOG.debug("reporting channel open: " + reportingChannel.isOpen());
            LOG.debug("reporting channel connection open: " + reportingChannel.getConnection().isOpen());
            Thread.currentThread().interrupt();
        }
    }
}
From source file:info.pancancer.arch3.worker.WorkerRunnable.java
License:Open Source License
@Override
public void run() {
    int max = maxRuns;
    try {
        // the VM UUID
        log.info(" WORKER VM UUID provided as: '" + vmUuid + "'");
        // write to
        // TODO: Add some sort of "local debug" mode so that developers working on their local
        // workstation can declare the queue if it doesn't exist. Normally, the results queue is
        // created by the Coordinator.
        resultsChannel = Utilities.setupExchange(settings, this.resultsQueueName);

        while (max > 0 || this.endless) {
            log.debug(max + " remaining jobs will be executed");
            log.info(" WORKER IS PREPARING TO PULL JOB FROM QUEUE " + this.jobQueueName);
            if (!endless) {
                max--;
            }
            // jobChannel needs to be created inside the loop because it is closed inside the loop, and it is closed inside this loop to
            // prevent pre-fetching.
            Channel jobChannel = Utilities.setupQueue(settings, this.jobQueueName);
            if (jobChannel == null) {
                throw new NullPointerException("jobChannel is null for queue: " + this.jobQueueName
                        + ". Something bad must have happened while trying to set up the queue connections. Please ensure that your configuration is correct.");
            }
            QueueingConsumer consumer = new QueueingConsumer(jobChannel);
            jobChannel.basicConsume(this.jobQueueName, false, consumer);
            QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            log.info(vmUuid + " received " + delivery.getEnvelope().toString());
            if (delivery.getBody() != null) {
                String message = new String(delivery.getBody(), StandardCharsets.UTF_8);
                if (message.trim().length() > 0) {
                    log.info(" [x] Received JOBS REQUEST '" + message + "' @ " + vmUuid);
                    Job job = new Job().fromJSON(message);
                    Status status = new Status(vmUuid, job.getUuid(), StatusState.RUNNING, Utilities.JOB_MESSAGE_TYPE,
                            "job is starting", this.networkAddress);
                    status.setStderr("");
                    status.setStdout("");
                    String statusJSON = status.toJSON();
                    log.info(" WORKER LAUNCHING JOB");
                    // greedy acknowledge, it will be easier to deal with lost jobs than zombie workers in hostile OpenStack
                    // environments
                    log.info(vmUuid + " acknowledges " + delivery.getEnvelope().toString());
                    jobChannel.basicAck(delivery.getEnvelope().getDeliveryTag(), false);
                    // we need to close the channel IMMEDIATELY to complete the ACK.
                    jobChannel.close();
                    // Close the connection object as well, or the main thread may not exit because of still-open-and-in-use resources.
                    jobChannel.getConnection().close();

                    WorkflowResult workflowResult = new WorkflowResult();
                    if (testMode) {
                        workflowResult.setWorkflowStdout("everything is awesome");
                        workflowResult.setExitCode(0);
                    } else {
                        String seqwareEngine = settings.getString(Constants.WORKER_SEQWARE_ENGINE, Constants.SEQWARE_WHITESTAR_ENGINE);
                        String seqwareSettingsFile = settings.getString(Constants.WORKER_SEQWARE_SETTINGS_FILE);
                        String dockerImage = settings.getString(Constants.WORKER_SEQWARE_DOCKER_IMAGE_NAME);
                        workflowResult = launchJob(statusJSON, job, seqwareEngine, seqwareSettingsFile, dockerImage);
                    }

                    status = new Status(vmUuid, job.getUuid(),
                            workflowResult.getExitCode() == 0 ? StatusState.SUCCESS : StatusState.FAILED,
                            Utilities.JOB_MESSAGE_TYPE, "job is finished", networkAddress);
                    status.setStderr(workflowResult.getWorkflowStdErr());
                    status.setStdout(workflowResult.getWorkflowStdout());
                    statusJSON = status.toJSON();
                    log.info(" WORKER FINISHING JOB");
                    finishJob(statusJSON);
                } else {
                    log.info(NO_MESSAGE_FROM_QUEUE_MESSAGE);
                }
                // we need to close the channel *conditionally*
                if (jobChannel.isOpen()) {
                    jobChannel.close();
                }
                // Close the connection object as well, or the main thread may not exit because of still-open-and-in-use resources.
                if (jobChannel.getConnection().isOpen()) {
                    jobChannel.getConnection().close();
                }
            } else {
                log.info(NO_MESSAGE_FROM_QUEUE_MESSAGE);
            }
            if (endless) {
                log.info("attempting to reset workspace");
                DefaultExecutor executor = new DefaultExecutor();
                DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
                // attempt a cleanup
                CommandLine cli = new CommandLine("sudo");
                List<String> args = new ArrayList<>(Arrays.asList("rm", "-rf", "/datastore/*"));
                cli.addArguments(args.toArray(new String[args.size()]));
                executor.execute(cli, resultHandler);
                // Use the result handler for non-blocking call, so this way we should be able to get updates of
                // stdout and stderr while the command is running.
                resultHandler.waitFor();
                log.info("exit code for cleanup: " + resultHandler.getExitValue());
            }
        }
        log.info(" \n\n\nWORKER FOR VM UUID HAS FINISHED!!!: '" + vmUuid + "'\n\n");
        // turns out this is needed when multiple threads are reading from the same
        // queue otherwise you end up with multiple unacknowledged messages being undeliverable to other workers!!!
        if (resultsChannel != null && resultsChannel.isOpen()) {
            resultsChannel.close();
            resultsChannel.getConnection().close();
        }
        log.debug("result channel open: " + resultsChannel.isOpen());
        log.debug("result channel connection open: " + resultsChannel.getConnection().isOpen());
    } catch (Exception ex) {
        log.error(ex.getMessage(), ex);
    }
}
From source file:mx.bigdata.utils.amqp.AMQPFinalizer.java
License:Apache License
public synchronized void registerChannel(Channel channel) {
    registerConnection(channel.getConnection());
}
From source file:mx.bigdata.utils.amqp.ReconnectingConsumer.java
License:Apache License
private boolean initConsumer() {
    Channel channel = null;
    try {
        channel = amqp.declareChannel(factory, key);
        String queue = createQueue(amqp, channel, key);
        this.consumer = new DefaultConsumer(channel) {
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body)
                    throws IOException {
                ReconnectingConsumer.this.handleDelivery(consumerTag, envelope, properties, body);
            }

            @Override
            public void handleConsumeOk(String consumerTag) {
                ReconnectingConsumer.this.consumerTag = consumerTag;
            }

            @Override
            public void handleCancel(String consumerTag) throws IOException {
                logger.warn(tag + " handleCancel for consumer tag: " + consumerTag);
                try {
                    this.getChannel().basicCancel(ReconnectingConsumer.this.consumerTag);
                } catch (Exception ignore) {
                }
                this.getChannel().getConnection().abort(5000);
                reconnect();
            }

            @Override
            public void handleShutdownSignal(java.lang.String consumerTag, ShutdownSignalException sig) {
                try {
                    getChannel().basicCancel(ReconnectingConsumer.this.consumerTag);
                } catch (Exception ignore) {
                }
                getChannel().getConnection().abort(5000);
                if (!sig.isInitiatedByApplication()) {
                    logger.warn(tag + " ShutdownSignal for tag: " + tag + "\n\t consumer tag: " + consumerTag
                            + "\n\t reason: " + sig.getReason() + "\n\t reference: " + sig.getReference() + "\n\t ", sig);
                    reconnect();
                } else {
                    logger.debug(tag + " ShutdownSignal for tag: " + tag + "\n\t consumer tag: " + consumerTag
                            + "\n\t reason: " + sig.getReason() + "\n\t reference: " + sig.getReference() + "\n\t ", sig);
                    consumer = null;
                }
            }
        };
        channel.basicConsume(queue, false, consumer);
        logger.info("Consumer " + tag + " initialized");
        return true;
    } catch (Throwable e) {
        logger.error("Exception initializing consumer " + tag + ": ", e);
        if (channel != null) {
            channel.getConnection().abort(5000);
        }
    }
    return false;
}
From source file:net.echinopsii.ariane.community.messaging.rabbitmq.ServiceFactory.java
License:Open Source License
/**
 * (internal usage)
 * Create a new MomConsumer to consume messages from a RabbitMQ source and forward them to the request actor.
 *
 * @param source request source queue
 * @param channel RabbitMQ channel
 * @param requestActor request actor ref to treat the message
 * @param isMsgDebugOnTimeout debug on timeout if true
 * @return the new MomConsumer
 */
private static MomConsumer createConsumer(final String source, final Channel channel, final ActorRef requestActor,
        final boolean isMsgDebugOnTimeout) {
    return new MomConsumer() {
        private boolean isRunning = false;

        @Override
        public void run() {
            try {
                Map<String, Object> finalMessage;
                channel.queueDeclare(source, false, false, true, null);
                QueueingConsumer consumer = new QueueingConsumer(channel);
                channel.basicConsume(source, false, consumer);
                isRunning = true;
                while (isRunning) {
                    try {
                        QueueingConsumer.Delivery delivery = consumer.nextDelivery(10);
                        if (delivery != null && isRunning) {
                            finalMessage = translator.decode(new Message().setEnvelope(delivery.getEnvelope())
                                    .setProperties(delivery.getProperties()).setBody(delivery.getBody()));
                            if (((HashMap) finalMessage).containsKey(MomMsgTranslator.MSG_TRACE) && isMsgDebugOnTimeout)
                                ((MomLogger) log).setMsgTraceLevel(true);
                            ((MomLogger) log).traceMessage("MomConsumer(" + source + ").run", finalMessage);
                            requestActor.tell(delivery, null);
                            if (((HashMap) finalMessage).containsKey(MomMsgTranslator.MSG_TRACE) && isMsgDebugOnTimeout)
                                ((MomLogger) log).setMsgTraceLevel(false);
                        }
                    } catch (InterruptedException e) {
                        // no message
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                try {
                    if (channel.getConnection() != null && channel.getConnection().isOpen()) {
                        channel.queueDelete(source);
                        //channel.close();
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }

        @Override
        public boolean isRunning() {
            return isRunning;
        }

        @Override
        public void start() {
            new Thread(this).start();
        }

        @Override
        public void stop() {
            isRunning = false;
            try {
                Thread.sleep(20);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    };
}
From source file:net.echinopsii.ariane.community.messaging.rabbitmq.ServiceFactory.java
License:Open Source License
/**
 * Create a new subscriber service.
 *
 * @param baseSource the feed base source
 * @param selector the selector on the feed source (can be null)
 * @param feedWorker the feed message worker
 * @return the new subscriber service
 */
@Override
public MomAkkaService subscriberService(final String baseSource, String selector, AppMsgWorker feedWorker) {
    MomAkkaService ret = null;
    ActorRef subsActor;
    MomConsumer consumer;
    final Connection connection = ((Client) super.getMomClient()).getConnection();

    if (selector == null || selector.equals(""))
        selector = "#";

    if (connection != null && connection.isOpen()) {
        subsActor = super.getMomClient().getActorSystem().actorOf(MsgSubsActor.props(feedWorker),
                baseSource + "." + ((selector.equals("#")) ? "all" : selector) + "_msgWorker");
        final ActorRef runnableSubsActor = subsActor;
        final String select = selector;
        final Client cli = ((Client) super.getMomClient());

        consumer = new MomConsumer() {
            private boolean isRunning = false;

            @Override
            public void run() {
                Channel channel = null;
                try {
                    channel = connection.createChannel();
                    channel.exchangeDeclare(baseSource, "topic");
                    String queueName = cli.getClientID() + "_SUBS_2_" + baseSource + "." + select;
                    channel.queueDeclare(queueName, false, true, false, null);
                    channel.queueBind(queueName, baseSource, select);
                    QueueingConsumer consumer = new QueueingConsumer(channel);
                    channel.basicConsume(queueName, true, consumer);
                    isRunning = true;
                    while (isRunning) {
                        try {
                            QueueingConsumer.Delivery delivery = consumer.nextDelivery(10);
                            if (delivery != null && isRunning)
                                runnableSubsActor.tell(delivery, null);
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                        }
                    }
                    if (channel.getConnection().isOpen())
                        channel.close();
                } catch (IOException e) {
                    e.printStackTrace();
                } finally {
                    try {
                        if (channel.getConnection().isOpen())
                            channel.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }

            @Override
            public boolean isRunning() {
                return isRunning;
            }

            @Override
            public void start() {
                new Thread(this).start();
            }

            @Override
            public void stop() {
                isRunning = false;
                try {
                    Thread.sleep(20);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
        consumer.start();
        ret = new MomAkkaService().setMsgWorker(subsActor).setConsumer(consumer).setClient(super.getMomClient());
        super.getServices().add(ret);
    }
    return ret;
}
From source file:net.roboconf.messaging.internal.AbstractRabbitMqTest.java
License:Apache License
/**
 * A method to check whether RabbitMQ is running or not.
 * <p>
 * Tests that must be skipped if it is not running must begin with
 * <code>
 * Assume.assumeTrue( rabbitMqIsRunning );
 * </code>
 * </p>
 */
@Before
public void checkRabbitMQIsRunning() throws Exception {

    Channel channel = null;
    try {
        channel = createTestChannel();
        Object o = channel.getConnection().getServerProperties().get("version");
        String version = String.valueOf(o);
        if (!isVersionGOEThreeDotTwo(version)) {
            Logger logger = Logger.getLogger(getClass().getName());
            logger.warning("Tests are skipped because RabbitMQ must be at least in version 3.2.x.");
            this.rabbitMqIsRunning = false;
        }

    } catch (Exception e) {
        Logger logger = Logger.getLogger(getClass().getName());
        logger.warning("Tests are skipped because RabbitMQ is not running.");
        logger.finest(Utils.writeException(e));
        this.rabbitMqIsRunning = false;

    } finally {
        if (channel != null) {
            channel.close();
            channel.getConnection().close();
        }
    }
}