List of usage examples for the java.util.concurrent.ArrayBlockingQueue constructor
public ArrayBlockingQueue(int capacity)
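Before the real-world examples below, here is a minimal, self-contained sketch of what the capacity argument buys you (class name, queue contents, and timeout are illustrative, not taken from any project in this list): the queue is a fixed-size, array-backed FIFO buffer, put blocks when it is full, and offer fails fast instead.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class CapacityDemo {
        public static void main(String[] args) throws InterruptedException {
            // A fixed-capacity, array-backed FIFO queue; capacity cannot grow later.
            BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

            queue.put("a");                      // succeeds immediately
            queue.put("b");                      // fills the queue to capacity
            boolean accepted = queue.offer("c"); // returns false instead of blocking
            System.out.println("offer on full queue: " + accepted); // false

            // A timed offer blocks up to the given time, then gives up.
            accepted = queue.offer("c", 100, TimeUnit.MILLISECONDS);
            System.out.println("timed offer on full queue: " + accepted); // false

            System.out.println(queue.take()); // "a" - FIFO order
        }
    }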
From source file:com.vsct.dt.strowgr.admin.gui.StrowgrMain.java
@Override
public void run(StrowgrConfiguration configuration, Environment environment) throws Exception {
    LOGGER.info("start dropwizard configuration");

    /* Allow the use of another NSQ channel for development purposes */
    if (configuration.getNsqChannel() != null) {
        NSQ.CHANNEL = configuration.getNsqChannel();
    }

    /* Main EventBus */
    BlockingQueue eventBusQueue = new ArrayBlockingQueue<>(100);
    ExecutorService executor = environment.lifecycle().executorService("main-bus-handler-threads")
            .workQueue(eventBusQueue)
            .minThreads(configuration.getThreads())
            .maxThreads(configuration.getThreads())
            .build();
    EventBus eventBus = new AsyncEventBus(executor, (exception, context) -> {
        LOGGER.error("exception on main event bus. Context: " + subscriberExceptionContextToString(context),
                exception);
    });
    eventBus.register(this); // for dead events

    /* Templates */
    TemplateGenerator templateGenerator = new MustacheTemplateGenerator();
    UriTemplateLocator templateLocator = new UriTemplateLocator();

    /* Repository */
    ConsulRepository repository = configuration.getConsulRepositoryFactory().buildAndManageBy(environment);
    repository.initPorts();

    /* EntryPoint State Machine */
    EntryPointEventHandler eventHandler = EntryPointEventHandler.backedBy(repository, repository)
            .getPortsWith(repository)
            .findTemplatesWith(templateLocator)
            .generatesTemplatesWith(templateGenerator)
            .commitTimeoutIn(configuration.getCommitTimeout())
            .outputMessagesTo(eventBus);
    eventBus.register(eventHandler);

    /* NSQ Consumers */
    // Object mapper used for NSQ messages
    ObjectMapper objectMapper = new ObjectMapper();
    // retrieve NSQLookup configuration
    NSQLookup nsqLookup = configuration.getNsqLookupfactory().build();
    // NSQConsumers configuration
    NSQConfig consumerNsqConfig = configuration.getNsqConsumerConfigFactory().build();
    NSQConsumersFactory nsqConsumersFactory = NSQConsumersFactory.make(nsqLookup, consumerNsqConfig,
            objectMapper);
    ManagedHaproxy managedHaproxy = ManagedHaproxy.create(repository,
            configuration.getHandledHaproxyRefreshPeriodSecond());
    Observable<ManagedHaproxy.HaproxyAction> hapRegistrationActionsObservable = managedHaproxy
            .registrationActionsObservable();
    IncomingEvents incomingEvents = IncomingEvents.watch(hapRegistrationActionsObservable, nsqConsumersFactory);

    Observable<EntryPointEvent> nsqEventsObservable = incomingEvents.registerServerEventObservable()
            .map(e -> (EntryPointEvent) e) // downcast
            .mergeWith(incomingEvents.commitFailureEventObservale())
            .mergeWith(incomingEvents.commitSuccessEventObservale());

    // Push all NSQ events to the eventBus.
    // We observeOn a single thread to avoid blocking NIO event loops.
    // EventBusSubscriber applies backpressure with regard to the eventBusQueue.
    nsqEventsObservable.observeOn(Schedulers.newThread())
            .subscribe(new EventBusSubscriber(eventBus, eventBusQueue));

    /* Manage resources */
    environment.lifecycle().manage(new Managed() {
        @Override
        public void start() throws Exception {
            managedHaproxy.startLookup();
        }

        @Override
        public void stop() throws Exception {
            managedHaproxy.stopLookup();
            incomingEvents.shutdownConsumers();
        }
    });

    /* NSQ Producers */
    NSQProducer nsqProducer = configuration.getNsqProducerFactory().build();
    // manage NSQProducer lifecycle by Dropwizard
    environment.lifecycle().manage(new NSQProducerManaged(nsqProducer));
    // Pipeline from eventbus to NSQ producer
    eventBus.register(new ToNSQSubscriber(new NSQDispatcher(nsqProducer)));

    /* Commit schedulers */
    long periodMilliPendingCurrentScheduler = configuration.getPeriodicSchedulerFactory()
            .getPeriodicCommitPendingSchedulerFactory().getPeriodMilli();
    long periodMilliCommitCurrentScheduler = configuration.getPeriodicSchedulerFactory()
            .getPeriodicCommitCurrentSchedulerFactory().getPeriodMilli();
    CommitSchedulerManaged<TryCommitPendingConfigurationEvent> commitPendingScheduler = new CommitSchedulerManaged<>(
            "Commit Pending", repository,
            ep -> new TryCommitPendingConfigurationEvent(CorrelationId.newCorrelationId(),
                    new EntryPointKeyDefaultImpl(ep)),
            eventBus::post, periodMilliPendingCurrentScheduler);
    environment.lifecycle().manage(commitPendingScheduler);
    CommitSchedulerManaged<TryCommitCurrentConfigurationEvent> commitCurrentScheduler = new CommitSchedulerManaged<>(
            "Commit Current", repository,
            ep -> new TryCommitCurrentConfigurationEvent(CorrelationId.newCorrelationId(),
                    new EntryPointKeyDefaultImpl(ep)),
            eventBus::post, periodMilliCommitCurrentScheduler);
    environment.lifecycle().manage(commitCurrentScheduler);

    /* REST Resources */
    EntrypointResources restApiResource = new EntrypointResources(eventBus, repository);
    environment.jersey().register(restApiResource);
    HaproxyResources haproxyResources = new HaproxyResources(repository, templateLocator, templateGenerator);
    environment.jersey().register(haproxyResources);
    PortResources portResources = new PortResources(repository);
    environment.jersey().register(portResources);
    UriTemplateResources uriTemplateResources = new UriTemplateResources(templateLocator, templateGenerator);
    environment.jersey().register(uriTemplateResources);
    AdminResources adminResources = new AdminResources();
    environment.jersey().register(adminResources);
    eventBus.register(restApiResource);

    /* Http Client */
    CloseableHttpClient httpClient = new HttpClientBuilder(environment)
            .using(configuration.getHttpClientConfiguration()).build("http-client");
    NSQHttpClient nsqdHttpClient = new NSQHttpClient("http://" + configuration.getNsqProducerFactory().getHost()
            + ":" + configuration.getNsqProducerFactory().getHttpPort(), httpClient);
    NSQHttpClient nsqLookupdHttpClient = new NSQHttpClient("http://"
            + configuration.getNsqLookupfactory().getHost() + ":" + configuration.getNsqLookupfactory().getPort(),
            httpClient);

    /* Healthchecks */
    environment.healthChecks().register("nsqlookup", new NsqHealthcheck(nsqLookupdHttpClient));
    environment.healthChecks().register("nsqproducer", new NsqHealthcheck(nsqdHttpClient));
    environment.healthChecks().register("consul",
            new ConsulHealthcheck(configuration.getConsulRepositoryFactory().getHost(),
                    configuration.getConsulRepositoryFactory().getPort()));

    /* admin */
    environment.admin().addTask(new InitPortsTask(repository));
    environment.admin().addTask(new HaproxyVipTask(repository));

    /* Exception mappers */
    environment.jersey().register(new ExceptionMapper<IncompleteConfigurationException>() {
        @Override
        public Response toResponse(IncompleteConfigurationException e) {
            return Response.status(500).entity(e.getMessage()).type(MediaType.TEXT_PLAIN_TYPE).build();
        }
    });
}
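The eventBusQueue above bounds how many unhandled bus events can pile up. A minimal sketch of the same idea with a plain JDK ThreadPoolExecutor in place of the Dropwizard builder (thread counts, capacity, and the CallerRunsPolicy back-pressure choice are illustrative assumptions, not taken from Strowgr):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BoundedWorkQueueDemo {
        public static void main(String[] args) {
            // Bounded work queue: at most 100 pending tasks, mirroring eventBusQueue.
            BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(100);
            ThreadPoolExecutor executor = new ThreadPoolExecutor(
                    4, 4, 0L, TimeUnit.MILLISECONDS, workQueue,
                    new ThreadPoolExecutor.CallerRunsPolicy()); // back-pressure when full

            for (int i = 0; i < 1000; i++) {
                final int n = i;
                executor.execute(() -> System.out.println("handled event " + n));
            }
            executor.shutdown();
        }
    }

With CallerRunsPolicy, a full queue slows the submitter down rather than dropping events, which is one common way to get the backpressure behavior the Strowgr comments describe.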
From source file:org.ambud.marauder.source.snort.SnortSourceBack.java
@Override
public void configure(Context context) {
    super.configure(context);
    context = new Context(context.getSubProperties(PROP_PREFIX));
    this.watchDirectory = new File(context.getString(PROP_DIRECTORY, PROP_DEFAULT_DIR));
    this.logBaseName = context.getString(PROP_BASE_NAME, PROP_DEFAULT_FILENAME);
    this.isSequential = context.getBoolean(PROP_IS_SEQUENTIAL, true);
    logger.info("Snort Source will spool/watch - " + this.watchDirectory.getAbsolutePath()
            + " for Snort log files whose names start with:" + this.logBaseName);
    FileSystemManager fsMgr = null;
    try {
        fsMgr = VFS.getManager();
    } catch (FileSystemException e) {
        Throwables.propagate(e);
    }
    try {
        this.watchObject = fsMgr.resolveFile(watchDirectory.getAbsolutePath());
    } catch (FileSystemException e) {
        Throwables.propagate(e);
    }
    this.monitor = new DefaultFileMonitor(new FileListener() {

        @Override
        public void fileChanged(FileChangeEvent arg0) throws Exception {
            // ignore these
        }

        @Override
        public void fileCreated(FileChangeEvent fileEvent) throws Exception {
            if (acceptFile(fileEvent.getFile().getName().getBaseName())) {
                logger.info("Acknowledged new file:" + fileEvent.getFile().getName().getPath());
                processFile(fileEvent.getFile(), true);
            }
        }

        @Override
        public void fileDeleted(FileChangeEvent arg0) throws Exception {
            // acknowledge these
        }
    });
    int bufferSize = context.getInteger("buffer.size", 500);
    this.outputQueue = new ArrayBlockingQueue<MarauderIDSEvent>(bufferSize);
}
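The snippet above sizes its event queue from a "buffer.size" config value. A minimal sketch of the producer side of such a queue (MarauderIDSEvent replaced by a plain String; the drop-on-full offer policy is an assumption for illustration, not taken from the Marauder source):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    public class EventQueueDemo {
        private final BlockingQueue<String> outputQueue;

        public EventQueueDemo(int bufferSize) {
            this.outputQueue = new ArrayBlockingQueue<>(bufferSize);
        }

        /** Non-blocking enqueue: drops the event when the buffer is full. */
        public boolean publish(String event) {
            boolean queued = outputQueue.offer(event);
            if (!queued) {
                System.err.println("buffer full, dropping event: " + event);
            }
            return queued;
        }

        public static void main(String[] args) {
            EventQueueDemo demo = new EventQueueDemo(500); // same default as above
            demo.publish("alert: port scan detected");
        }
    }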
From source file:org.apache.solr.client.solrj.impl.BackupRequestLBHttpSolrServer.java
/**
 * Tries to query a live server from the list provided in Req. Servers in the
 * dead pool are skipped. If a request fails due to an IOException, the server
 * is moved to the dead pool for a certain period of time, or until a test
 * request on that server succeeds.
 *
 * If a request takes longer than backUpRequestDelay, the request will be sent
 * to the next server in the list; this continues until there is a response,
 * the server list is exhausted, or the number of requests in flight equals
 * maximumConcurrentRequests.
 *
 * Servers are queried in the exact order given (except servers currently in
 * the dead pool are skipped). If no live servers from the provided list
 * remain to be tried, a number of previously skipped dead servers will be
 * tried. Req.getNumDeadServersToTry() controls how many dead servers will be
 * tried.
 *
 * If no live servers are found a SolrServerException is thrown.
 *
 * @param req
 *          contains both the request as well as the list of servers to query
 *
 * @return the result of the request
 */
@Override
public Rsp request(Req req) throws SolrServerException, IOException {
    ArrayBlockingQueue<Future<RequestTaskState>> queue = new ArrayBlockingQueue<Future<RequestTaskState>>(
            maximumConcurrentRequests + 1);
    ExecutorCompletionService<RequestTaskState> executer = new ExecutorCompletionService<RequestTaskState>(
            threadPoolExecuter, queue);
    List<ServerWrapper> skipped = new ArrayList<ServerWrapper>(req.getNumDeadServersToTry());
    int inFlight = 0;
    RequestTaskState returnedRsp = null;
    Exception ex = null;

    for (String serverStr : req.getServers()) {
        serverStr = normalize(serverStr);
        // if the server is currently a zombie, just skip to the next one
        ServerWrapper wrapper = zombieServers.get(serverStr);
        if (wrapper != null) {
            if (tryDeadServers && skipped.size() < req.getNumDeadServersToTry()) {
                skipped.add(wrapper);
            }
            continue;
        }
        HttpSolrServer server = makeServer(serverStr);
        Callable<RequestTaskState> task = createRequestTask(server, req, false);
        executer.submit(task);
        inFlight++;

        returnedRsp = getResponseIfReady(executer, inFlight >= maximumConcurrentRequests);
        if (returnedRsp == null) {
            // a null response signifies that the response took too long
            log.info("Server :{} did not respond before the backUpRequestDelay time of {} elapsed",
                    server.getBaseURL(), backUpRequestDelay);
            continue;
        }
        inFlight--;
        if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
            return returnedRsp.response;
        } else if (returnedRsp.stateDescription == TaskState.ServerException) {
            ex = returnedRsp.exception;
        } else if (returnedRsp.stateDescription == TaskState.RequestException) {
            throw new SolrServerException(returnedRsp.exception);
        }
    }

    // no response, so try the zombie servers
    if (tryDeadServers) {
        if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
            // try the servers we previously skipped
            for (ServerWrapper wrapper : skipped) {
                Callable<RequestTaskState> task = createRequestTask(wrapper.solrServer, req, true);
                executer.submit(task);
                inFlight++;
                returnedRsp = getResponseIfReady(executer, inFlight >= maximumConcurrentRequests);
                if (returnedRsp == null) {
                    log.info("Server :{} did not respond before the backUpRequestDelay time of {} elapsed",
                            wrapper.getKey(), backUpRequestDelay);
                    continue;
                }
                inFlight--;
                if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                    return returnedRsp.response;
                } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                    ex = returnedRsp.exception;
                } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                    throw new SolrServerException(returnedRsp.exception);
                }
            }
        }
    }

    // All current attempts could be slower than backUpRequestDelay, or the
    // returned response could be from a struggling server, so we need to wait
    // until we get a good response or all tasks are exhausted.
    if (returnedRsp == null || returnedRsp.stateDescription == TaskState.ServerException) {
        while (inFlight > 0) {
            returnedRsp = getResponseIfReady(executer, true);
            inFlight--;
            if (returnedRsp.stateDescription == TaskState.ResponseReceived) {
                return returnedRsp.response;
            } else if (returnedRsp.stateDescription == TaskState.ServerException) {
                ex = returnedRsp.exception;
            } else if (returnedRsp.stateDescription == TaskState.RequestException) {
                throw new SolrServerException(returnedRsp.exception);
            }
        }
    }

    if (ex == null) {
        throw new SolrServerException("No live SolrServers available to handle this request");
    } else {
        throw new SolrServerException(
                "No live SolrServers available to handle this request:" + zombieServers.keySet(), ex);
    }
}
From source file:org.apache.hadoop.hdfs.job.MyJob.java
public MyJob(NameNode nameNode, Configuration conf) {
    this(nameNode, null, conf, new ArrayBlockingQueue<Job>(conf.getInt("hdfs.job.tatol", 20)), true);
}
From source file:com.srotya.monitoring.kafka.util.KafkaConsumerOffsetUtil.java
private KafkaConsumerOffsetUtil(KafkaMonitorConfiguration kafkaConfiguration, ZKClient zkClient,
        boolean enableHistory, StorageEngine server) {
    this.kafkaConfiguration = kafkaConfiguration;
    this.zkClient = zkClient;
    this.enableHistory = enableHistory;
    this.server = server;
    this.topics = new ConcurrentSkipListSet<>();
    brokerHosts = new ArrayBlockingQueue<>(kafkaConfiguration.getKafkaBroker().length);
    for (String broker : kafkaConfiguration.getKafkaBroker()) {
        brokerHosts.add(broker);
    }
    Thread th = new Thread(new KafkaNewConsumerOffsetThread(this));
    th.setDaemon(true);
    th.start();
}
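Sizing the queue exactly to the broker list means the non-blocking add() can never fail in the loop above. One way such a queue is then used is as a simple pool of hosts that workers borrow and return; whether the Srotya utility reuses hosts this way is an assumption, and the host names below are illustrative:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    public class BrokerPoolDemo {
        public static void main(String[] args) throws InterruptedException {
            String[] brokers = {"broker1:9092", "broker2:9092"}; // illustrative hosts

            // Sized exactly to the broker list, so add() can never fail here.
            BlockingQueue<String> brokerHosts = new ArrayBlockingQueue<>(brokers.length);
            for (String broker : brokers) {
                brokerHosts.add(broker);
            }

            // Borrow a host, use it, and return it, so the queue doubles as a
            // simple round-robin pool.
            String host = brokerHosts.take();
            try {
                System.out.println("querying offsets via " + host);
            } finally {
                brokerHosts.put(host);
            }
        }
    }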
From source file:annis.gui.resultfetch.ResultFetchJob.java
@Override
public void run() {
    WebResource subgraphRes = Helper.getAnnisWebResource().path("query/search/subgraph");

    // holds the ids of the matches
    MatchGroup result;

    try {
        if (Thread.interrupted()) {
            return;
        }
        // set the progress bar, to give the user some information about the loading process
        ui.accessSynchronously(new Runnable() {
            @Override
            public void run() {
                resultPanel.showMatchSearchInProgress(query);
            }
        });

        // get the matches
        result = futureMatches.get();

        // get the subgraph for each match, when the result is not empty
        if (result.getMatches().isEmpty()) {
            // check if thread was interrupted
            if (Thread.interrupted()) {
                return;
            }
            // nothing found, so inform the user about this
            ui.access(new Runnable() {
                @Override
                public void run() {
                    resultPanel.showNoResult();
                }
            });
        } else {
            if (Thread.interrupted()) {
                return;
            }
            // since ANNIS found something, inform the user that subgraphs are created
            ui.access(new Runnable() {
                @Override
                public void run() {
                    resultPanel.showSubgraphSearchInProgress(query, 0.0f);
                }
            });

            // prepare fetching subgraphs
            final int totalResultSize = result.getMatches().size();
            final BlockingQueue<SaltProject> queue = new ArrayBlockingQueue<>(totalResultSize);
            int current = 0;

            for (Match m : result.getMatches()) {
                if (Thread.interrupted()) {
                    return;
                }
                List<Match> subList = new LinkedList<>();
                subList.add(m);
                final SaltProject p = executeQuery(subgraphRes, new MatchGroup(subList),
                        query.getLeftContext(), query.getRightContext(), query.getSegmentation(),
                        SubgraphFilter.all);

                queue.put(p);
                log.debug("added match {} to queue", current + 1);

                if (current == 0) {
                    PollControl.changePollingTime(ui, PollControl.DEFAULT_TIME);
                    ui.access(new Runnable() {
                        @Override
                        public void run() {
                            resultPanel.setQueryResultQueue(queue, query, totalResultSize);
                        }
                    });
                }

                if (Thread.interrupted()) {
                    return;
                }
                current++;
            }
        } // end if no results
    } catch (InterruptedException ex) {
        // just return
    } catch (final ExecutionException root) {
        ui.accessSynchronously(new Runnable() {
            @Override
            public void run() {
                if (resultPanel != null && resultPanel.getPaging() != null) {
                    PagingComponent paging = resultPanel.getPaging();
                    Throwable cause = root.getCause();
                    if (cause instanceof UniformInterfaceException) {
                        UniformInterfaceException ex = (UniformInterfaceException) cause;
                        if (ex.getResponse().getStatus() == 400) {
                            List<AqlParseError> errors = ex.getResponse()
                                    .getEntity(new GenericType<List<AqlParseError>>() {
                                    });
                            String errMsg = Joiner.on(" | ").join(errors);
                            paging.setInfo("parsing error: " + errMsg);
                        } else if (ex.getResponse().getStatus() == 504) {
                            paging.setInfo("Timeout: query execution took too long");
                        } else if (ex.getResponse().getStatus() == 403) {
                            paging.setInfo("Not authorized to query this corpus.");
                        } else {
                            paging.setInfo("unknown error: " + ex);
                        }
                    } else {
                        log.error("Unexpected ExecutionException cause", root);
                    }
                    resultPanel.showFinishedSubgraphSearch();
                }
            }
        });
    } // end catch
}
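The queue.put(p) above is the interesting part: because the queue is bounded, the fetch loop blocks whenever the UI consumer falls behind, which throttles the subgraph requests. A minimal sketch of that handoff with the ANNIS types replaced by Strings (sizes and labels are illustrative):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    public class HandoffDemo {
        public static void main(String[] args) throws InterruptedException {
            int totalResultSize = 3; // stands in for result.getMatches().size()
            BlockingQueue<String> queue = new ArrayBlockingQueue<>(totalResultSize);

            // Consumer thread drains results as they arrive (the UI side above).
            Thread consumer = new Thread(() -> {
                try {
                    for (int i = 0; i < totalResultSize; i++) {
                        System.out.println("rendering " + queue.take());
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            consumer.start();

            // Producer side: put(...) blocks if the consumer falls behind,
            // which is what throttles the subgraph fetch loop above.
            for (int i = 0; i < totalResultSize; i++) {
                queue.put("subgraph " + (i + 1));
            }
            consumer.join();
        }
    }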
From source file:org.apache.hadoop.hdfs.job.MyJob_20110926.java
public MyJob_20110926(DataNode dataNode, Configuration conf) {
    this(null, dataNode, conf, new ArrayBlockingQueue<Job>(conf.getInt("hdfs.job.tatol", 20)), true);
}
From source file:io.hops.erasure_coding.ParallelStreamReader.java
/**
 * Reads data from multiple streams in parallel and puts the data in a queue.
 *
 * @param streams
 *          The input streams to read from.
 * @param bufSize
 *          The amount of data to read from each stream in each go.
 * @param numThreads
 *          Number of threads to use for parallelism.
 * @param boundedBufferCapacity
 *          The capacity of the bounded queue that holds the read results.
 */
public ParallelStreamReader(Progressable reporter, InputStream[] streams, int bufSize, int numThreads,
        int boundedBufferCapacity, long maxBytesPerStream) throws IOException {
    this.reporter = reporter;
    this.streams = new InputStream[streams.length];
    this.endOffsets = new long[streams.length];
    for (int i = 0; i < streams.length; i++) {
        this.streams[i] = streams[i];
        if (this.streams[i] instanceof DFSDataInputStream) {
            DFSDataInputStream stream = (DFSDataInputStream) this.streams[i];
            // In directory raiding, the block size for each input stream
            // might be different, so we need to determine the endOffset of
            // each stream by its own block size.
            List<LocatedBlock> blocks = stream.getAllBlocks();
            if (blocks.size() == 0) {
                this.endOffsets[i] = Long.MAX_VALUE;
            } else {
                this.endOffsets[i] = stream.getPos() + blocks.get(0).getBlockSize();
            }
        } else {
            this.endOffsets[i] = Long.MAX_VALUE;
        }
        streams[i] = null; // take over ownership of streams
    }
    this.bufSize = bufSize;
    this.boundedBuffer = new ArrayBlockingQueue<ReadResult>(boundedBufferCapacity);
    if (numThreads > streams.length) {
        this.numThreads = streams.length;
    } else {
        this.numThreads = numThreads;
    }
    this.remainingBytesPerStream = maxBytesPerStream;
    this.slots = new Semaphore(this.numThreads);
    this.readPool = Executors.newFixedThreadPool(this.numThreads);
    this.mainThread = new MainThread();
}
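The boundedBuffer plus Semaphore combination above caps both memory (queued read results) and concurrency (in-flight reads). A minimal, self-contained sketch of that pattern with the HDFS types replaced by byte arrays (sizes and counts are illustrative; a concurrent consumer is required, or the readers would block on a full buffer):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Semaphore;

    public class BoundedReadBufferDemo {
        public static void main(String[] args) throws InterruptedException {
            int numThreads = 2;
            int boundedBufferCapacity = 4;
            int totalReads = 8;

            // The bounded buffer caps how many read results sit in memory at once;
            // reader tasks block on put(...) whenever the consumer falls behind.
            BlockingQueue<byte[]> boundedBuffer = new ArrayBlockingQueue<>(boundedBufferCapacity);
            Semaphore slots = new Semaphore(numThreads); // limits concurrent reads
            ExecutorService readPool = Executors.newFixedThreadPool(numThreads);

            // Consumer drains results concurrently, like ParallelStreamReader's caller.
            Thread consumer = new Thread(() -> {
                try {
                    for (int i = 0; i < totalReads; i++) {
                        byte[] chunk = boundedBuffer.take();
                        System.out.println("consumed " + chunk.length + " bytes");
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            consumer.start();

            for (int i = 0; i < totalReads; i++) {
                slots.acquire(); // wait for a free reader slot
                readPool.execute(() -> {
                    try {
                        boundedBuffer.put(new byte[1024]); // stand-in for a stream read
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } finally {
                        slots.release();
                    }
                });
            }
            consumer.join();
            readPool.shutdown();
        }
    }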
From source file:com.streamsets.pipeline.kafka.impl.BaseKafkaConsumer09.java
public BaseKafkaConsumer09(String topic, Source.Context context, int batchSize) {
    this.topic = topic;
    this.topicPartitionToOffsetMetadataMap = new HashMap<>();
    this.recordQueue = new ArrayBlockingQueue<>(batchSize);
    this.executorService = new ScheduledThreadPoolExecutor(1);
    this.pollCommitMutex = new Object();
    this.rebalanceInProgress = new AtomicBoolean(false);
    this.needToCallPoll = new AtomicBoolean(false);
    this.context = context;
    this.rebalanceHistogram = context.createHistogram("Rebalance Time");
    this.gaugeMap = context.createGauge("Internal state").getValue();
}
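Sizing recordQueue to the batch size pairs naturally with drainTo for assembling batches. A minimal sketch of that idiom (the batching loop is an assumption for illustration; the actual StreamSets consumer logic is not shown above):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class BatchDrainDemo {
        public static void main(String[] args) throws InterruptedException {
            int batchSize = 4;
            BlockingQueue<String> recordQueue = new ArrayBlockingQueue<>(batchSize);
            for (int i = 0; i < batchSize; i++) {
                recordQueue.put("record-" + i);
            }

            // Block for the first record, then drain whatever else is ready,
            // never exceeding one batch.
            List<String> batch = new ArrayList<>(batchSize);
            String first = recordQueue.poll(500, TimeUnit.MILLISECONDS);
            if (first != null) {
                batch.add(first);
                recordQueue.drainTo(batch, batchSize - batch.size());
            }
            System.out.println("assembled batch: " + batch);
        }
    }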
From source file:org.apache.hadoop.hdfs.job.MyJob.java
public MyJob(DataNode dataNode, Configuration conf) {
    this(null, dataNode, conf, new ArrayBlockingQueue<Job>(conf.getInt("hdfs.job.tatol", 20)), true);
}