List of usage examples for the java.util.concurrent.LinkedBlockingDeque constructor
public LinkedBlockingDeque(int capacity)
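Every example on this page constructs a capacity-bounded deque. A minimal, self-contained sketch of that constructor and the basic blocking operations (the class name and values below are illustrative, not taken from any of the quoted sources):

import java.util.concurrent.LinkedBlockingDeque;

public class BoundedDequeDemo {
    public static void main(String[] args) throws InterruptedException {
        // Capacity-bounded deque: at most 100 elements may be queued at once.
        LinkedBlockingDeque<String> deque = new LinkedBlockingDeque<>(100);

        deque.put("first");                        // blocks if the deque is full
        boolean accepted = deque.offer("second");  // returns false instead of blocking when full

        String head = deque.take();                // blocks until an element is available
        System.out.println(head + ", offer accepted = " + accepted);
    }
}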
From source file:io.apiman.gateway.engine.jdbc.JdbcMetrics.java
/**
 * Constructor.
 * @param config map of configuration options
 */
public JdbcMetrics(Map<String, String> config) {
    super(config);
    int queueSize = DEFAULT_QUEUE_SIZE;
    String queueSizeConfig = config.get("queue.size"); //$NON-NLS-1$
    if (queueSizeConfig != null) {
        queueSize = new Integer(queueSizeConfig);
    }
    queue = new LinkedBlockingDeque<>(queueSize);
    startConsumerThread();
}
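The startConsumerThread() call is not shown in this excerpt. A self-contained sketch of the usual shape of such a consumer thread over a bounded deque (the class, element type, and printed output below are made up, not apiman's actual code):

import java.util.concurrent.LinkedBlockingDeque;

public class MetricsConsumerDemo {
    private final LinkedBlockingDeque<String> queue = new LinkedBlockingDeque<>(10000);

    // Roughly what a startConsumerThread() over such a bounded deque tends to look like.
    private void startConsumerThread() {
        Thread consumer = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    String metric = queue.take(); // blocks until something is queued
                    System.out.println("writing metric: " + metric);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt(); // restore the flag and exit
                }
            }
        }, "metrics-consumer");
        consumer.setDaemon(true);
        consumer.start();
    }

    public static void main(String[] args) throws InterruptedException {
        MetricsConsumerDemo demo = new MetricsConsumerDemo();
        demo.startConsumerThread();
        demo.queue.put("request: GET /metrics"); // producer side: blocks only if 10000 entries are pending
        Thread.sleep(200); // give the daemon consumer a moment before the JVM exits
    }
}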
From source file:org.sbs.goodcrawler.fetcher.FailedPageBackup.java
public void init() {
    ignoreFailedPage = Boolean.getBoolean(config.getString(GlobalConstants.ignoreFailedPages, "true"));
    if (!ignoreFailedPage) {
        Queue = new LinkedBlockingDeque<Page>(config.getInt(GlobalConstants.failedPagesQueueSize, 2000));
        BackupFailedPages backup = new BackupFailedPages();
        Thread failedPagesBackupThread = new Thread(backup, "failed-pages-backup-thread");
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        scheduler.scheduleAtFixedRate(failedPagesBackupThread, 60, 60, TimeUnit.SECONDS);
    }
}
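Two JDK details are easy to miss here: Boolean.getBoolean(String) does not parse its argument, it reads the system property of that name, so the configured "true"/"false" string above is treated as a property name; and passing a Thread to scheduleAtFixedRate only works because Thread implements Runnable, meaning the scheduler calls run() on its own worker threads and never starts failedPagesBackupThread as a separate thread. If the intent is to parse the configured value, the usual form would be a one-line sketch like:

ignoreFailedPage = Boolean.parseBoolean(config.getString(GlobalConstants.ignoreFailedPages, "true"));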
From source file:com.pinterest.terrapin.controller.TerrapinControllerServiceImpl.java
public TerrapinControllerServiceImpl(PropertiesConfiguration configuration, ZooKeeperManager zkManager,
        DFSClient hdfsClient, HelixAdmin helixAdmin, String clusterName) {
    this.configuration = configuration;
    this.zkManager = zkManager;
    this.hdfsClient = hdfsClient;
    this.helixAdmin = helixAdmin;
    this.clusterName = clusterName;

    ExecutorService threadPool = new ThreadPoolExecutor(100, 100, 0, TimeUnit.SECONDS,
            new LinkedBlockingDeque<Runnable>(1000),
            new ThreadFactoryBuilder().setDaemon(false).setNameFormat("controller-pool-%d").build());
    this.futurePool = new ExecutorServiceFuturePool(threadPool);
}
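Here the LinkedBlockingDeque serves as the work queue of a ThreadPoolExecutor. Because no RejectedExecutionHandler is supplied, the default AbortPolicy applies: once all 100 threads are busy and the 1000-slot deque is full, execute()/submit() throws RejectedExecutionException. A small illustrative sketch of handling that case (names and the placeholder workload are hypothetical):

import java.util.concurrent.*;

public class RejectionDemo {
    public static void main(String[] args) {
        // Same shape as above: fixed 100-thread pool fed by a 1000-slot bounded deque.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(100, 100, 0, TimeUnit.SECONDS,
                new LinkedBlockingDeque<Runnable>(1000));
        try {
            pool.execute(() -> doWork());
        } catch (RejectedExecutionException e) {
            // All workers busy and the deque full: shed load, retry later, or log.
        } finally {
            pool.shutdown();
        }
    }

    private static void doWork() { /* placeholder workload */ }
}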
From source file:org.apache.hadoop.fs.nfs.stream.NFSBufferedOutputStream.java
public NFSBufferedOutputStream(Configuration configuration, FileHandle handle, Path path,
        NFSv3FileSystemStore store, Credentials credentials, boolean append) throws IOException {
    this.handle = handle;
    this.credentials = credentials;
    this.path = path;
    this.pathString = path.toUri().getPath();
    this.statistics = new StreamStatistics(NFSBufferedInputStream.class + pathString,
            streamId.getAndIncrement(), false);
    this.store = store;
    this.blockSizeBits = store.getWriteSizeBits();
    this.currentBlock = null;
    this.closed = new AtomicBoolean(false);

    assert (blockSizeBits >= 0 && blockSizeBits <= 22);

    // Create the task queues
    executors = new ThreadPoolExecutor(DEFAULT_WRITEBACK_POOL_SIZE, MAX_WRITEBACK_POOL_SIZE, 5,
            TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(1024),
            new ThreadPoolExecutor.CallerRunsPolicy());
    ongoing = new LinkedList<>();

    // Set file offset to 0 or file length
    if (append) {
        Nfs3FileAttributes attributes = store.getFileAttributes(handle, credentials);
        if (attributes != null) {
            fileOffset = attributes.getSize();
            LOG.info("Appending to file so starting at offset = " + fileOffset);
        } else {
            throw new IOException("Could not get file length");
        }
    } else {
        fileOffset = 0L;
    }
}
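Both NFS stream classes (this one and NFSBufferedInputStream below) pair the 1024-slot deque with ThreadPoolExecutor.CallerRunsPolicy. That handler makes the submitting thread run the task itself when the deque is full, which throttles the producer instead of throwing RejectedExecutionException. A minimal sketch of that effect with a made-up task, not the stream's actual write-back logic:

import java.util.concurrent.*;

public class CallerRunsDemo {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 5, TimeUnit.SECONDS,
                new LinkedBlockingDeque<Runnable>(8),
                new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 100; i++) {
            final int n = i;
            // When the deque is full, this call runs the task on the main thread,
            // which naturally slows down submission (back-pressure).
            pool.execute(() -> System.out.println(n + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
    }
}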
From source file:org.fim.internal.StateGenerator.java
public State generateState(String comment, Path rootDir, Path dirToScan) throws NoSuchAlgorithmException {
    this.rootDir = rootDir;

    int threadCount = context.getThreadCount();
    Logger.info(String.format("Scanning recursively local files, using '%s' mode and %d %s",
            hashModeToString(context.getHashMode()), threadCount, English.plural("thread", threadCount)));
    if (hashProgress.isProgressDisplayed()) {
        System.out.printf("(Hash progress legend for files grouped %d by %d: %s)%n",
                PROGRESS_DISPLAY_FILE_COUNT, PROGRESS_DISPLAY_FILE_COUNT, hashProgress.hashLegend());
    }

    State state = new State();
    state.setComment(comment);
    state.setHashMode(context.getHashMode());

    long start = System.currentTimeMillis();
    hashProgress.outputInit();

    filesToHashQueue = new LinkedBlockingDeque<>(FILES_QUEUE_CAPACITY);
    initializeFileHashers();

    FimIgnore initialFimIgnore = fimIgnoreManager.loadInitialFimIgnore();
    scanFileTree(filesToHashQueue, dirToScan, initialFimIgnore);

    // In case the FileHashers have not already been started
    startFileHashers();

    waitAllFilesToBeHashed();

    overallTotalBytesHashed = 0;
    for (FileHasher fileHasher : fileHashers) {
        state.getFileStates().addAll(fileHasher.getFileStates());
        overallTotalBytesHashed += fileHasher.getTotalBytesHashed();
    }

    Collections.sort(state.getFileStates(), fileNameComparator);

    state.setIgnoredFiles(fimIgnoreManager.getIgnoredFiles());

    hashProgress.outputStop();

    displayStatistics(start, state);

    return state;
}
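In this example the bounded deque (FILES_QUEUE_CAPACITY) sits between the directory scanner and the FileHasher workers, so the scanner cannot race arbitrarily far ahead of hashing: put() blocks when the deque is full, and consumers typically use take() or poll(timeout) so they can notice when work dries up. A small generic sketch of that bounded handoff, with made-up names and sizes:

import java.util.concurrent.*;

public class BoundedHandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingDeque<String> files = new LinkedBlockingDeque<>(500);

        Thread hasher = new Thread(() -> {
            try {
                while (true) {
                    // Wait up to a second for work; a real worker would also check a "done" flag.
                    String file = files.poll(1, TimeUnit.SECONDS);
                    if (file == null) {
                        break; // assume the scan is finished
                    }
                    System.out.println("hashing " + file);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "file-hasher");
        hasher.start();

        for (int i = 0; i < 2000; i++) {
            files.put("file-" + i); // blocks whenever 500 entries are already waiting
        }
        hasher.join();
    }
}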
From source file:org.apache.hadoop.fs.nfs.stream.NFSBufferedInputStream.java
public NFSBufferedInputStream(NFSv3FileSystemStore store, FileHandle handle, Path f, Configuration conf,
        long splitSize, Credentials credentials, FileSystem.Statistics fsStat) throws IOException {
    this.store = store;
    this.handle = handle;
    this.credentials = credentials;
    this.pathString = f.toUri().getPath();

    doPrefetch = conf.getBoolean("fs.nfs.prefetch", DEFAULT_PREFETCH_ENABLED);

    this.fileOffset = 0L;
    this.readBlockSizeBits = store.getReadSizeBits();
    this.splitSize = splitSize;
    this.closed = new AtomicBoolean(false);

    this.ongoing = new ConcurrentHashMap<>(DEFAULT_PREFETCH_POOL_SIZE);
    this.cache = new ConcurrentHashMap<>(DEFAULT_CACHE_SIZE_IN_BLOCKS);
    this.statistics = new StreamStatistics(NFSBufferedInputStream.class + pathString,
            streamId.getAndIncrement(), true);
    this.executors = new ThreadPoolExecutor(DEFAULT_PREFETCH_POOL_SIZE, MAX_PREFETCH_POOL_SIZE, 5,
            TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(1024),
            new ThreadPoolExecutor.CallerRunsPolicy());

    // Keep track of the file length at file open
    // NOTE: The file does not get modified while this stream is open
    Nfs3FileAttributes attributes = store.getFileAttributes(handle, credentials);
    if (attributes != null) {
        this.fileLength = attributes.getSize();
        this.prefetchBlockLimit = (long) (Math.min(fileLength, splitSize) >> readBlockSizeBits);
        if (this.fileLength < 0) {
            throw new IOException("File length is invalid: " + this.fileLength);
        }
    } else {
        throw new IOException("Could not get file length from NFS server");
    }
}
From source file:org.apache.drill.optiq.EnumerableDrill.java
@Override
public Enumerator<E> enumerator() {
    // TODO: use a completion service from the container
    final ExecutorCompletionService<Collection<RunOutcome>> service =
            new ExecutorCompletionService<Collection<RunOutcome>>(
                    new ThreadPoolExecutor(1, 1, 1, TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(10)));

    // Run the plan using an executor. It runs in a different thread, writing
    // results to our queue.
    //
    // TODO: use the result of task, and check for exceptions
    final Future<Collection<RunOutcome>> task = runPlan(service);

    return new Enumerator<E>() {
        private E current;

        @Override
        public E current() {
            return current;
        }

        @Override
        public boolean moveNext() {
            try {
                Object o = queue.take();
                if (o instanceof RunOutcome.OutcomeType) {
                    switch ((RunOutcome.OutcomeType) o) {
                    case SUCCESS:
                        return false; // end of data
                    case CANCELED:
                        throw new RuntimeException("canceled");
                    case FAILED:
                    default:
                        throw new RuntimeException("failed");
                    }
                } else {
                    current = (E) parseJson((byte[]) o);
                    return true;
                }
            } catch (InterruptedException e) {
                Thread.interrupted();
                throw new RuntimeException(e);
            }
        }

        @Override
        public void reset() {
            throw new UnsupportedOperationException();
        }
    };
}
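moveNext() above uses a sentinel pattern: the producer pushes either byte[] result rows or a RunOutcome.OutcomeType marker onto the deque, and the consumer stops when it takes the SUCCESS marker. A stripped-down sketch of the same idea with a dedicated end-of-data marker object (all names below are illustrative):

import java.util.concurrent.LinkedBlockingDeque;

public class SentinelDemo {
    private static final Object END = new Object(); // sentinel marking end of data

    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingDeque<Object> queue = new LinkedBlockingDeque<>(10);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    queue.put("row-" + i);
                }
                queue.put(END); // tell the consumer there is nothing more to read
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        while (true) {
            Object o = queue.take();
            if (o == END) {
                break; // end of data, like OutcomeType.SUCCESS above
            }
            System.out.println(o);
        }
        producer.join();
    }
}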
From source file:org.opennms.netmgt.rtc.DataSender.java
/**
 * The constructor for this object
 *
 * @param categories
 *            The category map.
 * @param numSenders
 *            The number of senders.
 */
public DataSender(final AvailabilityService dataMgr, final RTCConfigFactory configFactory) {
    m_dataMgr = dataMgr;

    // NMS-7622: Limit the number of queued update tasks with a bounded queue
    m_queue = new LinkedBlockingDeque<Runnable>(Math.max(4 * configFactory.getSenders(), 32));
    m_dsrPool = new ThreadPoolExecutor(1, configFactory.getSenders(), 30, TimeUnit.SECONDS, m_queue,
            new LogPreservingThreadFactory(getClass().getSimpleName(), configFactory.getSenders()));

    // get post error limit
    POST_ERROR_LIMIT = configFactory.getErrorsBeforeUrlUnsubscribe();
}
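One ThreadPoolExecutor subtlety relevant here: a pool grows beyond its core size only when an offer to its work queue fails, i.e. when the bounded deque is already full. With the sizes above (core 1, max getSenders(), queue capacity max(4 * senders, 32)), a configuration of 8 senders keeps a single thread until 32 tasks are queued, and only then adds threads up to 8. The sketch below illustrates that growth pattern with small, made-up sizes:

import java.util.concurrent.*;

public class PoolGrowthDemo {
    public static void main(String[] args) throws InterruptedException {
        // Core 1, max 4, bounded deque of 4: the second worker appears only once the deque is full.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 4, 30, TimeUnit.SECONDS,
                new LinkedBlockingDeque<Runnable>(4));
        for (int i = 0; i < 8; i++) {
            pool.execute(() -> {
                try {
                    Thread.sleep(200);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            System.out.println("submitted " + (i + 1) + ", pool size = " + pool.getPoolSize());
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}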
From source file:com.datatorrent.lib.db.jdbc.AbstractJdbcPollInputOperator.java
@Override
public void setup(OperatorContext context) {
    super.setup(context);
    intializeDSLContext();
    if (scanService == null) {
        scanService = Executors.newScheduledThreadPool(1);
    }
    execute = true;
    emitQueue = new LinkedBlockingDeque<>(queueCapacity);
    operatorId = context.getId();
    windowManager.setup(context);
}
From source file:org.loggo.server.Server.java
void initialize(HierarchicalINIConfiguration config)
        throws ConfigurationException, AccumuloException, AccumuloSecurityException {
    Configuration kafkaConsumerSection = config.getSection("KafkaConsumer");
    Configuration serverSection = config.getSection("server");
    Configuration accumuloSection = config.getSection("accumulo");
    Configuration batchSection = config.getSection("batchwriter");
    Configuration kafkaSection = config.getSection("kafka");
    ClientConfiguration clientConfig = new ClientConfiguration(accumuloSection);

    // connect to accumulo, check on the table
    String username = batchSection.getString("user", Defaults.USER);
    String password = batchSection.getString("password", Defaults.PASSWORD);
    String table = batchSection.getString("table", Defaults.TABLE);
    Instance instance = new ZooKeeperInstance(clientConfig);
    Connector connector = instance.getConnector(username, new PasswordToken(password.getBytes()));
    if (!connector.tableOperations().exists(table)) {
        createTable(connector, table);
    }
    createTopic(kafkaConsumerSection.getString("zookeeper.connect"), kafkaSection);

    LinkedBlockingDeque<LogEntry> queue = new LinkedBlockingDeque<LogEntry>(
            config.getInt("queue.size", Defaults.QUEUE_SIZE));
    this.writer = new Writer(queue, clientConfig, batchSection);

    ServerBootstrap b = new ServerBootstrap();
    // @formatter:off
    // tcp
    b.group(bossGroup, workerGroup)
        .channel(NioServerSocketChannel.class)
        .handler(new LoggingHandler(LogLevel.INFO))
        .childHandler(new LoggerReaderInitializer(queue));
    // udp
    Bootstrap bb = new Bootstrap();
    bb.group(dgramGroup)
        .channel(NioDatagramChannel.class)
        .handler(new DgramHandler(queue));
    // @formatter:on

    String host = serverSection.getString("host", Defaults.HOST);
    serverSection.setProperty("host", host);
    if (host.equals(Defaults.HOST)) {
        try {
            serverSection.setProperty("host", InetAddress.getLocalHost().getHostName());
        } catch (UnknownHostException ex) {
            throw new RuntimeException("Unable to determine local hostname: " + ex.toString());
        }
    }
    try {
        int tcpPort = serverSection.getInteger("tcp.port", Defaults.PORT);
        channel = b.bind(host, tcpPort).sync().channel();
        tcpPort = ((InetSocketAddress) channel.localAddress()).getPort();
        serverSection.setProperty("tcp.port", tcpPort);

        int udpPort = serverSection.getInteger("udp.port", Defaults.PORT);
        Channel channel2 = bb.bind(host, udpPort).sync().channel();
        udpPort = ((InetSocketAddress) channel2.localAddress()).getPort();
        serverSection.setProperty("udp.port", udpPort);

        registerInZookeeper(serverSection);
    } catch (IOException | KeeperException | InterruptedException ex) {
        throw new RuntimeException(ex);
    }

    String zookeeperConnect = kafkaConsumerSection.getString("zookeeper.connect");
    if (zookeeperConnect != null) {
        kafkaConsumer = new KafkaConsumer();
        kafkaConsumer.initialize(config, queue);
        kafkaConsumer.start();
    }
}
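Here a single bounded deque is shared by several producers (the Netty TCP and UDP handlers, plus the optional Kafka consumer) and one consumer, the Writer that batches entries into Accumulo. The Writer's loop is not shown; a generic sketch of a batch-drain pattern over such a deque, with made-up element and batch types rather than the project's Writer:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

public class BatchDrainDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingDeque<String> queue = new LinkedBlockingDeque<>(10_000);

        Thread writer = new Thread(() -> {
            List<String> batch = new ArrayList<>();
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    // Block for the first entry, then grab whatever else is already queued.
                    String first = queue.poll(1, TimeUnit.SECONDS);
                    if (first == null) {
                        continue;
                    }
                    batch.add(first);
                    queue.drainTo(batch, 999); // batch at most 1000 entries per flush
                    System.out.println("flushing " + batch.size() + " entries");
                    batch.clear();
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "writer");
        writer.start();

        for (int i = 0; i < 5_000; i++) {
            queue.put("log entry " + i); // producers block if 10 000 entries are pending
        }
        writer.interrupt(); // demo shutdown only; a real writer would drain what is left first
        writer.join();
    }
}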