List of usage examples for java.util.concurrent.ThreadPoolExecutor.shutdownNow()
public List<Runnable> shutdownNow()
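shutdownNow() attempts to stop all actively executing tasks (by interrupting their threads), halts the processing of waiting tasks, and returns the list of tasks that were awaiting execution. Tasks that never respond to interruption may never terminate. Before the real-world examples, here is a minimal, self-contained sketch of the call; the class name, pool size, and task bodies are illustrative and not taken from any of the sources below:

import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
        for (int i = 0; i < 10; i++) {
            final int id = i;
            executor.execute(() -> {
                try {
                    Thread.sleep(1000); // simulate work
                    System.out.println("Task " + id + " finished");
                } catch (InterruptedException e) {
                    // shutdownNow() interrupts running tasks; restore the flag and exit
                    Thread.currentThread().interrupt();
                }
            });
        }
        Thread.sleep(1500); // let a few tasks run to completion
        // Interrupt running tasks and drain the queue; queued tasks are returned, never run.
        List<Runnable> pending = executor.shutdownNow();
        System.out.println(pending.size() + " tasks never started");
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}

Note that the examples below mostly use shutdownNow() as the forced second phase of a shutdown: call shutdown() first, wait with awaitTermination(), and fall back to shutdownNow() on timeout or interruption, restoring the interrupt flag afterwards.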
From source file:io.anserini.index.IndexWebCollection.java
public int indexWithThreads(int numThreads) throws IOException, InterruptedException {
    LOG.info("Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);
    final IndexWriterConfig iwc = new IndexWriterConfig(new EnglishAnalyzer());
    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(512);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);
    final ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);

    final String suffix = Collection.GOV2.equals(collection) ? ".gz" : ".warc.gz";
    final Deque<Path> warcFiles = discoverWarcFiles(docDir, suffix);
    // Trim the work queue to the configured document limit, if any.
    if (doclimit > 0 && warcFiles.size() > doclimit)
        while (warcFiles.size() > doclimit)
            warcFiles.removeFirst();

    long totalWarcFiles = warcFiles.size();
    LOG.info(totalWarcFiles + " " + suffix + " files found under the docs path: " + docDir.toString());

    for (int i = 0; i < 2000; i++) {
        if (!warcFiles.isEmpty())
            executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
        else {
            if (!executor.isShutdown()) {
                Thread.sleep(30000);
                executor.shutdown();
            }
            break;
        }
    }

    long first = 0;
    // Add some delay to let the scheduler spawn some threads.
    Thread.sleep(30000);

    try {
        // Wait for existing tasks to terminate.
        while (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
            final long completedTaskCount = executor.getCompletedTaskCount();
            LOG.info(String.format("%.2f percent completed",
                    (double) completedTaskCount / totalWarcFiles * 100.0d));
            if (!warcFiles.isEmpty())
                for (long i = first; i < completedTaskCount; i++) {
                    if (!warcFiles.isEmpty())
                        executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
                    else {
                        if (!executor.isShutdown())
                            executor.shutdown();
                    }
                }
            first = completedTaskCount;
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)cancel if the current thread was also interrupted.
        executor.shutdownNow();
        // Preserve interrupt status.
        Thread.currentThread().interrupt();
    }

    if (totalWarcFiles != executor.getCompletedTaskCount())
        throw new RuntimeException("totalWarcFiles = " + totalWarcFiles
                + " is not equal to completedTaskCount = " + executor.getCompletedTaskCount());

    int numIndexed = writer.maxDoc();
    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }
    return numIndexed;
}
From source file:com.codefollower.lealone.omid.tso.TSOServer.java
@Override
public void run() {
    // *** Start the Netty configuration ***
    // Start server with max active threads = 2 * #CPUs + 1.
    ChannelFactory factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
    ServerBootstrap bootstrap = new ServerBootstrap(factory);
    // Create the global ChannelGroup.
    ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
    // Max threads.
    // int maxThreads = Runtime.getRuntime().availableProcessors() * 2 + 1;
    int maxThreads = 5;
    // Memory limits: 1 MB per channel, 1 GB global, 100 ms keep-alive timeout.
    ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576,
            1073741824, 100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
                @Override
                public int estimateSize(Object o) {
                    return 1000;
                }
            }, Executors.defaultThreadFactory());

    state = BookKeeperStateBuilder.getState(config);
    if (state == null) {
        LOG.error("Couldn't build state");
        return;
    }

    state.addRecord(new byte[] { LoggerProtocol.LOG_START }, new AddRecordCallback() {
        @Override
        public void addRecordComplete(int rc, Object ctx) {
        }
    }, null);

    LOG.info("PARAM MAX_ITEMS: " + TSOState.MAX_ITEMS);
    LOG.info("PARAM BATCH_SIZE: " + config.getBatchSize());
    LOG.info("PARAM MAX_THREADS: " + maxThreads);

    final TSOHandler handler = new TSOHandler(channelGroup, state, config.getBatchSize());
    handler.start();

    bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
    bootstrap.setOption("tcpNoDelay", false);
    // Setting buffer sizes can improve I/O.
    bootstrap.setOption("child.sendBufferSize", 1048576);
    bootstrap.setOption("child.receiveBufferSize", 1048576);
    // Better to have a receive buffer predictor.
    bootstrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory());
    // If the server is sending 1000 messages per second, optimal write buffer water marks
    // prevent unnecessary throttling; see the NioSocketChannelConfig docs.
    bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
    bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);
    bootstrap.setOption("child.tcpNoDelay", false);
    bootstrap.setOption("child.keepAlive", true);
    bootstrap.setOption("child.reuseAddress", true);
    bootstrap.setOption("child.connectTimeoutMillis", 60000);

    // *** Start Netty ***
    // Create the monitor.
    ThroughputMonitor monitor = new ThroughputMonitor(state);
    // Add the parent channel to the group.
    Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
    channelGroup.add(channel);

    // Compacter handler.
    ChannelFactory comFactory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
    ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
    ChannelGroup comGroup = new DefaultChannelGroup("compacter");
    final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
    comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
        @Override
        public ChannelPipeline getPipeline() throws Exception {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast("decoder", new ObjectDecoder());
            pipeline.addLast("encoder", new ObjectEncoder());
            pipeline.addLast("handler", comHandler);
            return pipeline;
        }
    });
    comBootstrap.setOption("tcpNoDelay", false);
    comBootstrap.setOption("child.tcpNoDelay", false);
    comBootstrap.setOption("child.keepAlive", true);
    comBootstrap.setOption("child.reuseAddress", true);
    comBootstrap.setOption("child.connectTimeoutMillis", 100);
    comBootstrap.setOption("readWriteFair", true);
    channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));

    // Start the monitor.
    monitor.start();

    synchronized (lock) {
        while (!finish) {
            try {
                lock.wait();
            } catch (InterruptedException e) {
                break;
            }
        }
    }

    handler.stop();
    comHandler.stop();
    state.stop();

    // *** Start the Netty shutdown ***
    // End the monitor.
    LOG.info("End of monitor");
    monitor.interrupt();
    // Now close all channels.
    LOG.info("End of channel group");
    channelGroup.close().awaitUninterruptibly();
    comGroup.close().awaitUninterruptibly();
    // Close the executor for the pipeline.
    LOG.info("End of pipeline executor");
    pipelineExecutor.shutdownNow();
    // Now release resources.
    LOG.info("End of resources");
    factory.releaseExternalResources();
    comFactory.releaseExternalResources();
}
From source file:com.yahoo.omid.tso.TSOServer.java
@Override
public void run() {
    // *** Start the Netty configuration ***
    // Start server with max active threads = 2 * #CPUs + 1.
    ChannelFactory factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
    ServerBootstrap bootstrap = new ServerBootstrap(factory);
    // Create the global ChannelGroup.
    ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
    // Max threads.
    // int maxThreads = Runtime.getRuntime().availableProcessors() * 2 + 1;
    int maxThreads = 5;
    // Memory limits: 1 MB per channel, 1 GB global, 100 ms keep-alive timeout.
    ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576,
            1073741824, 100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
                @Override
                public int estimateSize(Object o) {
                    return 1000;
                }
            }, Executors.defaultThreadFactory());

    // This is the only object of timestamp oracle
    // TODO: make it singleton
    //TimestampOracle timestampOracle = new TimestampOracle();

    // The wrapper for the shared state of TSO.
    state = BookKeeperStateBuilder.getState(this.config);
    if (state == null) {
        LOG.error("Couldn't build state");
        return;
    }

    state.addRecord(new byte[] { LoggerProtocol.LOGSTART }, new AddRecordCallback() {
        @Override
        public void addRecordComplete(int rc, Object ctx) {
        }
    }, null);

    TSOState.BATCH_SIZE = config.getBatchSize();
    System.out.println("PARAM MAX_ITEMS: " + TSOState.MAX_ITEMS);
    System.out.println("PARAM BATCH_SIZE: " + TSOState.BATCH_SIZE);
    System.out.println("PARAM LOAD_FACTOR: " + TSOState.LOAD_FACTOR);
    System.out.println("PARAM MAX_THREADS: " + maxThreads);

    final TSOHandler handler = new TSOHandler(channelGroup, state);
    handler.start();

    bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
    bootstrap.setOption("tcpNoDelay", false);
    // Setting buffer sizes can improve I/O.
    bootstrap.setOption("child.sendBufferSize", 1048576);
    bootstrap.setOption("child.receiveBufferSize", 1048576);
    // Better to have a receive buffer predictor.
    bootstrap.setOption("receiveBufferSizePredictorFactory", new AdaptiveReceiveBufferSizePredictorFactory());
    // If the server is sending 1000 messages per second, optimal write buffer water marks
    // prevent unnecessary throttling; see the NioSocketChannelConfig docs.
    bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
    bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);
    bootstrap.setOption("child.tcpNoDelay", false);
    bootstrap.setOption("child.keepAlive", true);
    bootstrap.setOption("child.reuseAddress", true);
    bootstrap.setOption("child.connectTimeoutMillis", 60000);

    // *** Start Netty ***
    // Create the monitor.
    ThroughputMonitor monitor = new ThroughputMonitor(state);
    // Add the parent channel to the group.
    Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
    channelGroup.add(channel);

    // Compacter handler.
    ChannelFactory comFactory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
    ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
    ChannelGroup comGroup = new DefaultChannelGroup("compacter");
    final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
    comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
        @Override
        public ChannelPipeline getPipeline() throws Exception {
            ChannelPipeline pipeline = Channels.pipeline();
            pipeline.addLast("decoder", new ObjectDecoder());
            pipeline.addLast("encoder", new ObjectEncoder());
            pipeline.addLast("handler", comHandler);
            return pipeline;
        }
    });
    comBootstrap.setOption("tcpNoDelay", false);
    comBootstrap.setOption("child.tcpNoDelay", false);
    comBootstrap.setOption("child.keepAlive", true);
    comBootstrap.setOption("child.reuseAddress", true);
    comBootstrap.setOption("child.connectTimeoutMillis", 100);
    comBootstrap.setOption("readWriteFair", true);
    channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));

    // Start the monitor.
    monitor.start();

    synchronized (lock) {
        while (!finish) {
            try {
                lock.wait();
            } catch (InterruptedException e) {
                break;
            }
        }
    }

    //timestampOracle.stop();
    handler.stop();
    comHandler.stop();
    state.stop();

    // *** Start the Netty shutdown ***
    // End the monitor.
    System.out.println("End of monitor");
    monitor.interrupt();
    // Now close all channels.
    System.out.println("End of channel group");
    channelGroup.close().awaitUninterruptibly();
    comGroup.close().awaitUninterruptibly();
    // Close the executor for the pipeline.
    System.out.println("End of pipeline executor");
    pipelineExecutor.shutdownNow();
    // Now release resources.
    System.out.println("End of resources");
    factory.releaseExternalResources();
    comFactory.releaseExternalResources();
}
From source file:org.apache.cxf.systest.jaxrs.JAXRSContinuationsTest.java
private void doTestContinuation(String pathSegment) throws Exception {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(5, 5, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(10));
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(5);
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/1", "1",
            "CXF in Action1", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/2", "2",
            "CXF in Action2", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/3", "3",
            "CXF in Action3", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/4", "4",
            "CXF in Action4", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/5", "5",
            "CXF in Action5", startSignal, doneSignal));
    startSignal.countDown();
    doneSignal.await(60, TimeUnit.SECONDS);
    executor.shutdownNow();
    assertEquals("Not all invocations have completed", 0, doneSignal.getCount());
}
From source file:org.apache.hadoop.hbase.client.TestHCM.java
@Test
public void testClusterConnection() throws IOException {
    ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("test-hcm"));

    HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
    HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
    // Make sure the internally created ExecutorService is the one passed.
    assertTrue(otherPool == ((HConnectionImplementation) con2).getCurrentBatchPool());

    String tableName = "testClusterConnection";
    TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();
    HTable t = (HTable) con1.getTable(tableName, otherPool);
    // Make sure passing a pool to getTable does not trigger creation of an internal pool.
    assertNull("Internal Thread pool should be null",
            ((HConnectionImplementation) con1).getCurrentBatchPool());
    // The table should use the pool passed in.
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(tableName);
    // The table should use the connection's internal pool.
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(Bytes.toBytes(tableName)); // try the other APIs too
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(TableName.valueOf(tableName)); // try the other APIs too
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con1.getTable(tableName);
    ExecutorService pool = ((HConnectionImplementation) con1).getCurrentBatchPool();
    // Make sure an internal pool was created...
    assertNotNull("An internal Thread pool should have been created", pool);
    // ...and that the table is using it.
    assertTrue(t.getPool() == pool);
    t.close();

    t = (HTable) con1.getTable(tableName);
    // Still using the *same* internal pool.
    assertTrue(t.getPool() == pool);
    t.close();

    con1.close();
    // If the pool was created on demand, it should be closed upon connection close.
    assertTrue(pool.isShutdown());

    con2.close();
    // If the pool was passed in, it is not closed.
    assertFalse(otherPool.isShutdown());
    otherPool.shutdownNow();
}
From source file:org.apache.hadoop.hbase.client.TestHCM.java
/**
 * Tests that a destroyed connection does not have a live zookeeper.
 * Below is timing based. We put up a connection to a table and then close the connection while
 * having a background thread running that is forcing close of the connection to try and
 * provoke a close catastrophe; we are hoping for a car crash so we can see if we are leaking
 * zk connections.
 * @throws Exception
 */
@Ignore("Flakey test: See HBASE-8996")
@Test
public void testDeleteForZKConnLeak() throws Exception {
    TEST_UTIL.createTable(TABLE_NAME4, FAM_NAM);
    final Configuration config = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    config.setInt("zookeeper.recovery.retry", 1);
    config.setInt("zookeeper.recovery.retry.intervalmill", 1000);
    config.setInt("hbase.rpc.timeout", 2000);
    config.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);

    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 10, 5, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("test-hcm-delete"));

    pool.submit(new Runnable() {
        @Override
        public void run() {
            while (!Thread.interrupted()) {
                try {
                    HConnection conn = HConnectionManager.getConnection(config);
                    LOG.info("Connection " + conn);
                    HConnectionManager.deleteStaleConnection(conn);
                    LOG.info("Connection closed " + conn);
                    // TODO: This sleep time should be less than the time that it takes to open and close
                    // a table. Ideally we would do a few runs first to measure. For now this is
                    // timing based; hopefully we hit the bad condition.
                    Threads.sleep(10);
                } catch (Exception e) {
                }
            }
        }
    });

    // Use the connection multiple times.
    for (int i = 0; i < 30; i++) {
        HConnection c1 = null;
        try {
            c1 = ConnectionManager.getConnectionInternal(config);
            LOG.info("HTable connection " + i + " " + c1);
            HTable table = new HTable(config, TABLE_NAME4, pool);
            table.close();
            LOG.info("HTable connection " + i + " closed " + c1);
        } catch (Exception e) {
            LOG.info("We actually want this to happen!!!! So we can see if we are leaking zk", e);
        } finally {
            if (c1 != null) {
                if (c1.isClosed()) {
                    // Cannot use getZooKeeper, as the method instantiates a watcher if null.
                    Field zkwField = c1.getClass().getDeclaredField("keepAliveZookeeper");
                    zkwField.setAccessible(true);
                    Object watcher = zkwField.get(c1);

                    if (watcher != null) {
                        if (((ZooKeeperWatcher) watcher).getRecoverableZooKeeper().getState().isAlive()) {
                            // Non-synchronized access to the watcher; sleep and check again in case the
                            // zk connection hasn't been cleaned up yet.
                            Thread.sleep(1000);
                            if (((ZooKeeperWatcher) watcher).getRecoverableZooKeeper().getState().isAlive()) {
                                pool.shutdownNow();
                                fail("Live zookeeper in closed connection");
                            }
                        }
                    }
                }
                c1.close();
            }
        }
    }
    pool.shutdownNow();
}
From source file:org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure.java
/**
 * Create Split directory
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs)
        throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();

    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion and finally checks for any exception.
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir.
    // Nothing to unroll here on failure -- re-running createSplitsDir will clean this up.
    int nbFiles = 0;
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null) {
            nbFiles += storeFiles.size();
        }
    }
    if (nbFiles == 0) {
        // No file needs to be split.
        return new Pair<Integer, Integer>(0, 0);
    }

    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(
            conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
            Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + " using "
            + maxThreads + " threads");
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads,
            Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);

    // Split each store file.
    final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (String family : regionFs.getFamilies()) {
        final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, family.getBytes(),
                        new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf,
                                hcd.getBloomFilterType()));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shut down the pool.
    threadPool.shutdown();

    // Wait for all the tasks to finish.
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // Wait for the threads to shut down completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException(
                    "Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }

    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception.
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA
                + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<Integer, Integer>(daughterA, daughterB);
}
From source file:org.apache.hadoop.hbase.regionserver.CompactSplit.java
private void waitFor(ThreadPoolExecutor t, String name) {
    boolean done = false;
    while (!done) {
        try {
            done = t.awaitTermination(60, TimeUnit.SECONDS);
            LOG.info("Waiting for " + name + " to finish...");
            if (!done) {
                t.shutdownNow();
            }
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted waiting for " + name + " to finish...");
        }
    }
}
From source file:org.apache.hadoop.hbase.regionserver.HRegion.java
private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
        throws IOException, UnsupportedEncodingException {
    // Load in all the HStores.
    long maxSeqId = -1;
    // Initialized to -1 so that we pick up MemstoreTS from column families.
    long maxMemstoreTS = -1;

    if (!htableDescriptor.getFamilies().isEmpty()) {
        // Initialize the thread pool for opening stores in parallel.
        ThreadPoolExecutor storeOpenerThreadPool = getStoreOpenAndCloseThreadPool(
                "StoreOpener-" + this.getRegionInfo().getShortNameToLog());
        CompletionService<HStore> completionService = new ExecutorCompletionService<HStore>(
                storeOpenerThreadPool);

        // Initialize each store in parallel.
        for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
            status.setStatus("Instantiating store for column family " + family);
            completionService.submit(new Callable<HStore>() {
                @Override
                public HStore call() throws IOException {
                    return instantiateHStore(family);
                }
            });
        }
        boolean allStoresOpened = false;
        try {
            for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
                Future<HStore> future = completionService.take();
                HStore store = future.get();
                this.stores.put(store.getColumnFamilyName().getBytes(), store);

                long storeMaxSequenceId = store.getMaxSequenceId();
                maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId);
                if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) {
                    maxSeqId = storeMaxSequenceId;
                }
                long maxStoreMemstoreTS = store.getMaxMemstoreTS();
                if (maxStoreMemstoreTS > maxMemstoreTS) {
                    maxMemstoreTS = maxStoreMemstoreTS;
                }
            }
            allStoresOpened = true;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e.getCause());
        } finally {
            storeOpenerThreadPool.shutdownNow();
            if (!allStoresOpened) {
                // Something went wrong; close all opened stores.
                LOG.error("Could not initialize all stores for the region=" + this);
                for (Store store : this.stores.values()) {
                    try {
                        store.close();
                    } catch (IOException e) {
                        LOG.warn(e.getMessage());
                    }
                }
            }
        }
    }
    mvcc.initialize(maxMemstoreTS + 1);
    // Recover any edits if available.
    maxSeqId = Math.max(maxSeqId,
            replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
    return maxSeqId;
}
From source file:org.apache.hadoop.hbase.regionserver.HRegion.java
private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status) throws IOException {
    if (isClosed()) {
        LOG.warn("Region " + this + " already closed");
        return null;
    }

    if (coprocessorHost != null) {
        status.setStatus("Running coprocessor pre-close hooks");
        this.coprocessorHost.preClose(abort);
    }

    status.setStatus("Disabling compacts and flushes for region");
    synchronized (writestate) {
        // Disable compacting and flushing by background threads for this region.
        writestate.writesEnabled = false;
        LOG.debug("Closing " + this + ": disabling compactions & flushes");
        waitForFlushesAndCompactions();
    }
    // If we were not just flushing, is it worth doing a preflush...one
    // that will clear out of the bulk of the memstore before we put up
    // the close flag?
    if (!abort && worthPreFlushing()) {
        status.setStatus("Pre-flushing region before close");
        LOG.info("Running close preflush of " + this.getRegionNameAsString());
        try {
            internalFlushcache(status);
        } catch (IOException ioe) {
            // Failed to flush the region. Keep going.
            status.setStatus("Failed pre-flush " + this + "; " + ioe.getMessage());
        }
    }

    this.closing.set(true);
    status.setStatus("Disabling writes for close");
    // Block waiting for the lock for closing.
    lock.writeLock().lock();
    try {
        if (this.isClosed()) {
            status.abort("Already got closed by another process");
            // SplitTransaction handles the null.
            return null;
        }
        LOG.debug("Updates disabled for region " + this);
        // Don't flush the cache if we are aborting.
        if (!abort) {
            int flushCount = 0;
            while (this.getMemstoreSize().get() > 0) {
                try {
                    if (flushCount++ > 0) {
                        int actualFlushes = flushCount - 1;
                        if (actualFlushes > 5) {
                            // If we tried 5 times and are unable to clear memory, abort
                            // so we do not lose data.
                            throw new DroppedSnapshotException("Failed clearing memory after " + actualFlushes
                                    + " attempts on region: " + Bytes.toStringBinary(getRegionName()));
                        }
                        LOG.info("Running extra flush, " + actualFlushes + " (carrying snapshot?) " + this);
                    }
                    internalFlushcache(status);
                } catch (IOException ioe) {
                    status.setStatus("Failed flush " + this + ", putting online again");
                    synchronized (writestate) {
                        writestate.writesEnabled = true;
                    }
                    // Have to throw to upper layers. I can't abort server from here.
                    throw ioe;
                }
            }
        }

        Map<byte[], List<StoreFile>> result = new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
        if (!stores.isEmpty()) {
            // Initialize the thread pool for closing stores in parallel.
            ThreadPoolExecutor storeCloserThreadPool = getStoreOpenAndCloseThreadPool(
                    "StoreCloserThread-" + this.getRegionNameAsString());
            CompletionService<Pair<byte[], Collection<StoreFile>>> completionService =
                    new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(storeCloserThreadPool);

            // Close each store in parallel.
            for (final Store store : stores.values()) {
                assert abort || store.getFlushableSize() == 0;
                completionService.submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
                    @Override
                    public Pair<byte[], Collection<StoreFile>> call() throws IOException {
                        return new Pair<byte[], Collection<StoreFile>>(store.getFamily().getName(),
                                store.close());
                    }
                });
            }
            try {
                for (int i = 0; i < stores.size(); i++) {
                    Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
                    Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
                    List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
                    if (familyFiles == null) {
                        familyFiles = new ArrayList<StoreFile>();
                        result.put(storeFiles.getFirst(), familyFiles);
                    }
                    familyFiles.addAll(storeFiles.getSecond());
                }
            } catch (InterruptedException e) {
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            } catch (ExecutionException e) {
                throw new IOException(e.getCause());
            } finally {
                storeCloserThreadPool.shutdownNow();
            }
        }
        this.closed.set(true);

        if (memstoreSize.get() != 0)
            LOG.error("Memstore size is " + memstoreSize.get());
        if (coprocessorHost != null) {
            status.setStatus("Running coprocessor post-close hooks");
            this.coprocessorHost.postClose(abort);
        }
        if (this.metricsRegion != null) {
            this.metricsRegion.close();
        }
        if (this.metricsRegionWrapper != null) {
            Closeables.closeQuietly(this.metricsRegionWrapper);
        }
        status.markComplete("Closed");
        LOG.info("Closed " + this);
        return result;
    } finally {
        lock.writeLock().unlock();
    }
}