List of usage examples for java.lang.Thread.getName()
public final String getName()
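getName() returns this thread's name, which makes it useful in log messages and diagnostics. Before the project examples below, a minimal, self-contained sketch of the basic call (class and thread names here are illustrative, not taken from the examples):

public class GetNameDemo {
    public static void main(String[] args) throws InterruptedException {
        // Every thread has a name; the main thread is named "main" by default.
        System.out.println(Thread.currentThread().getName());

        // Worker threads get default names like "Thread-0" unless one is supplied.
        Thread worker = new Thread(
                () -> System.out.println("Running in " + Thread.currentThread().getName()),
                "demo-worker");
        worker.start();
        worker.join();
    }
}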
From source file:hudson.model.Hudson.java
private synchronized void load() throws IOException {
    long startTime = System.currentTimeMillis();
    XmlFile cfg = getConfigFile();
    if (cfg.exists()) {
        // reset some data that may not exist in the disk file
        // so that we can take a proper compensation action later.
        primaryView = null;
        views.clear();
        cfg.unmarshal(this);
    }
    clouds.setOwner(this);

    File projectsDir = new File(root, "jobs");
    if (!projectsDir.isDirectory() && !projectsDir.mkdirs()) {
        if (projectsDir.exists())
            throw new IOException(projectsDir + " is not a directory");
        throw new IOException("Unable to create " + projectsDir
                + "\nPermission issue? Please create this directory manually.");
    }
    File[] subdirs = projectsDir.listFiles(new FileFilter() {
        public boolean accept(File child) {
            return child.isDirectory() && Items.getConfigFile(child).exists();
        }
    });
    items.clear();
    if (PARALLEL_LOAD) {
        // load jobs in parallel for better performance
        LOGGER.info("Loading in " + TWICE_CPU_NUM + " parallel threads");
        List<Future<TopLevelItem>> loaders = new ArrayList<Future<TopLevelItem>>();
        for (final File subdir : subdirs) {
            loaders.add(threadPoolForLoad.submit(new Callable<TopLevelItem>() {
                public TopLevelItem call() throws Exception {
                    Thread t = Thread.currentThread();
                    String name = t.getName();
                    t.setName("Loading " + subdir);
                    try {
                        long start = System.currentTimeMillis();
                        TopLevelItem item = (TopLevelItem) Items.load(Hudson.this, subdir);
                        if (LOG_STARTUP_PERFORMANCE)
                            LOGGER.info("Loaded " + item.getName() + " in "
                                    + (System.currentTimeMillis() - start) + "ms by " + name);
                        return item;
                    } finally {
                        t.setName(name);
                    }
                }
            }));
        }
        for (Future<TopLevelItem> loader : loaders) {
            try {
                TopLevelItem item = loader.get();
                items.put(item.getName(), item);
            } catch (ExecutionException e) {
                LOGGER.log(Level.WARNING, "Failed to load a project", e.getCause());
            } catch (InterruptedException e) {
                e.printStackTrace(); // this is probably not the right thing to do
            }
        }
    } else {
        for (File subdir : subdirs) {
            try {
                long start = System.currentTimeMillis();
                TopLevelItem item = (TopLevelItem) Items.load(this, subdir);
                if (LOG_STARTUP_PERFORMANCE)
                    LOGGER.info("Loaded " + item.getName() + " in "
                            + (System.currentTimeMillis() - start) + "ms");
                items.put(item.getName(), item);
            } catch (Error e) {
                LOGGER.log(Level.WARNING, "Failed to load " + subdir, e);
            } catch (RuntimeException e) {
                LOGGER.log(Level.WARNING, "Failed to load " + subdir, e);
            } catch (IOException e) {
                LOGGER.log(Level.WARNING, "Failed to load " + subdir, e);
            }
        }
    }
    rebuildDependencyGraph();

    { // recompute label objects
        for (Node slave : slaves)
            slave.getAssignedLabels();
        getAssignedLabels();
    }

    // initialize views by inserting the default view if necessary
    // this is both for clean Hudson and for backward compatibility.
    if (views.size() == 0 || primaryView == null) {
        View v = new AllView(Messages.Hudson_ViewName());
        v.owner = this;
        views.add(0, v);
        primaryView = v.getViewName();
    }

    // read in old data that doesn't have the security field set
    if (authorizationStrategy == null) {
        if (useSecurity == null || !useSecurity)
            authorizationStrategy = AuthorizationStrategy.UNSECURED;
        else
            authorizationStrategy = new LegacyAuthorizationStrategy();
    }
    if (securityRealm == null) {
        if (useSecurity == null || !useSecurity)
            setSecurityRealm(SecurityRealm.NO_AUTHENTICATION);
        else
            setSecurityRealm(new LegacySecurityRealm());
    } else {
        // force the set to proxy
        setSecurityRealm(securityRealm);
    }

    if (useSecurity != null && !useSecurity) {
        // forced reset to the unsecure mode.
        // this works as an escape hatch for people who locked themselves out.
        authorizationStrategy = AuthorizationStrategy.UNSECURED;
        setSecurityRealm(SecurityRealm.NO_AUTHENTICATION);
    }

    // Initialize the filter with the crumb issuer
    setCrumbIssuer(crumbIssuer);

    // auto register root actions
    actions.addAll(getExtensionList(RootAction.class));

    LOGGER.info(String.format("Took %s ms to load", System.currentTimeMillis() - startTime));
    if (KILL_AFTER_LOAD)
        System.exit(0);
}
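The pattern above — read the current thread's name with getName(), set a task-specific name for diagnostics, and restore the saved name in a finally block — recurs in several examples below (see also the Jenkins reactor example). A minimal, self-contained sketch of that pattern; the task label is illustrative:

// Sketch: temporarily rename the current thread so thread dumps and
// log lines identify the work in progress, then restore the name.
Runnable task = () -> {
    Thread t = Thread.currentThread();
    String savedName = t.getName();        // remember the original name
    t.setName("Loading jobs/my-project");  // illustrative task label
    try {
        // ... do the actual work here ...
    } finally {
        t.setName(savedName);              // always restore, even on failure
    }
};
new Thread(task, "loader-1").start();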
From source file:edu.brown.hstore.HStoreSite.java
/**
 * Constructor
 * @param coordinators
 * @param p_estimator
 */
protected HStoreSite(int site_id, CatalogContext catalogContext, HStoreConf hstore_conf) {
    assert (hstore_conf != null);
    assert (catalogContext != null);
    this.hstore_conf = hstore_conf;
    this.catalogContext = catalogContext;

    this.catalog_site = this.catalogContext.getSiteById(site_id);
    if (this.catalog_site == null)
        throw new RuntimeException("Invalid site #" + site_id);
    this.catalog_host = this.catalog_site.getHost();
    this.site_id = this.catalog_site.getId();
    this.site_name = HStoreThreadManager.getThreadName(this.site_id, null);

    final int num_partitions = this.catalogContext.numberOfPartitions;
    this.local_partitions.addAll(CatalogUtil.getLocalPartitionIds(catalog_site));
    int num_local_partitions = this.local_partitions.size();

    for (Status s : Status.values()) {
        this.deletable_txns.put(s, new ConcurrentLinkedQueue<Long>());
    } // FOR

    this.executors = new PartitionExecutor[num_partitions];
    this.executor_threads = new Thread[num_partitions];
    this.depTrackers = new DependencyTracker[num_partitions];

    // Get the hasher we will use for this HStoreSite
    this.hasher = ClassUtil.newInstance(hstore_conf.global.hasher_class,
            new Object[] { this.catalogContext, num_partitions },
            new Class<?>[] { CatalogContext.class, int.class });
    this.p_estimator = new PartitionEstimator(this.catalogContext, this.hasher);
    this.remoteTxnEstimator = new RemoteEstimator(this.p_estimator);

    // ARIES
    if (hstore_conf.site.aries) {
        // Don't use both recovery modes
        assert (hstore_conf.site.snapshot == false);
        LOG.warn("Starting ARIES recovery at site");
        String siteName = HStoreThreadManager.formatSiteName(this.getSiteId());
        String ariesSiteDirPath = hstore_conf.site.aries_dir + File.separatorChar
                + siteName + File.separatorChar;
        this.m_ariesLogFileName = ariesSiteDirPath + m_ariesDefaultLogFileName;
        int numPartitionsPerSite = this.catalog_site.getPartitions().size();
        int numSites = this.catalogContext.numberOfSites;
        LOG.warn("ARIES : Log Native creation :: numSites : " + numSites
                + " numPartitionsPerSite : " + numPartitionsPerSite);
        this.m_ariesLog = new AriesLogNative(numSites, numPartitionsPerSite, this.m_ariesLogFileName);
        this.m_recoveryLog = new VoltLogger("RECOVERY");
    }

    // **IMPORTANT**
    // Always clear out the CatalogUtil and BatchPlanner before we start our new HStoreSite
    // TODO: Move this cache information into CatalogContext
    CatalogUtil.clearCache(this.catalogContext.database);
    BatchPlanner.clear(this.catalogContext.numberOfPartitions);
    TransactionCounter.resetAll(this.catalogContext);

    // Only preload stuff if we were asked to
    if (hstore_conf.site.preload) {
        if (debug.val)
            LOG.debug("Preloading cached objects");
        try {
            // Don't forget our CatalogUtil friend!
            CatalogUtil.preload(this.catalogContext.database);
            // Load up everything in the QueryPlanUtil
            PlanNodeUtil.preload(this.catalogContext.database);
            // Then load up everything in the PartitionEstimator
            this.p_estimator.preload();
        } catch (Exception ex) {
            throw new RuntimeException("Failed to prepare HStoreSite", ex);
        }
    }

    // Offset Hack
    this.local_partition_offsets = new int[num_partitions];
    Arrays.fill(this.local_partition_offsets, HStoreConstants.NULL_PARTITION_ID);
    int offset = 0;
    for (int partition : this.local_partitions) {
        this.local_partition_offsets[partition] = offset++;
    } // FOR

    // -------------------------------
    // THREADS
    // -------------------------------

    EventObserver<Pair<Thread, Throwable>> observer = new EventObserver<Pair<Thread, Throwable>>() {
        @Override
        public void update(EventObservable<Pair<Thread, Throwable>> o, Pair<Thread, Throwable> arg) {
            Thread thread = arg.getFirst();
            Throwable error = arg.getSecond();
            String threadName = "<unknown>";
            if (thread != null)
                threadName = thread.getName();
            LOG.fatal(String.format("Thread %s had a fatal error: %s",
                    threadName, (error != null ? error.getMessage() : null)));
            error.printStackTrace();
            hstore_coordinator.shutdownClusterBlocking(error);
        }
    };
    this.exceptionHandler.addObserver(observer);
    Thread.setDefaultUncaughtExceptionHandler(this.exceptionHandler);

    // HStoreSite Thread Manager (this always gets invoked first)
    this.threadManager = new HStoreThreadManager(this);

    // Distributed Transaction Queue Manager
    this.txnQueueManager = new TransactionQueueManager(this);

    // One Transaction Cleaner for every eight partitions
    int numCleaners = (int) Math.ceil(num_local_partitions / 8.0);
    for (int i = 0; i < numCleaners; i++) {
        this.txnCleaners.add(new TransactionCleaner(this));
    } // FOR

    // MapReduce Transaction helper thread
    if (catalogContext.getMapReduceProcedures().isEmpty() == false) {
        this.mr_helper = new MapReduceHelperThread(this);
    } else {
        this.mr_helper = null;
    }

    // Separate TransactionIdManager per partition
    if (hstore_conf.site.txn_partition_id_managers) {
        this.txnIdManagers = new TransactionIdManager[num_partitions];
        for (int partition : this.local_partitions) {
            this.txnIdManagers[partition] = new TransactionIdManager(partition);
        } // FOR
    }
    // Single TransactionIdManager for the entire site
    else {
        this.txnIdManagers = new TransactionIdManager[] { new TransactionIdManager(this.site_id) };
    }

    // Command Logger
    if (hstore_conf.site.commandlog_enable) {
        // It would be nice if we could come up with a unique name for this
        // invocation of the system (like the cluster instanceId). But for now
        // we'll just write out to our directory...
        java.util.Date date = new java.util.Date();
        Timestamp current = new Timestamp(date.getTime());
        String nonce = Long.toString(current.getTime());
        File logFile = new File(hstore_conf.site.commandlog_dir + File.separator
                + this.getSiteName().toLowerCase() + "_" + nonce + CommandLogWriter.LOG_OUTPUT_EXT);
        this.commandLogger = new CommandLogWriter(this, logFile);
    } else {
        this.commandLogger = null;
    }

    // AdHoc Support
    if (hstore_conf.site.exec_adhoc_sql) {
        this.asyncCompilerWorkThread = new AsyncCompilerWorkThread(this, this.site_id);
    } else {
        this.asyncCompilerWorkThread = null;
    }

    // The AntiCacheManager will allow us to do special things down in the EE
    // for evicted tuples
    if (hstore_conf.site.anticache_enable) {
        this.anticacheManager = new AntiCacheManager(this);
    } else {
        this.anticacheManager = null;
    }

    // -------------------------------
    // NETWORK SETUP
    // -------------------------------
    this.voltNetwork = new VoltNetwork(this);
    this.clientInterface = new ClientInterface(this, this.catalog_site.getProc_port());

    // -------------------------------
    // TRANSACTION ESTIMATION
    // -------------------------------
    // Transaction Properties Initializer
    this.txnInitializer = new TransactionInitializer(this);

    // CACHED MESSAGES
    this.REJECTION_MESSAGE = "Transaction was rejected by " + this.getSiteName();

    // -------------------------------
    // STATS SETUP
    // -------------------------------
    this.initTxnProcessors();
    this.initStatSources();

    // Profiling
    if (hstore_conf.site.profiling) {
        this.profiler = new HStoreSiteProfiler();
        if (hstore_conf.site.status_exec_info) {
            this.profiler.network_idle.resetOnEventObservable(this.startWorkload_observable);
        }
    } else {
        this.profiler = null;
    }

    this.status_monitor = new HStoreSiteStatus(this, hstore_conf);

    LoggerUtil.refreshLogging(hstore_conf.global.log_refresh);
}
From source file:jenkins.model.Jenkins.java
/**
 * Executes a reactor.
 *
 * @param is
 *      If non-null, this can be consulted for ignoring some tasks. Only used during the initialization of Jenkins.
 */
private void executeReactor(final InitStrategy is, TaskBuilder... builders)
        throws IOException, InterruptedException, ReactorException {
    Reactor reactor = new Reactor(builders) {
        /**
         * Sets the thread name to the task for better diagnostics.
         */
        @Override
        protected void runTask(Task task) throws Exception {
            if (is != null && is.skipInitTask(task))
                return;

            ACL.impersonate(ACL.SYSTEM); // full access in the initialization thread
            String taskName = task.getDisplayName();

            Thread t = Thread.currentThread();
            String name = t.getName();
            if (taskName != null)
                t.setName(taskName);
            try {
                long start = System.currentTimeMillis();
                super.runTask(task);
                if (LOG_STARTUP_PERFORMANCE)
                    LOGGER.info(String.format("Took %dms for %s by %s",
                            System.currentTimeMillis() - start, taskName, name));
            } finally {
                t.setName(name);
                SecurityContextHolder.clearContext();
            }
        }
    };

    new InitReactorRunner() {
        @Override
        protected void onInitMilestoneAttained(InitMilestone milestone) {
            initLevel = milestone;
        }
    }.run(reactor);
}
From source file:org.apache.hadoop.hive.ql.parse.TestReplicationScenarios.java
@Test
public void testBootstrapWithDropPartitionedTable() throws IOException {
    String name = testName.getMethodName();
    String dbName = createDB(name, driver);
    String replDbName = dbName + "_dupe";
    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver);

    String[] ptn_data = new String[] { "eleven", "twelve" };
    String[] empty = new String[] {};
    String ptn_locn = new Path(TEST_PATH, name + "_ptn").toUri().getPath();

    createTestDataFile(ptn_locn, ptn_data);
    run("LOAD DATA LOCAL INPATH '" + ptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)",
            driver);

    BehaviourInjection<Table, Table> ptnedTableRenamer = new BehaviourInjection<Table, Table>() {
        boolean success = false;

        @Nullable
        @Override
        public Table apply(@Nullable Table table) {
            if (injectionPathCalled) {
                nonInjectedPathCalled = true;
            } else {
                // getTable is invoked after fetching the table names
                injectionPathCalled = true;
                Thread t = new Thread(new Runnable() {
                    @Override
                    public void run() {
                        LOG.info("Entered new thread");
                        IDriver driver2 = DriverFactory.newDriver(hconf);
                        SessionState.start(new CliSessionState(hconf));
                        CommandProcessorResponse ret = driver2.run("DROP TABLE " + dbName + ".ptned");
                        success = (ret.getException() == null);
                        assertTrue(success);
                        LOG.info("Exit new thread success - {}", success, ret.getException());
                    }
                });
                t.start();
                LOG.info("Created new thread {}", t.getName());
                try {
                    t.join();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
            return table;
        }
    };
    InjectableBehaviourObjectStore.setGetTableBehaviour(ptnedTableRenamer);

    Tuple bootstrap = null;
    try {
        bootstrap = bootstrapLoadAndVerify(dbName, replDbName);
        ptnedTableRenamer.assertInjectionsPerformed(true, true);
    } finally {
        InjectableBehaviourObjectStore.resetGetTableBehaviour(); // reset the behaviour
    }

    incrementalLoadAndVerify(dbName, bootstrap.lastReplId, replDbName);
    verifyIfTableNotExist(replDbName, "ptned", metaStoreClientMirror);
}
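The test above spawns a helper thread, logs its auto-generated name, and joins it. A compact sketch of that spawn/log/join idiom, reduced to the essentials (the work inside the thread is illustrative):

// Sketch: start a helper thread, log its default name, wait for completion.
Thread t = new Thread(() -> System.out.println("Entered new thread"));
t.start();
System.out.println("Created new thread " + t.getName()); // e.g. "Thread-0"
try {
    t.join(); // block until the helper finishes
} catch (InterruptedException e) {
    throw new RuntimeException(e);
}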
From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java
/**
 * Stores a reference to the coprocessor environment provided by the
 * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost}
 * from the region where this coprocessor is loaded.
 * Since this is a coprocessor endpoint, it always expects to be loaded
 * on a table region, so always expects this to be an instance of
 * {@link RegionCoprocessorEnvironment}.
 * @param env the environment provided by the coprocessor host
 * @throws IOException if the provided environment is not an instance of
 * {@code RegionCoprocessorEnvironment}
 */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionCoprocessorEnvironment) {
        this.env = (RegionCoprocessorEnvironment) env;
    } else {
        throw new CoprocessorException(
                "SsccRegionEndpoint coprocessor: start - Must be loaded on a table region!");
    }

    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: start");

    RegionCoprocessorEnvironment tmp_env = (RegionCoprocessorEnvironment) env;
    this.m_Region = tmp_env.getRegion();
    this.regionInfo = this.m_Region.getRegionInfo();
    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    this.fs = this.m_Region.getFilesystem();

    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();

    synchronized (stoppableLock) {
        try {
            this.transactionLeaseTimeout = HBaseConfiguration.getInt(conf,
                    HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
                    HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, DEFAULT_LEASE_TIME);

            this.scannerLeaseTimeoutPeriod = HBaseConfiguration.getInt(conf,
                    HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
                    HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
                    HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);

            scannerThreadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);

            this.cleanTimer = conf.getInt(SLEEP_CONF, DEFAULT_SLEEP);

            if (this.transactionLeases == null)
                this.transactionLeases = new Leases(LEASE_CHECK_FREQUENCY);

            if (LOG.isTraceEnabled())
                LOG.trace("Transaction lease time: " + transactionLeaseTimeout);
            if (LOG.isTraceEnabled())
                LOG.trace("Scanner lease time: " + scannerThreadWakeFrequency);

            UncaughtExceptionHandler handler = new UncaughtExceptionHandler() {
                public void uncaughtException(final Thread t, final Throwable e) {
                    LOG.fatal("CleanOldTransactionChore uncaughtException: " + t.getName(), e);
                }
            };

            String n = Thread.currentThread().getName();

            if (TransactionalLeasesThread == null) {
                TransactionalLeasesThread = new Thread(this.transactionLeases);
                if (TransactionalLeasesThread != null) {
                    Threads.setDaemonThreadRunning(TransactionalLeasesThread, "Transactional leases");
                }
            }
        } catch (Exception e) {
            throw new CoprocessorException("SsccRegionEndpoint coprocessor: start threw exception " + e);
        }
    }

    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    this.fs = this.m_Region.getFilesystem();
    tHLog = this.m_Region.getLog();

    RegionServerServices rss = tmp_env.getRegionServerServices();
    ServerName sn = rss.getServerName();
    lv_hostName = sn.getHostname();
    lv_port = sn.getPort();
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: Hostname " + lv_hostName + " port " + lv_port);

    this.regionInfo = this.m_Region.getRegionInfo();
    this.nextLogSequenceId = this.m_Region.getSequenceId();
    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    zkw1 = rss.getZooKeeper();

    this.configuredEarlyLogging = conf.getBoolean("hbase.regionserver.region.transactional.earlylogging", false);

    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: get the reference from Region CoprocessorEnvironment ");

    if (tmp_env.getSharedData().isEmpty())
        if (LOG.isTraceEnabled())
            LOG.trace("SsccRegionEndpoint coprocessor: shared map is empty ");
        else if (LOG.isTraceEnabled())
            LOG.trace("SsccRegionEndpoint coprocessor: shared map is NOT empty Yes ... ");

    transactionsByIdTestz = TrxRegionObserver.getRefMap();
    if (transactionsByIdTestz.isEmpty())
        if (LOG.isTraceEnabled())
            LOG.trace("SsccRegionEndpoint coprocessor: reference map is empty ");
        else if (LOG.isTraceEnabled())
            LOG.trace("SsccRegionEndpoint coprocessor: reference map is NOT empty Yes ... ");

    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: UUU Region " + this.m_Region.getRegionNameAsString()
                + " check indoubt list from reference map ");

    indoubtTransactionsById = (TreeMap<Long, WALEdit>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeypendingTransactionsById);

    indoubtTransactionsCountByTmid = (TreeMap<Integer, Integer>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyindoubtTransactionsCountByTmid);

    if (indoubtTransactionsCountByTmid != null) {
        if (LOG.isTraceEnabled())
            LOG.trace("SsccRegionEndpoint coprocessor:OOO successfully get the reference from Region CoprocessorEnvironment ");
    }

    try {
        idServer = new IdTm(false);
    } catch (Exception e) {
        LOG.error("SsccRegionEndpoint coprocessor: unable to new IdTm " + e);
    }

    long logSeqId = nextLogSequenceId.get();
    long currentTime = System.currentTimeMillis();
    long ssccSeqId = currentTime > logSeqId ? currentTime : logSeqId;
    nextSsccSequenceId = new AtomicLong(ssccSeqId);
    LOG.info("Generate SequenceID start from " + nextSsccSequenceId);

    LOG.info("SsccRegionEndpoint coprocessor: start");
}
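Both HBase coprocessor examples in this list install an UncaughtExceptionHandler that reports the dying thread by name. A minimal sketch of that pattern in plain Java, outside any HBase machinery (handler body and thread name are illustrative):

// Sketch: report uncaught exceptions together with the failing thread's name.
Thread.UncaughtExceptionHandler handler = (t, e) ->
        System.err.println("Uncaught exception in thread " + t.getName() + ": " + e);

Thread worker = new Thread(() -> {
    throw new IllegalStateException("boom"); // illustrative failure
}, "chore-thread");
worker.setUncaughtExceptionHandler(handler);
worker.start();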
From source file:org.apache.lens.server.query.QueryExecutionServiceImpl.java
public void prepareStopping() {
    super.prepareStopping();
    Thread[] threadsToStop = new Thread[] { querySubmitter, statusPoller, queryPurger, prepareQueryPurger };

    // Nudge the threads to stop
    for (Thread th : threadsToStop) {
        th.interrupt();
    }

    // Nudge executor pools to stop
    // Hard shutdown, since it doesn't matter whether waiting queries were selected, all will be
    // selected in the next restart
    waitingQueriesSelectionSvc.shutdownNow();

    // Soft shutdown, wait for current estimate tasks
    estimatePool.shutdown();

    // shutdown launcher pool
    queryLauncherPool.shutdown();

    // Soft shutdown for result purger too. Purging shouldn't take much time.
    if (null != queryResultPurger) {
        queryResultPurger.shutdown();
    }

    // shutdown query expirer
    queryExpirer.shutdownNow();

    // Soft shutdown right now, will await termination in this method itself, since cancellation pool
    // should be terminated before query state gets persisted.
    queryCancellationPool.shutdown();

    // Join the threads.
    for (Thread th : threadsToStop) {
        try {
            log.debug("Waiting for {}", th.getName());
            th.join();
        } catch (InterruptedException e) {
            log.error("Error waiting for thread: {}", th.getName(), e);
        }
    }

    // Needs to be done before queries' states are persisted, hence doing here. Await of other
    // executor services can be done after persistence, hence they are done in #stop
    awaitTermination(queryLauncherPool);
    awaitTermination(queryCancellationPool);
}
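The shutdown sequence above — interrupt each worker thread, then join it, logging the thread's name at each step — can be reduced to a small, self-contained sketch (the workers and log format are illustrative):

// Sketch: interrupt-then-join shutdown, naming each thread in the log output.
Thread[] workers = {
    new Thread(() -> { /* poll until interrupted */ }, "status-poller"),
    new Thread(() -> { /* purge until interrupted */ }, "query-purger"),
};
for (Thread w : workers) w.start();

for (Thread w : workers) w.interrupt();    // nudge the threads to stop
for (Thread w : workers) {
    try {
        System.out.println("Waiting for " + w.getName());
        w.join();                          // wait for each one to exit
    } catch (InterruptedException e) {
        System.err.println("Interrupted while waiting for " + w.getName());
        Thread.currentThread().interrupt(); // preserve interrupt status
    }
}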
From source file:org.apache.hadoop.hive.ql.parse.TestReplicationScenarios.java
@Test
public void testBootstrapWithConcurrentRename() throws IOException {
    String name = testName.getMethodName();
    String dbName = createDB(name, driver);
    String replDbName = dbName + "_dupe";
    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver);

    String[] ptn_data = new String[] { "eleven", "twelve" };
    String[] empty = new String[] {};
    String ptn_locn = new Path(TEST_PATH, name + "_ptn").toUri().getPath();

    createTestDataFile(ptn_locn, ptn_data);
    run("LOAD DATA LOCAL INPATH '" + ptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)",
            driver);

    BehaviourInjection<Table, Table> ptnedTableRenamer = new BehaviourInjection<Table, Table>() {
        boolean success = false;

        @Nullable
        @Override
        public Table apply(@Nullable Table table) {
            if (injectionPathCalled) {
                nonInjectedPathCalled = true;
            } else {
                // getTable is invoked after fetching the table names
                injectionPathCalled = true;
                Thread t = new Thread(new Runnable() {
                    @Override
                    public void run() {
                        LOG.info("Entered new thread");
                        IDriver driver2 = DriverFactory.newDriver(hconf);
                        SessionState.start(new CliSessionState(hconf));
                        CommandProcessorResponse ret = driver2.run(
                                "ALTER TABLE " + dbName + ".ptned PARTITION (b=1) RENAME TO PARTITION (b=10)");
                        success = (ret.getException() == null);
                        assertFalse(success);
                        ret = driver2
                                .run("ALTER TABLE " + dbName + ".ptned RENAME TO " + dbName + ".ptned_renamed");
                        success = (ret.getException() == null);
                        assertFalse(success);
                        LOG.info("Exit new thread success - {}", success);
                    }
                });
                t.start();
                LOG.info("Created new thread {}", t.getName());
                try {
                    t.join();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            }
            return table;
        }
    };
    InjectableBehaviourObjectStore.setGetTableBehaviour(ptnedTableRenamer);
    try {
        // The intermediate rename would've failed as bootstrap dump in progress
        bootstrapLoadAndVerify(dbName, replDbName);
        ptnedTableRenamer.assertInjectionsPerformed(true, true);
    } finally {
        InjectableBehaviourObjectStore.resetGetTableBehaviour(); // reset the behaviour
    }

    // The ptned table should be there in both source and target as rename was not successful
    verifyRun("SELECT a from " + dbName + ".ptned WHERE (b=1) ORDER BY a", ptn_data, driver);
    verifyRun("SELECT a from " + replDbName + ".ptned WHERE (b=1) ORDER BY a", ptn_data, driverMirror);

    // Verify if Rename after bootstrap is successful
    run("ALTER TABLE " + dbName + ".ptned PARTITION (b=1) RENAME TO PARTITION (b=10)", driver);
    verifyIfPartitionNotExist(dbName, "ptned", new ArrayList<>(Arrays.asList("1")), metaStoreClient);
    run("ALTER TABLE " + dbName + ".ptned RENAME TO " + dbName + ".ptned_renamed", driver);
    verifyIfTableNotExist(dbName, "ptned", metaStoreClient);
    verifyRun("SELECT a from " + dbName + ".ptned_renamed WHERE (b=10) ORDER BY a", ptn_data, driver);
}
From source file:org.sakaiproject.citation.tool.CitationHelperAction.java
/**
 *
 * @param data
 */
public void doCancelSearch(RunData data) {
    // get state and params
    SessionState state = ((JetspeedRunData) data)
            .getPortletSessionState(((JetspeedRunData) data).getJs_peid());
    ParameterParser params = data.getParameters();
    int requestStateId = params.getInt("requestStateId", 0);
    restoreRequestState(state,
            new String[] { CitationHelper.RESOURCES_REQUEST_PREFIX, CitationHelper.CITATION_PREFIX },
            requestStateId);

    // cancel the running search
    ActiveSearch search = (ActiveSearch) state.getAttribute(STATE_SEARCH_INFO);
    if (search != null) {
        Thread searchThread = search.getSearchThread();
        if (searchThread != null) {
            try {
                searchThread.interrupt();
            } catch (SecurityException se) {
                // not able to interrupt search
                logger.warn("doSearch() [in ThreadGroup "
                        + Thread.currentThread().getThreadGroup().getName()
                        + "] unable to interrupt search Thread [name=" + searchThread.getName()
                        + ", id=" + searchThread.getId()
                        + ", group=" + searchThread.getThreadGroup().getName() + "]");
            }
        }
    }
}
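The Sakai handler above logs a thread's name alongside its id and ThreadGroup name to identify it precisely in warnings. A small sketch of building that kind of diagnostic string (thread and group names are illustrative; note that getThreadGroup() returns null once a thread has terminated):

// Sketch: thread identity for diagnostics - name, id, and thread group name.
Thread worker = new Thread(() -> { /* search work */ }, "search-worker");
ThreadGroup group = worker.getThreadGroup(); // the creating thread's group by default
System.out.println("Thread [name=" + worker.getName()
        + ", id=" + worker.getId()
        + ", group=" + (group != null ? group.getName() : "terminated") + "]");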
From source file:org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint.java
/**
 * Stores a reference to the coprocessor environment provided by the
 * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost}
 * from the region where this coprocessor is loaded.
 * Since this is a coprocessor endpoint, it always expects to be loaded
 * on a table region, so always expects this to be an instance of
 * {@link RegionCoprocessorEnvironment}.
 * @param env the environment provided by the coprocessor host
 * @throws IOException if the provided environment is not an instance of
 * {@code RegionCoprocessorEnvironment}
 */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    if (env instanceof RegionCoprocessorEnvironment) {
        this.env = (RegionCoprocessorEnvironment) env;
    } else {
        throw new CoprocessorException(
                "TrxRegionEndpoint coprocessor: start - Must be loaded on a table region!");
    }

    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: start");

    RegionCoprocessorEnvironment tmp_env = (RegionCoprocessorEnvironment) env;
    this.m_Region = tmp_env.getRegion();
    this.regionInfo = this.m_Region.getRegionInfo();
    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    this.fs = this.m_Region.getFilesystem();

    org.apache.hadoop.conf.Configuration conf = tmp_env.getConfiguration();

    synchronized (stoppableLock) {
        try {
            this.transactionLeaseTimeout = conf.getInt(LEASE_CONF, MINIMUM_LEASE_TIME);
            if (this.transactionLeaseTimeout < MINIMUM_LEASE_TIME) {
                if (LOG.isWarnEnabled())
                    LOG.warn("Transaction lease time: " + this.transactionLeaseTimeout
                            + ", was less than the minimum lease time. Now setting the timeout to the minimum default value: "
                            + MINIMUM_LEASE_TIME);
                this.transactionLeaseTimeout = MINIMUM_LEASE_TIME;
            }

            this.scannerLeaseTimeoutPeriod = HBaseConfiguration.getInt(conf,
                    HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
                    HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
                    HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);

            this.scannerThreadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);

            this.cleanTimer = conf.getInt(SLEEP_CONF, DEFAULT_SLEEP);

            this.memoryUsageThreshold = conf.getInt(MEMORY_THRESHOLD, DEFAULT_MEMORY_THRESHOLD);
            this.memoryUsagePerformGC = conf.getBoolean(MEMORY_PERFORM_GC, DEFAULT_MEMORY_PERFORM_GC);
            this.memoryUsageWarnOnly = conf.getBoolean(MEMORY_WARN_ONLY, DEFAULT_MEMORY_WARN_ONLY);
            this.memoryUsageTimer = conf.getInt(MEMORY_CONF, DEFAULT_MEMORY_SLEEP);

            this.suppressOutOfOrderProtocolException = conf.getBoolean(SUPPRESS_OOP, DEFAULT_SUPPRESS_OOP);

            if (this.transactionLeases == null)
                this.transactionLeases = new Leases(LEASE_CHECK_FREQUENCY);

            //if (this.scannerLeases == null)
            //    this.scannerLeases = new Leases(scannerThreadWakeFrequency);

            if (LOG.isTraceEnabled())
                LOG.trace("Transaction lease time: " + this.transactionLeaseTimeout
                        + " Scanner lease time: " + this.scannerThreadWakeFrequency
                        + ", Scanner lease timeout period: " + this.scannerLeaseTimeoutPeriod
                        + ", Clean timer: " + this.cleanTimer
                        + ", MemoryUsage timer: " + this.memoryUsageTimer
                        + ", MemoryUsageThreshold: " + this.memoryUsageThreshold
                        + ", MemoryUsagePerformGC: " + this.memoryUsagePerformGC
                        + ", MemoryUsageWarnOnly: " + this.memoryUsageWarnOnly
                        + ", Suppress OutOfOrderProtocolException: " + this.suppressOutOfOrderProtocolException);

            // Start the clean core thread
            this.cleanOldTransactionsThread = new CleanOldTransactionsChore(this, cleanTimer, stoppable);

            UncaughtExceptionHandler handler = new UncaughtExceptionHandler() {
                public void uncaughtException(final Thread t, final Throwable e) {
                    LOG.fatal("CleanOldTransactionChore uncaughtException: " + t.getName(), e);
                }
            };

            String n = Thread.currentThread().getName();
            ChoreThread = new Thread(this.cleanOldTransactionsThread);
            Threads.setDaemonThreadRunning(ChoreThread, n + ".oldTransactionCleaner", handler);

            // Start the memory usage chore thread if the threshold
            // selected is greater than the default of 100%.
            if (memoryUsageThreshold < DEFAULT_MEMORY_THRESHOLD && memoryUsageThread == null) {
                LOG.warn("TrxRegionEndpoint coprocessor: start - starting memoryUsageThread");

                memoryUsageThread = new MemoryUsageChore(this, memoryUsageTimer, stoppable2);

                UncaughtExceptionHandler handler2 = new UncaughtExceptionHandler() {
                    public void uncaughtException(final Thread t, final Throwable e) {
                        LOG.fatal("MemoryUsageChore uncaughtException: " + t.getName(), e);
                    }
                };

                String n2 = Thread.currentThread().getName();
                ChoreThread2 = new Thread(memoryUsageThread);
                Threads.setDaemonThreadRunning(ChoreThread2, n2 + ".memoryUsage", handler2);
            }

            if (TransactionalLeasesThread == null) {
                TransactionalLeasesThread = new Thread(this.transactionLeases);
                if (TransactionalLeasesThread != null) {
                    Threads.setDaemonThreadRunning(TransactionalLeasesThread, "Transactional leases");
                }
            }

            /*
            if (ScannerLeasesThread == null) {
                ScannerLeasesThread = new Thread(this.scannerLeases);
                if (ScannerLeasesThread != null) {
                    Threads.setDaemonThreadRunning(ScannerLeasesThread, "Scanner leases");
                }
            }
            */
        } catch (Exception e) {
            throw new CoprocessorException("TrxRegionEndpoint coprocessor: start - Caught exception " + e);
        }
    }

    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    this.fs = this.m_Region.getFilesystem();
    tHLog = this.m_Region.getLog();

    RegionServerServices rss = tmp_env.getRegionServerServices();
    ServerName sn = rss.getServerName();
    lv_hostName = sn.getHostname();
    lv_port = sn.getPort();
    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: hostname " + lv_hostName + " port " + lv_port);

    this.regionInfo = this.m_Region.getRegionInfo();
    this.nextLogSequenceId = this.m_Region.getSequenceId();
    this.t_Region = (TransactionalRegion) tmp_env.getRegion();
    zkw1 = rss.getZooKeeper();

    this.configuredEarlyLogging = tmp_env.getConfiguration()
            .getBoolean("hbase.regionserver.region.transactional.earlylogging", false);
    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: early logging setting is " + this.configuredEarlyLogging
                + "\nTrxRegionEndpoint coprocessor: get the reference from Region CoprocessorEnvironment ");

    this.configuredConflictReinstate = tmp_env.getConfiguration()
            .getBoolean("hbase.regionserver.region.transactional.conflictreinstate", false);
    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: conflict reinstate setting is " + this.configuredConflictReinstate
                + "\nTrxRegionEndpoint coprocessor: get the reference from Region CoprocessorEnvironment ");

    if (tmp_env.getSharedData().isEmpty())
        if (LOG.isTraceEnabled())
            LOG.trace("TrxRegionEndpoint coprocessor: shared map is empty ");
        else if (LOG.isTraceEnabled())
            LOG.trace("TrxRegionEndpoint coprocessor: shared map is NOT empty");

    transactionsEPCPMap.put(this.m_Region.getRegionNameAsString() + trxkeyEPCPinstance, this);

    transactionsByIdTestz = TrxRegionObserver.getRefMap();
    if (transactionsByIdTestz.isEmpty()) {
        if (LOG.isTraceEnabled())
            LOG.trace("TrxRegionEndpoint coprocessor: reference map is empty ");
    } else {
        if (LOG.isTraceEnabled())
            LOG.trace("TrxRegionEndpoint coprocessor: reference map is NOT empty ");
    }

    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: Region " + this.m_Region.getRegionNameAsString()
                + " check indoubt list from reference map ");

    Map<Long, List<WALEdit>> indoubtTransactionsByIdCheck = (TreeMap<Long, List<WALEdit>>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeypendingTransactionsById);
    if (indoubtTransactionsByIdCheck != null) {
        this.indoubtTransactionsById = indoubtTransactionsByIdCheck;
    } else {
        transactionsByIdTestz.put(
                this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeypendingTransactionsById,
                this.indoubtTransactionsById);
    }

    Map<Integer, Integer> indoubtTransactionsCountByTmidCheck = (TreeMap<Integer, Integer>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyindoubtTransactionsCountByTmid);
    if (indoubtTransactionsCountByTmidCheck != null) {
        this.indoubtTransactionsCountByTmid = indoubtTransactionsCountByTmidCheck;
    } else {
        transactionsByIdTestz.put(
                this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyindoubtTransactionsCountByTmid,
                this.indoubtTransactionsCountByTmid);
    }

    Set<TrxTransactionState> commitPendingTransactionsCheck = (Set<TrxTransactionState>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeycommitPendingTransactions);
    if (commitPendingTransactionsCheck != null) {
        this.commitPendingTransactions = commitPendingTransactionsCheck;
    } else {
        transactionsByIdTestz.put(
                this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeycommitPendingTransactions,
                this.commitPendingTransactions);
    }

    ConcurrentHashMap<String, TrxTransactionState> transactionsByIdCheck = (ConcurrentHashMap<String, TrxTransactionState>) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeytransactionsById);
    if (transactionsByIdCheck != null) {
        this.transactionsById = transactionsByIdCheck;
    } else {
        transactionsByIdTestz.put(
                this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeytransactionsById,
                this.transactionsById);
    }

    AtomicBoolean closingCheck = (AtomicBoolean) transactionsByIdTestz
            .get(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyClosingVar);
    if (closingCheck != null) {
        this.closing = closingCheck;
    } else {
        transactionsByIdTestz.put(this.m_Region.getRegionNameAsString() + TrxRegionObserver.trxkeyClosingVar,
                this.closing);
    }

    // Set up the memoryBean from the ManagementFactory
    if (memoryUsageThreshold < DEFAULT_MEMORY_THRESHOLD)
        memoryBean = ManagementFactory.getMemoryMXBean();

    if (LOG.isTraceEnabled())
        LOG.trace("TrxRegionEndpoint coprocessor: start");
}