List of usage examples for java.util.concurrent.RejectedExecutionException#getMessage()
public String getMessage()
From source file:org.apache.accumulo.server.problems.ProblemReports.java
public void report(final ProblemReport pr) { synchronized (problemReports) { if (problemReports.containsKey(pr)) { return; }//from w ww . j av a 2 s . c o m problemReports.put(pr, System.currentTimeMillis()); } Runnable r = new Runnable() { @Override public void run() { log.debug("Filing problem report " + pr.getTableName() + " " + pr.getProblemType() + " " + pr.getResource()); try { if (isMeta(pr.getTableName())) { // file report in zookeeper pr.saveToZooKeeper(); } else { // file report in metadata table pr.saveToMetadataTable(context); } } catch (Exception e) { log.error("Failed to file problem report " + pr.getTableName() + " " + pr.getProblemType() + " " + pr.getResource(), e); } } }; try { reportExecutor.execute(new LoggingRunnable(log, r)); } catch (RejectedExecutionException ree) { log.error("Failed to report problem {} {} {} {}", pr.getTableName(), pr.getProblemType(), pr.getResource(), ree.getMessage()); } }
From source file:org.apache.accumulo.server.problems.ProblemReports.java
public void deleteProblemReport(String table, ProblemType pType, String resource) { final ProblemReport pr = new ProblemReport(table, pType, resource, null); Runnable r = new Runnable() { @Override/* www .j ava 2 s. co m*/ public void run() { try { if (isMeta(pr.getTableName())) { // file report in zookeeper pr.removeFromZooKeeper(); } else { // file report in metadata table pr.removeFromMetadataTable(context); } } catch (Exception e) { log.error("Failed to delete problem report {} {} {}", pr.getTableName(), pr.getProblemType(), pr.getResource(), e); } } }; try { reportExecutor.execute(new LoggingRunnable(log, r)); } catch (RejectedExecutionException ree) { log.error("Failed to delete problem report {} {} {} {}", pr.getTableName(), pr.getProblemType(), pr.getResource(), ree.getMessage()); } }
From source file:org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.java
/**
 * Transfer all the queues of the specified region server to this region server.
 * First it tries to grab a lock and if it works it will move the znodes and
 * finally will delete the old znodes.
 *
 * It creates one old source for any type of source of the old rs.
 *
 * @param rsZnode znode of the dead region server whose queues are claimed
 */
private void transferQueues(String rsZnode) {
    NodeFailoverWorker transfer = new NodeFailoverWorker(rsZnode, this.replicationQueues, this.replicationPeers,
        this.clusterId);
    try {
        this.executor.execute(transfer);
    } catch (RejectedExecutionException ex) {
        // Log the full exception, not just its message, so the reason for the
        // rejection (e.g. executor shut down or saturated) is diagnosable.
        LOG.info("Cancelling the transfer of " + rsZnode + " because of " + ex.getMessage(), ex);
    }
}
From source file:org.apache.lens.driver.jdbc.JDBCDriver.java
/** * Asynchronously execute the query.// w w w. j ava 2 s .com * * @param context The query context * @throws LensException the lens exception */ @Override public void executeAsync(QueryContext context) throws LensException { checkConfigured(); // Always use the driver rewritten query not user query. Since the // conf we are passing here is query context conf, we need to add jdbc xml in resource path String rewrittenQuery = rewriteQuery(context); JdbcQueryContext jdbcCtx = new JdbcQueryContext(context); jdbcCtx.setRewrittenQuery(rewrittenQuery); try { Future<QueryResult> future = asyncQueryPool.submit(new QueryCallable(jdbcCtx, logSegregationContext)); jdbcCtx.setResultFuture(future); } catch (RejectedExecutionException e) { log.error("Query execution rejected: {} reason:{}", context.getQueryHandle(), e.getMessage(), e); throw new LensException( "Query execution rejected: " + context.getQueryHandle() + " reason:" + e.getMessage(), e); } queryContextMap.put(context.getQueryHandle(), jdbcCtx); log.info("{} ExecuteAsync: {}", getFullyQualifiedName(), context.getQueryHandle()); }
From source file:org.cloudgraph.cassandra.graph.ParallelSubgraphTask.java
@Override public void start() { if (log.isDebugEnabled()) log.debug("start-" + level + "." + sequence); try {/*from ww w. j a va 2 s . co m*/ sharedAssembler.getExecutorService().execute(new Runnable() { @Override public void run() { // begin a breadth first traversal from the given node assemble(); shutdown(); } }); } catch (RejectedExecutionException e) { log.error(e.getMessage(), e); } }
From source file:org.cloudgraph.rdb.graph.ParallelSubgraphTask.java
@Override public void start() { if (log.isDebugEnabled()) log.debug("start-" + level + "." + sequence); try {/*from w w w .j a v a2 s . co m*/ sharedAssembler.getExecutorService().execute(new Runnable() { @Override public void run() { // begin a breadth first traversal from the given node assemble(); shutdown(); } }); } catch (RejectedExecutionException e) { log.error(e.getMessage(), e); } }
From source file:org.cloudifysource.rest.repo.UploadRepo.java
/**
 * Creates the single-threaded scheduled executor that periodically cleans the
 * upload directory, running the cleanup task every {@code cleanupTimeoutMillis}
 * milliseconds starting immediately.
 *
 * @throws RejectedExecutionException if the task cannot be scheduled
 */
private void createScheduledExecutor() {
    final CleanUploadDirRunnable cleanupThread = new CleanUploadDirRunnable(restUploadDir,
        cleanupTimeoutMillis);
    executor = Executors.newSingleThreadScheduledExecutor();
    try {
        executor.scheduleAtFixedRate(cleanupThread, 0, cleanupTimeoutMillis, TimeUnit.MILLISECONDS);
    } catch (RejectedExecutionException e) {
        // Use the Logger.log(Level, String, Throwable) overload so the stack
        // trace is recorded; appending only e.getMessage() loses it.
        logger.log(Level.WARNING, "failed to scheduled for execution - " + e.getMessage(), e);
        throw e;
    }
}