List of usage examples for java.security PrivilegedExceptionAction
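java.security.PrivilegedExceptionAction is the checked-exception counterpart of PrivilegedAction: its run() method may throw a checked exception, which AccessController.doPrivileged re-wraps in a PrivilegedActionException. All of the examples below follow the same basic shape. A minimal sketch of the pattern (the file path is a hypothetical stand-in, not taken from any example here):

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedOpenExample {
    public static FileInputStream openConfig() throws FileNotFoundException {
        try {
            // doPrivileged runs the action with this class's own privileges,
            // regardless of less-privileged callers further up the stack.
            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileInputStream>() {
                public FileInputStream run() throws FileNotFoundException {
                    return new FileInputStream("/etc/app/config.properties"); // hypothetical path
                }
            });
        } catch (PrivilegedActionException e) {
            // getException() returns the checked exception thrown by run()
            throw (FileNotFoundException) e.getException();
        }
    }
}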
From source file:org.apache.axis2.jaxws.message.databinding.JAXBUtils.java
private static Unmarshaller internalCreateUnmarshaller(final JAXBContext context) throws JAXBException {
    Unmarshaller unm;
    try {
        unm = (Unmarshaller) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws JAXBException {
                return context.createUnmarshaller();
            }
        });
    } catch (PrivilegedActionException e) {
        throw (JAXBException) e.getCause();
    }
    return unm;
}
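The raw PrivilegedExceptionAction above forces the cast to Unmarshaller. The generic form avoids it; a minimal sketch of the equivalent typed version (not the actual Axis2 code):

Unmarshaller unm;
try {
    unm = AccessController.doPrivileged(new PrivilegedExceptionAction<Unmarshaller>() {
        public Unmarshaller run() throws JAXBException {
            return context.createUnmarshaller();
        }
    });
} catch (PrivilegedActionException e) {
    // getCause() is the JAXBException thrown inside run()
    throw (JAXBException) e.getCause();
}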
From source file:org.apache.axis2.util.Utils.java
/**
 * Create a service object for a given service. The method first looks for
 * the {@link Constants#SERVICE_OBJECT_SUPPLIER} service parameter and if
 * this parameter is present, it will use the specified class to create the
 * service object. If the parameter is not present, it will create an
 * instance of the class specified by the {@link Constants#SERVICE_CLASS}
 * parameter.
 *
 * @param service
 *            the service
 * @return The service object or <code>null</code> if neither the
 *         {@link Constants#SERVICE_OBJECT_SUPPLIER} nor the
 *         {@link Constants#SERVICE_CLASS} parameter was found on the
 *         service, i.e. if the service doesn't specify how to create a
 *         service object. If the return value is non null, it will always
 *         be a newly created instance.
 * @throws AxisFault
 *             if an error occurred while attempting to instantiate the
 *             service object
 */
public static Object createServiceObject(final AxisService service) throws AxisFault {
    try {
        ClassLoader classLoader = service.getClassLoader();

        // allow alternative definition of makeNewServiceObject
        Parameter serviceObjectSupplierParam = service.getParameter(Constants.SERVICE_OBJECT_SUPPLIER);
        if (serviceObjectSupplierParam != null) {
            final Class<?> serviceObjectSupplierClass = Loader.loadClass(classLoader,
                    ((String) serviceObjectSupplierParam.getValue()).trim());
            if (ServiceObjectSupplier.class.isAssignableFrom(serviceObjectSupplierClass)) {
                ServiceObjectSupplier serviceObjectSupplier = org.apache.axis2.java.security.AccessController
                        .doPrivileged(new PrivilegedExceptionAction<ServiceObjectSupplier>() {
                            public ServiceObjectSupplier run()
                                    throws InstantiationException, IllegalAccessException {
                                return (ServiceObjectSupplier) serviceObjectSupplierClass.newInstance();
                            }
                        });
                return serviceObjectSupplier.getServiceObject(service);
            } else {
                // Prior to r439555 service object suppliers were actually defined by a static method
                // with a given signature defined on an arbitrary class. The ServiceObjectSupplier
                // interface was only introduced by r439555. We still support the old way, but
                // issue a warning inviting the user to provide a proper ServiceObjectSupplier
                // implementation.

                // Find static getServiceObject() method, call it if there
                final Method method = org.apache.axis2.java.security.AccessController
                        .doPrivileged(new PrivilegedExceptionAction<Method>() {
                            public Method run() throws NoSuchMethodException {
                                return serviceObjectSupplierClass.getMethod("getServiceObject",
                                        AxisService.class);
                            }
                        });
                log.warn("The class specified by the " + Constants.SERVICE_OBJECT_SUPPLIER
                        + " property on service " + service.getName() + " does not implement the "
                        + ServiceObjectSupplier.class.getName() + " interface. This is deprecated.");
                return org.apache.axis2.java.security.AccessController
                        .doPrivileged(new PrivilegedExceptionAction<Object>() {
                            public Object run() throws InvocationTargetException, IllegalAccessException,
                                    InstantiationException {
                                return method.invoke(serviceObjectSupplierClass.newInstance(),
                                        new Object[] { service });
                            }
                        });
            }
        } else {
            Parameter serviceClassParam = service.getParameter(Constants.SERVICE_CLASS);
            if (serviceClassParam != null) {
                String className = ((String) serviceClassParam.getValue()).trim();
                final Class<?> serviceClass = Loader.loadClass(classLoader, className);
                if (!Modifier.isPublic(serviceClass.getModifiers())) {
                    throw new AxisFault("Service class " + className
                            + " must have public as access Modifier");
                }
                return org.apache.axis2.java.security.AccessController
                        .doPrivileged(new PrivilegedExceptionAction<Object>() {
                            public Object run() throws InstantiationException, IllegalAccessException {
                                return serviceClass.newInstance();
                            }
                        });
            } else {
                return null;
            }
        }
    } catch (Exception e) {
        throw AxisFault.makeFault(e);
    }
}
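For comparison, the non-deprecated contract that the first branch looks for is the ServiceObjectSupplier interface. A hedged sketch of such a supplier (MyServiceImpl is a hypothetical application class, not part of Axis2):

public class MyServiceObjectSupplier implements ServiceObjectSupplier {
    public Object getServiceObject(AxisService service) throws AxisFault {
        // Construct a fresh service object per call, matching the
        // "newly created instance" behavior documented above.
        return new MyServiceImpl(service.getName());
    }
}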
From source file:org.apache.hadoop.hbase.regionserver.TestHStore.java
@Test
public void testHandleErrorsInFlush() throws Exception {
    LOG.info("Setting up a faulty file system that cannot write");

    final Configuration conf = HBaseConfiguration.create();
    User user = User.createUserForTesting(conf, "testhandleerrorsinflush", new String[] { "foo" });
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            // Make sure it worked (above is sensitive to caching details in hadoop core)
            FileSystem fs = FileSystem.get(conf);
            assertEquals(FaultyFileSystem.class, fs.getClass());

            // Initialize region
            init(name.getMethodName(), conf);

            LOG.info("Adding some data");
            store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null);
            store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null);
            store.add(new KeyValue(row, family, qf3, 1, (byte[]) null), null);

            LOG.info("Before flush, we should have no files");
            Collection<StoreFileInfo> files = store.getRegionFileSystem()
                    .getStoreFiles(store.getColumnFamilyName());
            assertEquals(0, files != null ? files.size() : 0);

            // flush
            try {
                LOG.info("Flushing");
                flush(1);
                fail("Didn't bubble up IOE!");
            } catch (IOException ioe) {
                assertTrue(ioe.getMessage().contains("Fault injected"));
            }

            LOG.info("After failed flush, we should still have no files!");
            files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
            assertEquals(0, files != null ? files.size() : 0);
            store.getHRegion().getWAL().close();
            return null;
        }
    });
    FileSystem.closeAllForUGI(user.getUGI());
}
From source file:org.apache.hadoop.hbase.regionserver.TestStore.java
@Test
public void testHandleErrorsInFlush() throws Exception {
    LOG.info("Setting up a faulty file system that cannot write");

    final Configuration conf = HBaseConfiguration.create();
    User user = User.createUserForTesting(conf, "testhandleerrorsinflush", new String[] { "foo" });
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
        public Object run() throws Exception {
            // Make sure it worked (above is sensitive to caching details in hadoop core)
            FileSystem fs = FileSystem.get(conf);
            Assert.assertEquals(FaultyFileSystem.class, fs.getClass());

            // Initialize region
            init(name.getMethodName(), conf);

            LOG.info("Adding some data");
            store.add(new KeyValue(row, family, qf1, 1, (byte[]) null));
            store.add(new KeyValue(row, family, qf2, 1, (byte[]) null));
            store.add(new KeyValue(row, family, qf3, 1, (byte[]) null));

            LOG.info("Before flush, we should have no files");
            Collection<StoreFileInfo> files = store.getRegionFileSystem()
                    .getStoreFiles(store.getColumnFamilyName());
            Assert.assertEquals(0, files != null ? files.size() : 0);

            // flush
            try {
                LOG.info("Flushing");
                flush(1);
                Assert.fail("Didn't bubble up IOE!");
            } catch (IOException ioe) {
                Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
            }

            LOG.info("After failed flush, we should still have no files!");
            files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
            Assert.assertEquals(0, files != null ? files.size() : 0);
            store.getHRegion().getLog().closeAndDelete();
            return null;
        }
    });
    FileSystem.closeAllForUGI(user.getUGI());
}
From source file:org.apache.catalina.core.StandardWrapper.java
/**
 * Load and initialize an instance of this servlet, if there is not already
 * at least one initialized instance. This can be used, for example, to
 * load servlets that are marked in the deployment descriptor to be loaded
 * at server startup time.
 */
public synchronized Servlet loadServlet() throws ServletException {

    // Nothing to do if we already have an instance or an instance pool
    if (!singleThreadModel && (instance != null))
        return instance;

    PrintStream out = System.out;
    if (swallowOutput) {
        SystemLogHandler.startCapture();
    }

    Servlet servlet;
    try {
        long t1 = System.currentTimeMillis();
        // If this "servlet" is really a JSP file, get the right class.
        // HOLD YOUR NOSE - this is a kludge that avoids having to do special
        // case Catalina-specific code in Jasper - it also requires that the
        // servlet path be replaced by the <jsp-file> element content in
        // order to be completely effective
        String actualClass = servletClass;
        if ((actualClass == null) && (jspFile != null)) {
            Wrapper jspWrapper = (Wrapper) ((Context) getParent()).findChild(Constants.JSP_SERVLET_NAME);
            if (jspWrapper != null)
                actualClass = jspWrapper.getServletClass();
        }

        // Complain if no servlet class has been specified
        if (actualClass == null) {
            unavailable(null);
            throw new ServletException(sm.getString("standardWrapper.notClass", getName()));
        }

        // Acquire an instance of the class loader to be used
        Loader loader = getLoader();
        if (loader == null) {
            unavailable(null);
            throw new ServletException(sm.getString("standardWrapper.missingLoader", getName()));
        }

        ClassLoader classLoader = loader.getClassLoader();

        // Special case class loader for a container provided servlet
        if (isContainerProvidedServlet(actualClass) && !((Context) getParent()).getPrivileged()) {
            // If it is a privileged context - using its own
            // class loader will work, since it's a child of the container
            // loader
            classLoader = this.getClass().getClassLoader();
        }

        // Load the specified servlet class from the appropriate class loader
        Class classClass = null;
        try {
            if (System.getSecurityManager() != null) {
                final ClassLoader fclassLoader = classLoader;
                final String factualClass = actualClass;
                try {
                    classClass = (Class) AccessController.doPrivileged(new PrivilegedExceptionAction() {
                        public Object run() throws Exception {
                            if (fclassLoader != null) {
                                return fclassLoader.loadClass(factualClass);
                            } else {
                                return Class.forName(factualClass);
                            }
                        }
                    });
                } catch (PrivilegedActionException pax) {
                    Exception ex = pax.getException();
                    if (ex instanceof ClassNotFoundException) {
                        throw (ClassNotFoundException) ex;
                    } else {
                        getServletContext().log("Error loading " + fclassLoader + " " + factualClass, ex);
                    }
                }
            } else {
                if (classLoader != null) {
                    classClass = classLoader.loadClass(actualClass);
                } else {
                    classClass = Class.forName(actualClass);
                }
            }
        } catch (ClassNotFoundException e) {
            unavailable(null);
            getServletContext().log("Error loading " + classLoader + " " + actualClass, e);
            throw new ServletException(sm.getString("standardWrapper.missingClass", actualClass), e);
        }

        if (classClass == null) {
            unavailable(null);
            throw new ServletException(sm.getString("standardWrapper.missingClass", actualClass));
        }

        // Instantiate and initialize an instance of the servlet class itself
        try {
            servlet = (Servlet) classClass.newInstance();
        } catch (ClassCastException e) {
            unavailable(null);
            // Restore the context ClassLoader
            throw new ServletException(sm.getString("standardWrapper.notServlet", actualClass), e);
        } catch (Throwable e) {
            unavailable(null);
            // Restore the context ClassLoader
            throw new ServletException(sm.getString("standardWrapper.instantiate", actualClass), e);
        }

        // Check if loading the servlet in this web application should be
        // allowed
        if (!isServletAllowed(servlet)) {
            throw new SecurityException(sm.getString("standardWrapper.privilegedServlet", actualClass));
        }

        // Special handling for ContainerServlet instances
        if ((servlet instanceof ContainerServlet)
                && (isContainerProvidedServlet(actualClass) || ((Context) getParent()).getPrivileged())) {
            ((ContainerServlet) servlet).setWrapper(this);
        }

        classLoadTime = (int) (System.currentTimeMillis() - t1);

        // Call the initialization method of this servlet
        try {
            instanceSupport.fireInstanceEvent(InstanceEvent.BEFORE_INIT_EVENT, servlet);
            if (System.getSecurityManager() != null) {
                Class[] classType = new Class[] { ServletConfig.class };
                Object[] args = new Object[] { ((ServletConfig) facade) };
                SecurityUtil.doAsPrivilege("init", servlet, classType, args);
            } else {
                servlet.init(facade);
            }
            // Invoke jspInit on JSP pages
            if ((loadOnStartup >= 0) && (jspFile != null)) {
                // Invoking jspInit
                DummyRequest req = new DummyRequest();
                req.setServletPath(jspFile);
                req.setQueryString("jsp_precompile=true");
                DummyResponse res = new DummyResponse();
                if (System.getSecurityManager() != null) {
                    Class[] classType = new Class[] { ServletRequest.class, ServletResponse.class };
                    Object[] args = new Object[] { req, res };
                    SecurityUtil.doAsPrivilege("service", servlet, classType, args);
                } else {
                    servlet.service(req, res);
                }
            }
            instanceSupport.fireInstanceEvent(InstanceEvent.AFTER_INIT_EVENT, servlet);
        } catch (UnavailableException f) {
            instanceSupport.fireInstanceEvent(InstanceEvent.AFTER_INIT_EVENT, servlet, f);
            unavailable(f);
            throw f;
        } catch (ServletException f) {
            instanceSupport.fireInstanceEvent(InstanceEvent.AFTER_INIT_EVENT, servlet, f);
            // If the servlet wanted to be unavailable it would have
            // said so, so do not call unavailable(null).
            throw f;
        } catch (Throwable f) {
            getServletContext().log("StandardWrapper.Throwable", f);
            instanceSupport.fireInstanceEvent(InstanceEvent.AFTER_INIT_EVENT, servlet, f);
            // If the servlet wanted to be unavailable it would have
            // said so, so do not call unavailable(null).
            throw new ServletException(sm.getString("standardWrapper.initException", getName()), f);
        }

        // Register our newly initialized instance
        singleThreadModel = servlet instanceof SingleThreadModel;
        if (singleThreadModel) {
            if (instancePool == null)
                instancePool = new Stack();
        }
        fireContainerEvent("load", this);

        loadTime = System.currentTimeMillis() - t1;
    } finally {
        if (swallowOutput) {
            String log = SystemLogHandler.stopCapture();
            if (log != null && log.length() > 0) {
                if (getServletContext() != null) {
                    getServletContext().log(log);
                } else {
                    out.println(log);
                }
            }
        }
    }
    return servlet;
}
From source file:org.apache.hadoop.hbase.security.access.TestCellACLWithMultipleVersions.java
@Test
public void testCellPermissionsForIncrementWithMultipleVersions() throws Exception {
    final byte[] TEST_ROW1 = Bytes.toBytes("r1");
    final byte[] TEST_Q1 = Bytes.toBytes("q1");
    final byte[] TEST_Q2 = Bytes.toBytes("q2");
    final byte[] ZERO = Bytes.toBytes(0L);

    final User user1 = User.createUserForTesting(conf, "user1", new String[0]);
    final User user2 = User.createUserForTesting(conf, "user2", new String[0]);

    verifyAllowed(new AccessTestAction() {
        @Override
        public Object run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Map<String, Permission> permsU1andOwner = new HashMap<String, Permission>();
                permsU1andOwner.put(user1.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU1andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Map<String, Permission> permsU2andOwner = new HashMap<String, Permission>();
                permsU2andOwner.put(user2.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                permsU2andOwner.put(USER_OWNER.getShortName(),
                        new Permission(Permission.Action.READ, Permission.Action.WRITE));
                Put p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 123, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q2, 123, ZERO);
                p.setACL(permsU2andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q1, 127, ZERO);
                p.setACL(permsU2andOwner);
                t.put(p);
                p = new Put(TEST_ROW1);
                p.add(TEST_FAMILY1, TEST_Q2, 127, ZERO);
                p.setACL(permsU1andOwner);
                t.put(p);
            } finally {
                t.close();
            }
            return null;
        }
    }, USER_OWNER);

    // Increment considers the TimeRange set on it.
    user1.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Increment inc = new Increment(TEST_ROW1);
                inc.setTimeRange(0, 123);
                inc.addColumn(TEST_FAMILY1, TEST_Q1, 2L);
                t.increment(inc);
                t.incrementColumnValue(TEST_ROW1, TEST_FAMILY1, TEST_Q2, 1L);
            } finally {
                t.close();
            }
            return null;
        }
    });

    user2.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            HTable t = new HTable(conf, TEST_TABLE.getTableName());
            try {
                Increment inc = new Increment(TEST_ROW1);
                inc.setTimeRange(0, 127);
                inc.addColumn(TEST_FAMILY1, TEST_Q2, 2L);
                t.increment(inc);
                fail();
            } catch (Exception e) {
                // expected: the latest cell of q2 carries an ACL that excludes user2
            } finally {
                t.close();
            }
            return null;
        }
    });
}
From source file:org.apache.axis2.jaxws.message.databinding.JAXBUtils.java
private static Marshaller internalCreateMarshaller(final JAXBContext context) throws JAXBException {
    Marshaller marshaller;
    try {
        marshaller = (Marshaller) AccessController.doPrivileged(new PrivilegedExceptionAction() {
            public Object run() throws JAXBException {
                return context.createMarshaller();
            }
        });
    } catch (PrivilegedActionException e) {
        throw (JAXBException) e.getCause();
    }
    return marshaller;
}
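internalCreateMarshaller and internalCreateUnmarshaller above differ only in the JAXBContext factory method they call. A hedged sketch of a shared helper that would remove the duplication (the helper name is hypothetical, not part of the Axis2 source); since PrivilegedExceptionAction has a single abstract method, Java 8 call sites can pass method references:

private static <T> T doPrivilegedJAXB(PrivilegedExceptionAction<T> action) throws JAXBException {
    try {
        return AccessController.doPrivileged(action);
    } catch (PrivilegedActionException e) {
        // run() only throws JAXBException here, so the cause cast is safe
        throw (JAXBException) e.getCause();
    }
}

// Usage:
//   Marshaller marshaller = doPrivilegedJAXB(context::createMarshaller);
//   Unmarshaller unm = doPrivilegedJAXB(context::createUnmarshaller);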
From source file:org.apache.hadoop.hbase.security.visibility.TestVisibilityLabelsWithDeletes.java
@Test
public void testDeleteColumnsWithAndWithoutVisibilityLabels() throws Exception {
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    Admin hBaseAdmin = TEST_UTIL.getAdmin();
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(colDesc);
    hBaseAdmin.createTable(desc);
    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
        Put put = new Put(row1);
        put.addColumn(fam, qual, value);
        put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        table.put(put);

        Delete d = new Delete(row1);
        // with visibility
        d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        d.addColumns(fam, qual, HConstants.LATEST_TIMESTAMP);
        table.delete(d);

        PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Scan s = new Scan();
                    ResultScanner scanner = table.getScanner(s);
                    Result[] next = scanner.next(3);
                    assertEquals(next.length, 0);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(scanAction);

        d = new Delete(row1);
        // without visibility
        d.addColumns(fam, qual, HConstants.LATEST_TIMESTAMP);
        table.delete(d);

        scanAction = new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                        Table table = connection.getTable(tableName)) {
                    Scan s = new Scan();
                    ResultScanner scanner = table.getScanner(s);
                    Result[] next = scanner.next(3);
                    assertEquals(next.length, 0);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(scanAction);
    }
}
From source file:org.apache.hadoop.mapred.JobInProgress.java
/**
 * Construct the splits, etc. This is invoked from an async
 * thread so that split-computation doesn't block anyone.
 */
public synchronized void initTasks() throws IOException, KillInterruptedException, UnknownHostException {
    if (tasksInited || isComplete()) {
        return;
    }
    synchronized (jobInitKillStatus) {
        if (jobInitKillStatus.killed || jobInitKillStatus.initStarted) {
            return;
        }
        jobInitKillStatus.initStarted = true;
    }

    LOG.info("Initializing " + jobId);
    final long startTimeFinal = this.startTime;
    // log job info as the user running the job
    try {
        userUGI.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                JobHistory.JobInfo.logSubmitted(getJobID(), conf, jobFile, startTimeFinal, hasRestarted());
                return null;
            }
        });
    } catch (InterruptedException ie) {
        throw new IOException(ie);
    }

    // log the job priority
    setPriority(this.priority);

    //
    // generate security keys needed by Tasks
    //
    generateAndStoreTokens();

    //
    // read input splits and create a map per split
    //
    TaskSplitMetaInfo[] splits = createSplits(jobId);
    if (numMapTasks != splits.length) {
        throw new IOException("Number of maps in JobConf doesn't match number of "
                + "received splits for job " + jobId + "! " + "numMapTasks=" + numMapTasks
                + ", #splits=" + splits.length);
    }
    numMapTasks = splits.length;

    // Sanity check the locations so we don't create/initialize unnecessary tasks
    for (TaskSplitMetaInfo split : splits) {
        NetUtils.verifyHostnames(split.getLocations());
    }

    jobtracker.getInstrumentation().addWaitingMaps(getJobID(), numMapTasks);
    jobtracker.getInstrumentation().addWaitingReduces(getJobID(), numReduceTasks);
    this.queueMetrics.addWaitingMaps(getJobID(), numMapTasks);
    this.queueMetrics.addWaitingReduces(getJobID(), numReduceTasks);

    maps = new TaskInProgress[numMapTasks];
    for (int i = 0; i < numMapTasks; ++i) {
        inputLength += splits[i].getInputDataLength();
        maps[i] = new TaskInProgress(jobId, jobFile, splits[i], jobtracker, conf, this, i, numSlotsPerMap);
    }
    LOG.info("Input size for job " + jobId + " = " + inputLength + ". Number of splits = "
            + splits.length);

    // Set localityWaitFactor before creating cache
    localityWaitFactor = conf.getFloat(LOCALITY_WAIT_FACTOR, DEFAULT_LOCALITY_WAIT_FACTOR);
    if (numMapTasks > 0) {
        nonRunningMapCache = createCache(splits, maxLevel);
    }

    // set the launch time
    this.launchTime = jobtracker.getClock().getTime();

    //
    // Create reduce tasks
    //
    this.reduces = new TaskInProgress[numReduceTasks];
    for (int i = 0; i < numReduceTasks; i++) {
        reduces[i] = new TaskInProgress(jobId, jobFile, numMapTasks, i, jobtracker, conf, this,
                numSlotsPerReduce);
        nonRunningReduces.add(reduces[i]);
    }

    // Calculate the minimum number of maps to be complete before
    // we should start scheduling reduces
    completedMapsForReduceSlowstart = (int) Math
            .ceil((conf.getFloat("mapred.reduce.slowstart.completed.maps",
                    DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART) * numMapTasks));

    // ... use the same for estimating the total output of all maps
    resourceEstimator.setThreshhold(completedMapsForReduceSlowstart);

    // create two cleanup tips, one map and one reduce.
    cleanup = new TaskInProgress[2];

    // cleanup map tip. This map doesn't use any splits. Just assign an empty
    // split.
    TaskSplitMetaInfo emptySplit = JobSplit.EMPTY_TASK_SPLIT;
    cleanup[0] = new TaskInProgress(jobId, jobFile, emptySplit, jobtracker, conf, this, numMapTasks, 1);
    cleanup[0].setJobCleanupTask();

    // cleanup reduce tip.
    cleanup[1] = new TaskInProgress(jobId, jobFile, numMapTasks, numReduceTasks, jobtracker, conf, this, 1);
    cleanup[1].setJobCleanupTask();

    // create two setup tips, one map and one reduce.
    setup = new TaskInProgress[2];

    // setup map tip. This map doesn't use any split. Just assign an empty
    // split.
    setup[0] = new TaskInProgress(jobId, jobFile, emptySplit, jobtracker, conf, this, numMapTasks + 1, 1);
    setup[0].setJobSetupTask();

    // setup reduce tip.
    setup[1] = new TaskInProgress(jobId, jobFile, numMapTasks, numReduceTasks + 1, jobtracker, conf, this, 1);
    setup[1].setJobSetupTask();

    synchronized (jobInitKillStatus) {
        jobInitKillStatus.initDone = true;
        // set this before the throw to make sure cleanup works properly
        tasksInited = true;
        if (jobInitKillStatus.killed) {
            throw new KillInterruptedException("Job " + jobId + " killed in init");
        }
    }

    JobHistory.JobInfo.logInited(profile.getJobID(), this.launchTime, numMapTasks, numReduceTasks);

    // Log the number of map and reduce tasks
    LOG.info("Job " + jobId + " initialized successfully with " + numMapTasks + " map tasks and "
            + numReduceTasks + " reduce tasks.");
}
From source file:org.apache.flume.sink.hdfs.BucketWriter.java
/**
 * Execute the callable on a separate thread and wait for the completion for
 * the specified amount of time in milliseconds. In case of timeout cancel
 * the callable and throw an IOException.
 */
private <T> T callWithTimeout(final CallRunner<T> callRunner) throws IOException, InterruptedException {
    Future<T> future = callTimeoutPool.submit(new Callable<T>() {
        @Override
        public T call() throws Exception {
            return proxyUser.execute(new PrivilegedExceptionAction<T>() {
                @Override
                public T run() throws Exception {
                    return callRunner.call();
                }
            });
        }
    });
    try {
        if (callTimeout > 0) {
            return future.get(callTimeout, TimeUnit.MILLISECONDS);
        } else {
            return future.get();
        }
    } catch (TimeoutException eT) {
        future.cancel(true);
        sinkCounter.incrementConnectionFailedCount();
        throw new IOException("Callable timed out after " + callTimeout + " ms" + " on file: " + bucketPath,
                eT);
    } catch (ExecutionException e1) {
        sinkCounter.incrementConnectionFailedCount();
        Throwable cause = e1.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        } else if (cause instanceof InterruptedException) {
            throw (InterruptedException) cause;
        } else if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
        } else if (cause instanceof Error) {
            throw (Error) cause;
        } else {
            throw new RuntimeException(e1);
        }
    } catch (CancellationException ce) {
        throw new InterruptedException("Blocked callable interrupted by rotation event");
    } catch (InterruptedException ex) {
        LOG.warn("Unexpected Exception " + ex.getMessage(), ex);
        throw ex;
    }
}
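Note the two layers of wrapping in callWithTimeout: the PrivilegedExceptionAction makes the HDFS call run under the identity of the configured proxy user, while the surrounding Future gives the caller a way to time out and cancel a call that has hung. Neither mechanism alone covers both concerns, which is why the action is submitted to the pool rather than executed inline.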