List of usage examples for java.io IOException initCause
public synchronized Throwable initCause(Throwable cause)
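Every example on this page follows the same wrap-and-rethrow idiom: catch a non-IOException, build an IOException (or a subclass), attach the original exception as the cause via initCause, and rethrow. A minimal sketch of the idiom (doWork is a hypothetical placeholder for any call that throws a checked, non-IO exception):

import java.io.IOException;

public class InitCauseSketch {
    static void example() throws IOException {
        try {
            doWork();
        } catch (Exception e) {
            IOException ioe = new IOException(e.getMessage());
            ioe.initCause(e); // keeps the original stack trace reachable via getCause()
            throw ioe;
        }
    }

    // Hypothetical stand-in for the failing operation.
    static void doWork() throws Exception {
        throw new Exception("boom");
    }
}

Two caveats worth remembering: initCause may be called at most once, and never on a Throwable whose cause was already set through a constructor (doing so throws IllegalStateException). Since Java 6, IOException also offers IOException(String, Throwable) and IOException(Throwable) constructors, so new IOException(message, e) is usually the cleaner choice; initCause remains necessary for subclasses such as FileNotFoundException that expose no cause-accepting constructor, as the S3A example near the end of this page shows.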
From source file:org.cloudata.core.client.SortDirectUploader.java
@Override
public synchronized void close() throws IOException {
    LOG.info("Started Uploader closing:" + ctable.getTableName() + "," + columnNames[0]);
    try {
        internalClose();
        LOG.info("End internalClose:" + ctable.getTableName() + "," + columnNames[0]);

        // Send map file info to the tablet server.
        for (UploadTarget eachUploadTarget : uploadTargets.values()) {
            String[] mapFileIds = new String[columnNames.length];
            String[] mapFilePaths = new String[columnNames.length];
            for (int i = 0; i < columnNames.length; i++) {
                TabletMapFile mapFile = eachUploadTarget.tabletMapFiles.get(columnNames[i]);
                if (mapFile == null) {
                    mapFileIds[i] = null;
                    mapFilePaths[i] = null;
                } else {
                    mapFileIds[i] = mapFile.getFileId();
                    mapFilePaths[i] = mapFile.getFilePath().toString();
                }
            }

            // Ask the TabletServer to finish the batch upload (async); retry on failure.
            TabletInfo liveTabletInfo = eachUploadTarget.tabletInfo;
            String taskId = null;
            int retry = 0;
            Exception exception = null;
            DataServiceProtocol tabletServer = null;
            while (retry < 10) {
                try {
                    LOG.info("Call endBatchUploader:" + liveTabletInfo);
                    tabletServer = CTableManager.connectTabletServer(liveTabletInfo, ctable.getConf());
                    taskId = tabletServer.endBatchUploader(eachUploadTarget.uploadActionId,
                            eachUploadTarget.tabletInfo.getTabletName(), columnNames, mapFileIds, mapFilePaths);
                    if (taskId != null) {
                        break;
                    }
                } catch (Exception e) {
                    LOG.error("Error tabletServer.endBatchUploader cause:" + e.getMessage()
                            + ". but retry(" + retry + ")");
                    exception = e;
                }
                retry++;
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e1) {
                    return;
                }
                if (liveTabletInfo != null) {
                    // local cache clear.
                    ctable.locationCache.clearTabletCache(eachUploadTarget.tabletInfo.getTableName(),
                            eachUploadTarget.tabletInfo.getEndRowKey(), liveTabletInfo);
                }
                TabletInfo previousTabletInfo = liveTabletInfo;
                // Re-resolve the tablet location; it may have changed.
                ctable.locationCache.clearTabletCache(eachUploadTarget.tabletInfo.getTableName(),
                        eachUploadTarget.tabletInfo.getEndRowKey(), liveTabletInfo);
                liveTabletInfo = ctable.lookupTargetTablet(eachUploadTarget.tabletInfo.getEndRowKey());
                LOG.info("retry tabletServer.endBatchUploader, change tableinfo from ["
                        + previousTabletInfo + "] to [" + liveTabletInfo + "]");
            }

            // Retries exhausted: roll back and rethrow with the original cause attached.
            if (retry >= 10 && exception != null) {
                try {
                    LOG.error("error while endBatchUploader cause:" + exception.getMessage(), exception);
                    rollback();
                } catch (Exception e) {
                    LOG.error(e);
                }
                IOException err = new IOException(exception.getMessage());
                err.initCause(exception);
                throw err;
            }

            // Poll the TabletServer for the async task's completion.
            long startTime = System.currentTimeMillis();
            Row.Key endRowKey = liveTabletInfo.getEndRowKey();
            String tabletName = liveTabletInfo.getTabletName();
            LOG.info("Check TabletServer ending status:" + tabletName + "," + columnNames[0]);
            while (true) {
                try {
                    AsyncTaskStatus task = tabletServer.getAsyncTaskStatus(taskId);
                    if (task != null) {
                        if (task.isEnd()) {
                            if (task.isError()) {
                                rollback();
                                throw new IOException(task.getErrorTrace());
                            } else {
                                break;
                            }
                        }
                    } else {
                        // task == null
                        LOG.warn("No Upload closing response from tabletserver("
                                + liveTabletInfo.getAssignedHostName() + "). "
                                + "tablet=" + liveTabletInfo.getTabletName());
                        // Wait and re-check; the tablet may have moved to another server.
                        try {
                            Thread.sleep(5 * 1000);
                            if (liveTabletInfo != null) {
                                ctable.locationCache.clearTabletCache(liveTabletInfo.getTableName(),
                                        endRowKey, liveTabletInfo);
                            }
                            TabletInfo lookupedTabletInfo = ctable.lookupTargetTablet(endRowKey);
                            if (lookupedTabletInfo != null) {
                                // If the tablet's host changed, re-issue endBatchUploader on the new server.
                                if (!lookupedTabletInfo.getAssignedHostName()
                                        .equals(liveTabletInfo.getAssignedHostName())) {
                                    tabletServer = CTableManager.connectTabletServer(lookupedTabletInfo,
                                            ctable.getConf());
                                    taskId = tabletServer.endBatchUploader(eachUploadTarget.uploadActionId,
                                            tabletName, columnNames, mapFileIds, mapFilePaths);
                                    liveTabletInfo = lookupedTabletInfo;
                                }
                            }
                        } catch (Exception e) {
                            LOG.error("Error while retrying endBatchUploader("
                                    + liveTabletInfo.getAssignedHostName() + "). "
                                    + "tablet=" + liveTabletInfo.getTabletName(), e);
                        }
                    }
                } catch (Exception e) {
                    LOG.warn(e.getMessage(), e);
                    try {
                        Thread.sleep(5 * 1000);
                        if (liveTabletInfo != null) {
                            ctable.locationCache.clearTabletCache(eachUploadTarget.tabletInfo.getTableName(),
                                    eachUploadTarget.tabletInfo.getEndRowKey(), liveTabletInfo);
                        }
                        TabletInfo lookupedTabletInfo = ctable.lookupTargetTablet(endRowKey);
                        if (lookupedTabletInfo != null) {
                            // If the tablet's host changed, re-issue endBatchUploader on the new server.
                            if (!lookupedTabletInfo.getAssignedHostName()
                                    .equals(liveTabletInfo.getAssignedHostName())) {
                                tabletServer = CTableManager.connectTabletServer(lookupedTabletInfo,
                                        ctable.getConf());
                                taskId = tabletServer.endBatchUploader(eachUploadTarget.uploadActionId,
                                        tabletName, columnNames, mapFileIds, mapFilePaths);
                                liveTabletInfo = lookupedTabletInfo;
                            }
                        }
                    } catch (Exception err) {
                        LOG.error("Error while retrying endBatchUploader("
                                + liveTabletInfo.getAssignedHostName() + "). "
                                + "tablet=" + liveTabletInfo.getTabletName(), err);
                    }
                }
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException e) {
                    return;
                }
                if (System.currentTimeMillis() - startTime > 180 * 1000) {
                    LOG.error("Timeout while endBatchUploader("
                            + (System.currentTimeMillis() - startTime) + " ms)" + liveTabletInfo);
                    throw new IOException("Timeout while endBatchUploader("
                            + (System.currentTimeMillis() - startTime) + " ms)" + liveTabletInfo);
                }
            }
        } // for each UploadTarget
    } finally {
        for (UploadTarget uploadTarget : uploadTargets.values()) {
            String path = ctable.getConf().get("cloudata.local.temp") + "/" + uploadTarget.uploadActionId;
            LOG.info("deleting local temp dir:" + path);
            if (!FileUtil.delete(path, true)) {
                try {
                    Thread.sleep(2 * 1000);
                } catch (InterruptedException e) {
                    return;
                }
                if (!FileUtil.delete(path, true)) {
                    LOG.warn("Can't delete local temp file for uploading:" + path);
                }
            }
        }
        LOG.info("Ended Uploader closing");
    }
}
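The close() method above is long, but its use of initCause reduces to a retry-then-wrap pattern: try the remote call a fixed number of times, remember the last failure, and only once the retries are exhausted wrap that failure in an IOException for the caller. A minimal, hedged distillation (endUploadWithRetry and doCall are illustrative names, not from the source):

import java.io.IOException;

public class RetryThenWrapSketch {
    static final int MAX_RETRIES = 10; // mirrors the source's "retry < 10"

    static String endUploadWithRetry() throws IOException {
        Exception lastFailure = null;
        for (int retry = 0; retry < MAX_RETRIES; retry++) {
            try {
                String taskId = doCall();
                if (taskId != null) {
                    return taskId; // success
                }
            } catch (Exception e) {
                lastFailure = e; // remember the most recent failure and retry
            }
        }
        // Retries exhausted: surface the last failure as an IOException,
        // keeping the original exception reachable via getCause().
        IOException err = new IOException(
                lastFailure == null ? "no task id returned" : lastFailure.getMessage());
        if (lastFailure != null) {
            err.initCause(lastFailure);
        }
        throw err;
    }

    // Hypothetical stand-in for the remote endBatchUploader call.
    static String doCall() throws Exception {
        return "task-1";
    }
}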
From source file:org.apache.maven.doxia.linkcheck.DefaultLinkCheck.java
/**
 * Create the XML document from the currently available details.
 *
 * @throws IOException if any
 */
private void createDocument(LinkcheckModel model) throws IOException {
    if (this.reportOutput == null) {
        return;
    }

    File dir = this.reportOutput.getParentFile();
    if (dir != null) {
        dir.mkdirs();
    }

    Writer writer = null;
    LinkcheckModelXpp3Writer xpp3Writer = new LinkcheckModelXpp3Writer();
    try {
        writer = WriterFactory.newXmlWriter(this.reportOutput);
        xpp3Writer.write(writer, model);
    } catch (IllegalStateException e) {
        IOException ioe = new IOException(
                e.getMessage() + " Maybe try to specify another encoding instead of '" + encoding + "'.");
        ioe.initCause(e);
        throw ioe;
    } finally {
        IOUtil.close(writer);
    }

    dir = null;
}
From source file:org.apache.hadoop.hbase.master.MasterFileSystem.java
/**
 * Get the rootdir. Make sure it's wholesome and exists before returning.
 * @param rd
 * @param c
 * @param fs
 * @return hbase.rootdir (after checks for existence and bootstrapping if
 * needed, populating the directory with necessary bootup files).
 * @throws IOException
 */
@SuppressWarnings("deprecation")
private Path checkRootDir(final Path rd, final Configuration c, final FileSystem fs) throws IOException {
    // If FS is in safe mode, wait till we leave it.
    FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    // Filesystem is good. Go ahead and check for hbase.rootdir.
    try {
        if (!fs.exists(rd)) {
            fs.mkdirs(rd);
            // DFS leaves safe mode with 0 DNs when there are 0 blocks.
            // We used to handle this by checking the current DN count and waiting until
            // it is nonzero. With security, the check for datanode count doesn't work --
            // it is a privileged op. So instead we adopt the strategy of the jobtracker
            // and simply retry file creation during bootstrap indefinitely. As soon as
            // there is one datanode it will succeed. Permission problems should have
            // already been caught by mkdirs above.
            FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
                    c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
        } else {
            if (!fs.isDirectory(rd)) {
                throw new IllegalArgumentException(rd.toString() + " is not a directory");
            }
            // as above
            FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
                    c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
                            HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
        }
    } catch (DeserializationException de) {
        LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
        IOException ioe = new IOException();
        ioe.initCause(de);
        throw ioe;
    } catch (IllegalArgumentException iae) {
        LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR + " " + rd.toString(), iae);
        throw iae;
    }
    // Make sure cluster ID exists
    if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
        FSUtils.setClusterId(fs, rd, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
    }
    clusterId = FSUtils.getClusterId(fs, rd);

    // Make sure the meta region directory exists!
    if (!FSUtils.metaRegionExists(fs, rd)) {
        bootstrap(rd, c);
    } else {
        // Migrate table descriptor files if necessary
        org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
                .migrateFSTableDescriptorsIfNecessary(fs, rd);
    }

    // Create tableinfo-s for hbase:meta if not already there.
    new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);

    return rd;
}
From source file:com.gc.iotools.stream.os.OutputStreamToInputStream.java
private void internalClose(final boolean join, final TimeUnit timeUnit, final long timeout)
        throws IOException {
    if (!this.closeCalled) {
        initializeIfNecessary();
        this.closeCalled = true;
        super.close();
        if (join) {
            // waiting for thread to finish..
            try {
                this.writingResult.get(timeout, timeUnit);
            } catch (final ExecutionException e) {
                final IOException e1 = new IOException(
                        "The doRead() threw exception. Use " + "getCause() for details.");
                e1.initCause(e.getCause());
                throw e1;
            } catch (final InterruptedException e) {
                final IOException e1 = new IOException("Waiting for the thread has been interrupted");
                e1.initCause(e);
                throw e1;
            } catch (final TimeoutException e) {
                if (!this.writingResult.isDone()) {
                    this.writingResult.cancel(true);
                }
                final IOException e1 = new IOException("Waiting for the internal "
                        + "thread to finish took more than [" + timeout + "] " + timeUnit);
                e1.initCause(e);
                throw e1;
            }
        }
        afterClose();
    }
}
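A detail in the ExecutionException branch above that is easy to miss: the code attaches e.getCause() rather than e itself, because ExecutionException is only the executor's wrapper around whatever the background doRead() actually threw. A self-contained sketch of the same unwrapping, independent of this library (awaitTask and the Future are illustrative):

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class UnwrapExecutionExceptionSketch {
    static byte[] awaitTask(Future<byte[]> result) throws IOException {
        try {
            return result.get();
        } catch (ExecutionException e) {
            IOException ioe = new IOException("Background task failed. Use getCause() for details.");
            ioe.initCause(e.getCause()); // skip the ExecutionException wrapper
            throw ioe;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            IOException ioe = new IOException("Waiting for the background task was interrupted");
            ioe.initCause(e); // here the InterruptedException itself is the cause
            throw ioe;
        }
    }
}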
From source file:com.adito.vfs.webdav.DAVTransaction.java
private void doAuth(String expectingRealm, String username, AuthenticationScheme authScheme)
        throws DAVAuthenticationRequiredException, IOException {
    if (authScheme == null) {
        throw new DAVAuthenticationRequiredException("No valid authentication scheme.");
    }

    // Find user
    try {
        User user = UserDatabaseManager.getInstance().getDefaultUserDatabase().getAccount(username);
        authScheme.setUser(user);
        LogonAction.authenticate(authScheme, req);
        LogonAction.finishAuthentication(authScheme, req, res);
    } catch (InvalidLoginCredentialsException ilce) {
        // Incorrect details, try again
        throw new DAVAuthenticationRequiredException(expectingRealm);
    } catch (Exception e) {
        IOException ioe = new IOException("Failed to authenticate using scheme.");
        ioe.initCause(e);
        throw ioe;
    }
}
From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java
@Override
public SimpleFeatureCollection getGranules(Query q) throws IOException {
    Utilities.ensureNonNull("query", q);
    q = mergeHints(q);
    String typeName = q.getTypeName();
    final Lock lock = rwLock.readLock();
    try {
        lock.lock();
        checkStore();

        // Load tile information, especially the bounds, which will be reused.
        final SimpleFeatureSource featureSource = tileIndexStore.getFeatureSource(typeName);
        if (featureSource == null) {
            throw new NullPointerException(
                    "The provided SimpleFeatureSource is null, it's impossible to create an index!");
        }
        return featureSource.getFeatures(q);
    } catch (Throwable e) {
        final IOException ioe = new IOException();
        ioe.initCause(e);
        throw ioe;
    } finally {
        lock.unlock();
    }
}
From source file:com.granule.json.utils.internal.JSONObject.java
/**
 * Internal method for doing a simple indention write.
 * @param writer The writer to use while writing the JSON text.
 * @param indentDepth How deep to indent the text.
 * @throws IOException Thrown if an error occurs on write.
 */
private void writeIndention(Writer writer, int indentDepth) throws IOException {
    if (logger.isLoggable(Level.FINER))
        logger.entering(className, "writeIndention(Writer, int)");

    try {
        for (int i = 0; i < indentDepth; i++) {
            writer.write(indent);
        }
    } catch (Exception ex) {
        IOException iox = new IOException("Error occurred on serialization of JSON text.");
        iox.initCause(ex);
        throw iox;
    }

    if (logger.isLoggable(Level.FINER))
        logger.exiting(className, "writeIndention(Writer, int)");
}
From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java
@Override
public int getGranulesCount(Query q) throws IOException {
    Utilities.ensureNonNull("query", q);
    q = mergeHints(q);
    String typeName = q.getTypeName();
    final Lock lock = rwLock.readLock();
    try {
        lock.lock();
        checkStore();

        // Load tile information, especially the bounds, which will be reused.
        final SimpleFeatureSource featureSource = tileIndexStore.getFeatureSource(typeName);
        if (featureSource == null) {
            throw new NullPointerException(
                    "The provided SimpleFeatureSource is null, it's impossible to create an index!");
        }
        int count = featureSource.getCount(q);
        if (count == -1) {
            return featureSource.getFeatures(q).size();
        }
        return count;
    } catch (Throwable e) {
        final IOException ioe = new IOException();
        ioe.initCause(e);
        throw ioe;
    } finally {
        lock.unlock();
    }
}
From source file:org.apache.hadoop.fs.s3a.S3AUtils.java
/**
 * Translate an exception raised in an operation into an IOException.
 * The specific type of IOException depends on the class of
 * {@link AmazonClientException} passed in, and any status codes included
 * in the operation. That is: HTTP error codes are examined and can be
 * used to build a more specific response.
 * @param operation operation
 * @param path path operated on (may be null)
 * @param exception amazon exception raised
 * @return an IOE which wraps the caught exception.
 */
@SuppressWarnings("ThrowableInstanceNeverThrown")
public static IOException translateException(String operation, String path, AmazonClientException exception) {
    String message = String.format("%s%s: %s", operation, path != null ? (" on " + path) : "", exception);
    if (!(exception instanceof AmazonServiceException)) {
        return new AWSClientIOException(message, exception);
    } else {
        IOException ioe;
        AmazonServiceException ase = (AmazonServiceException) exception;
        // this exception is non-null if the service exception is an s3 one
        AmazonS3Exception s3Exception = ase instanceof AmazonS3Exception ? (AmazonS3Exception) ase : null;
        int status = ase.getStatusCode();
        switch (status) {

        case 301:
            if (s3Exception != null) {
                if (s3Exception.getAdditionalDetails() != null
                        && s3Exception.getAdditionalDetails().containsKey(ENDPOINT_KEY)) {
                    message = String.format("Received permanent redirect response to "
                            + "endpoint %s. This likely indicates that the S3 endpoint "
                            + "configured in %s does not match the AWS region containing "
                            + "the bucket.",
                            s3Exception.getAdditionalDetails().get(ENDPOINT_KEY), ENDPOINT);
                }
                ioe = new AWSS3IOException(message, s3Exception);
            } else {
                ioe = new AWSServiceIOException(message, ase);
            }
            break;

        // permissions
        case 401:
        case 403:
            ioe = new AccessDeniedException(path, null, message);
            ioe.initCause(ase);
            break;

        // the object isn't there
        case 404:
        case 410:
            ioe = new FileNotFoundException(message);
            ioe.initCause(ase);
            break;

        // out of range. This may happen if an object is overwritten with
        // a shorter one while it is being read.
        case 416:
            ioe = new EOFException(message);
            break;

        default:
            // no specific exit code. Choose an IOE subclass based on the class
            // of the caught exception
            ioe = s3Exception != null
                    ? new AWSS3IOException(message, s3Exception)
                    : new AWSServiceIOException(message, ase);
            break;
        }
        return ioe;
    }
}
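This example also shows where initCause remains unavoidable in modern code: java.nio.file.AccessDeniedException and java.io.FileNotFoundException expose no constructor that accepts a cause, so the AWS exception can only be attached after construction. A hedged sketch of how a caller might use translateException (the wrapping method, bucket, and key are illustrative, not from the source above):

import java.io.IOException;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import org.apache.hadoop.fs.s3a.S3AUtils;

public class TranslateExceptionSketch {
    static ObjectMetadata headObject(AmazonS3 s3, String bucket, String key) throws IOException {
        try {
            return s3.getObjectMetadata(bucket, key);
        } catch (AmazonClientException e) {
            // 404/410 surface as FileNotFoundException, 401/403 as AccessDeniedException, and so on.
            throw S3AUtils.translateException("getObjectMetadata", key, e);
        }
    }
}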
From source file:com.granule.json.utils.internal.JSONObject.java
/**
 * Internal method to write out a proper JSON attribute string.
 * @param writer The writer to use while serializing
 * @param name The attribute name to use.
 * @param value The value to assign to the attribute.
 * @param depth How far to indent the JSON text.
 * @param compact Flag to denote whether or not to use pretty indention, or compact format, when writing.
 * @throws IOException Thrown if an error occurs on write.
 */
private void writeAttribute(Writer writer, String name, String value, int depth, boolean compact)
        throws IOException {
    if (logger.isLoggable(Level.FINER))
        logger.entering(className, "writeAttribute(Writer, String, String, int)");

    if (!compact) {
        writeIndention(writer, depth);
    }

    try {
        if (!compact) {
            writer.write("\"" + name + "\"" + " : " + "\"" + escapeStringSpecialCharacters(value) + "\"");
        } else {
            writer.write("\"" + name + "\"" + ":" + "\"" + escapeStringSpecialCharacters(value) + "\"");
        }
    } catch (Exception ex) {
        IOException iox = new IOException("Error occurred on serialization of JSON text.");
        iox.initCause(ex);
        throw iox;
    }

    if (logger.isLoggable(Level.FINER))
        logger.exiting(className, "writeAttribute(Writer, String, String, int)");
}