List of usage examples for java.io.IOException.initCause
public synchronized Throwable initCause(Throwable cause)
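Throwable.initCause(Throwable) attaches an underlying exception to one whose constructor took no cause argument; it may be called at most once, and only if no cause was already set in the constructor. Before Java 6, IOException had no (String, Throwable) constructor, so the wrap-and-rethrow idiom seen throughout the examples below is new IOException(message) followed by initCause(cause). A minimal, self-contained sketch of that idiom (the readCount helper and its message are illustrative only, not taken from any of the projects below):

import java.io.IOException;

public class InitCauseExample {

    // Wraps a lower-level failure in an IOException while preserving the
    // original exception as the cause, so it shows up as "Caused by:" in
    // the stack trace.
    static int readCount(String raw) throws IOException {
        try {
            return Integer.parseInt(raw); // stand-in for real I/O or parsing work
        } catch (NumberFormatException e) {
            IOException ioe = new IOException("Invalid count: " + raw);
            ioe.initCause(e); // legal because no cause was set in the constructor
            throw ioe;
        }
    }

    public static void main(String[] args) {
        try {
            readCount("not-a-number");
        } catch (IOException e) {
            e.printStackTrace(); // prints the IOException with the NumberFormatException as its cause
        }
    }
}

On Java 6 and later the same effect is usually achieved with new IOException(message, cause); initCause remains useful when the exception instance is created elsewhere or the wrapping type lacks a cause-taking constructor.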
From source file: org.eclipse.b3.p2.maven.loader.Maven2RepositoryLoader.java

private InstallableUnit createIU(VersionEntry versionEntry, IProgressMonitor monitor) throws IOException {
    InstallableUnitImpl iu = (InstallableUnitImpl) P2Factory.eINSTANCE.createInstallableUnit();
    iu.setId(createP2Id(versionEntry.groupId, versionEntry.artifactId));
    iu.setVersion(versionEntry.version);
    iu.getPropertyMap().put(PROP_MAVEN_ID, versionEntry.artifactId);
    iu.getPropertyMap().put(PROP_MAVEN_GROUP, versionEntry.groupId);
    iu.getPropertyMap().put(PROP_ORIGINAL_PATH, versionEntry.groupId.replace('.', '/'));
    iu.getPropertyMap().put(PROP_ORIGINAL_ID, versionEntry.artifactId);
    iu.setFilter(null);

    POM pom;
    Model model;
    try {
        pom = POM.getPOM(location.toString(), versionEntry.groupId, versionEntry.artifactId,
                versionEntry.version.getOriginal());
        String md5 = pom.getMd5();
        String sha1 = pom.getSha1();
        Long timestamp = pom.getTimestamp();
        if (md5 != null)
            iu.getPropertyMap().put(PROP_POM_MD5, md5);
        if (sha1 != null)
            iu.getPropertyMap().put(PROP_POM_SHA1, sha1);
        if (timestamp != null)
            iu.getPropertyMap().put(PROP_POM_TIMESTAMP, timestamp.toString());

        if (!versionEntry.groupId.equals(pom.getGroupId()))
            throw new IOException(String.format("Bad groupId in POM: expected %s, found %s",
                    versionEntry.groupId, pom.getGroupId()));
        if (!versionEntry.artifactId.equals(pom.getArtifactId()))
            throw new IOException(String.format("Bad artifactId in POM: expected %s, found %s",
                    versionEntry.artifactId, pom.getArtifactId()));

        model = pom.getProject();
        if (model.getDependencies() != null) {
            for (Dependency dependency : model.getDependencies().getDependency()) {
                // TODO What about the namespace ?
                String namespace = dependency.isSetType() ? POM.expandProperties(dependency.getType(), pom) : "jar";
                // TODO What about the groupId ?
                // Yes, include: good for native maven, but not for "mavenized" p2 since artifactId is full
                // No, don't include: good for "mavenized" p2, but not for native maven (may lead to duplicities)
                // For now: include if artifactId is not equal to groupId or does not start with groupId followed
                // by a dot
                String groupId = POM.expandProperties(dependency.getGroupId(), pom);
                String artifactId = POM.expandProperties(dependency.getArtifactId(), pom);
                String versionRange = POM.expandProperties(dependency.getVersion(), pom);
                if (versionRange == null)
                    versionRange = MAVEN_EMPTY_RANGE_STRING;
                VersionRange vr = VersionUtil.createVersionRange(versionRange);
                IRequirement rc = P2Bridge.importToModel(MetadataFactory.createRequirement(namespace,
                        createP2Id(groupId, artifactId), vr, null, dependency.isSetOptional(), false, true));
                iu.getRequirements().add(rc);
            }
        }

        // Add 2 provided capabilities - one for an IU, another one for packaging
        ProvidedCapabilityImpl pc = (ProvidedCapabilityImpl) P2Factory.eINSTANCE.createProvidedCapability();
        String version = pom.getVersion();
        pc.setNamespace(IInstallableUnit.NAMESPACE_IU_ID);
        pc.setName(iu.getId());
        pc.setVersion(VersionUtil.createVersion(version));
        iu.getProvidedCapabilities().add(pc);

        pc = (ProvidedCapabilityImpl) P2Factory.eINSTANCE.createProvidedCapability();
        // TODO Namespace? See discussion above (regarding dependencies)
        pc.setNamespace(model.getPackaging());
        pc.setName(iu.getId());
        pc.setVersion(VersionUtil.createVersion(version));
        iu.getProvidedCapabilities().add(pc);

        if (model.getLicenses() != null) {
            List<License> toLicense = new ArrayList<License>();
            List<License> toCopyright = new ArrayList<License>();
            for (License license : model.getLicenses().getLicense()) {
                String match = "copyright";
                String name = license.getName();
                String comments = license.getComments();
                if (name != null && name.toLowerCase().contains(match)
                        || comments != null && comments.toLowerCase().contains(match))
                    toCopyright.add(license);
                else
                    toLicense.add(license);
            }
            if (toCopyright.size() > 0) {
                LicenseHelper copyrightHelper = buildLicense(toCopyright);
                CopyrightImpl copyright = (CopyrightImpl) P2Factory.eINSTANCE.createCopyright();
                copyright.setBody(copyrightHelper.body);
                copyright.setLocation(copyrightHelper.location);
                iu.setCopyright(copyright);
            }
            if (toLicense.size() > 0) {
                for (License license : toLicense) {
                    LicenseHelper licenseHelper = buildLicense(Collections.singletonList(license));
                    LicenseImpl p2License = (LicenseImpl) P2Factory.eINSTANCE.createLicense();
                    p2License.setBody(licenseHelper.body);
                    p2License.setLocation(licenseHelper.location);
                    p2License.setUUID(licenseHelper.getDigest().toString());
                    iu.getLicenses().add(p2License);
                }
            }
        }

        if (!"pom".equals(model.getPackaging())) {
            ArtifactKeyImpl artifact = (ArtifactKeyImpl) P2Factory.eINSTANCE.createArtifactKey();
            artifact.setId(iu.getId());
            artifact.setVersion(iu.getVersion());
            artifact.setClassifier(model.getPackaging());
            iu.getArtifacts().add(artifact);
        }
    } catch (CoreException e) {
        IOException ioe = new IOException(e.getMessage());
        ioe.initCause(e);
        throw ioe;
    }

    TouchpointTypeImpl touchpointType = (TouchpointTypeImpl) P2Factory.eINSTANCE.createTouchpointType();
    // TODO Set up a touchpoint! What is an installation of a maven artifact supposed to do?
    touchpointType.setId(ITouchpointType.NONE.getId());
    touchpointType.setVersion(ITouchpointType.NONE.getVersion());
    iu.setTouchpointType(touchpointType);

    LogUtils.debug("Adding IU: %s#%s", iu.getId(), VersionUtil.getVersionString(iu.getVersion()));
    return iu;
}
From source file: org.apache.directory.server.core.api.LdapCoreSessionConnection.java

/**
 * {@inheritDoc}
 */
@Override
public void close() throws IOException {
    try {
        unBind();
    } catch (Exception e) {
        IOException ioe = new IOException(e.getMessage());
        ioe.initCause(e);
        throw ioe;
    }
}
From source file: org.cloudata.core.client.scanner.MultiTabletScanner.java

private void openSingleTabletScanner(Row.Key rowKey, boolean great) throws IOException {
    Row.Key targetRowKey = null;
    if (great) {
        byte[] endKeyBytes = new byte[rowKey.getLength() + 1];
        System.arraycopy(rowKey.getBytes(), 0, endKeyBytes, 0, rowKey.getLength());
        endKeyBytes[rowKey.getLength()] = (byte) 0xff;
        targetRowKey = new Row.Key(endKeyBytes);
    } else {
        targetRowKey = new Row.Key(rowKey);
    }

    long startTime = System.currentTimeMillis();
    Exception exception = null;

    while (true) {
        TabletInfo tabletInfo = null;
        try {
            tabletInfo = ctable.lookupTargetTablet(targetRowKey);
            if (tabletInfo != null) {
                if (currentScanTablet != null && currentScanTablet.equals(tabletInfo)) {
                    if (currentScanner != null) {
                        currentScanner.close();
                    }
                    currentScanner = null;
                    currentScanTablet = null;
                    return;
                }
                currentScanner = (SingleTabletScanner) ScannerFactory.openScanner(ctable.getConf(),
                        targetRowKey, tabletInfo, rowFilter);
                scanners.add(currentScanner);
                currentScanTablet = currentScanner.tabletInfo;
                return;
            }
        } catch (IOException e) {
            exception = e;
            LOG.warn("error openSingleTabletScanner:" + e.getMessage() + ", but rerty", e);
            if (currentScanner != null) {
                currentScanner.close();
            }
        }

        try {
            Thread.sleep(1 * 1000);
        } catch (InterruptedException e) {
            return;
        }

        if (timeout > 0) {
            if (System.currentTimeMillis() - startTime > timeout * 1000) {
                LOG.error("Timeout while opening scanner: timeout=" + timeout + ", rowKey=" + rowKey
                        + ", last exception=" + exception);
                IOException e = new IOException("Timeout while opening scanner: timeout=" + timeout
                        + ", rowKey=" + rowKey + ", last exception=" + exception);
                if (exception != null) {
                    e.initCause(exception);
                }
                throw e;
            }
        }
    } // end of while
}
From source file: org.cloudata.core.tabletserver.MemorySSTable.java

public Map<String, TabletMapFile[]> splitAndSaveCanCommit(Row.Key midRowKey, TabletInfo[] splitedTabletInfos)
        throws IOException {
    initMemoryForCompaction();
    try {
        return splitAndSave(midRowKey, splitedTabletInfos, compactingColumnCollections, false);
    } catch (Exception e) {
        cancelCompaction(null);
        if (e instanceof IOException) {
            throw (IOException) e;
        } else {
            IOException err = new IOException(e.getMessage());
            err.initCause(e);
            throw err;
        }
    }
}
From source file: org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure.java

/**
 * Prepare merge and do some check
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOException {
    // Note: the following logic assumes that we only have 2 regions to merge. In the future,
    // if we want to extend to more than 2 regions, the code needs to modify a little bit.
    //
    CatalogJanitor catalogJanitor = env.getMasterServices().getCatalogJanitor();
    boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(regionsToMerge[0]);
    if (regionAHasMergeQualifier || !catalogJanitor.cleanMergeQualifier(regionsToMerge[1])) {
        String msg = "Skip merging regions " + HRegionInfo.getShortNameToLog(regionsToMerge)
                + ", because region "
                + (regionAHasMergeQualifier ? regionsToMerge[0].getEncodedName()
                        : regionsToMerge[1].getEncodedName())
                + " has merge qualifier";
        LOG.warn(msg);
        throw new MergeRegionException(msg);
    }

    RegionStates regionStates = env.getAssignmentManager().getRegionStates();
    RegionState regionStateA = regionStates.getRegionState(regionsToMerge[0].getEncodedName());
    RegionState regionStateB = regionStates.getRegionState(regionsToMerge[1].getEncodedName());
    if (regionStateA == null || regionStateB == null) {
        throw new UnknownRegionException(
                regionStateA == null ? regionsToMerge[0].getEncodedName() : regionsToMerge[1].getEncodedName());
    }
    if (!regionStateA.isOpened() || !regionStateB.isOpened()) {
        throw new MergeRegionException(
                "Unable to merge regions not online " + regionStateA + ", " + regionStateB);
    }

    if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
        String regionsStr = Arrays.deepToString(regionsToMerge);
        LOG.warn("merge switch is off! skip merge of " + regionsStr);
        super.setFailure(getClass().getSimpleName(),
                new IOException("Merge of " + regionsStr + " failed because merge switch is off"));
        return false;
    }

    // Ask the remote regionserver if regions are mergeable. If we get an IOE, report it
    // along w/ the failure so can see why we are not mergeable at this time.
    IOException mergeableCheckIOE = null;
    boolean mergeable = false;
    RegionState current = regionStateA;
    try {
        mergeable = isMergeable(env, current);
    } catch (IOException e) {
        mergeableCheckIOE = e;
    }
    if (mergeable && mergeableCheckIOE == null) {
        current = regionStateB;
        try {
            mergeable = isMergeable(env, current);
        } catch (IOException e) {
            mergeableCheckIOE = e;
        }
    }
    if (!mergeable) {
        IOException e = new IOException(current.getRegion().getShortNameToLog() + " NOT mergeable");
        if (mergeableCheckIOE != null)
            e.initCause(mergeableCheckIOE);
        super.setFailure(getClass().getSimpleName(), e);
        return false;
    }
    return true;
}
From source file: org.archive.crawler.frontier.AbstractFrontier.java

public void importURIs(String jsonParams) throws IOException {
    JSONObject params;
    try {
        params = new JSONObject(jsonParams);
    } catch (JSONException e) {
        IOException ioe = new IOException(e.getMessage());
        ioe.initCause(e);
        throw ioe;
    }
    if ("recoveryLog".equals(params.optString("format"))) {
        FrontierJournal.importRecoverLog(params, this);
        return;
    }
    // otherwise, do a 'simple' import
    importURIsSimple(params);
}
From source file: org.apache.hadoop.metrics2.sink.RollingFileSystemSink.java

/**
 * Create a new log file and return the {@link FSDataOutputStream}. If a
 * file with the specified path already exists, open the file for append
 * instead.
 *
 * Once the file is open, update {@link #currentFSOutStream},
 * {@link #currentOutStream}, and {@link #currentFilePath}.
 *
 * @param targetFile the target path
 * @throws IOException thrown if the create or append operation fails.
 */
private void createOrAppendLogFile(Path targetFile) throws IOException {
    // First try blindly creating the file. If we fail, it either means
    // the file exists, or the operation actually failed. We do it this way
    // because if we check whether the file exists, it might still be created
    // by the time we try to create it. Creating first works like a
    // test-and-set.
    try {
        currentFSOutStream = fileSystem.create(targetFile, false);
        currentOutStream = new PrintStream(currentFSOutStream, true, StandardCharsets.UTF_8.name());
    } catch (IOException ex) {
        // Try appending instead. If we fail, it means the file doesn't
        // actually exist yet or the operation actually failed.
        try {
            currentFSOutStream = fileSystem.append(targetFile);
            currentOutStream = new PrintStream(currentFSOutStream, true, StandardCharsets.UTF_8.name());
        } catch (IOException ex2) {
            // If the original create failed for a legit but transitory
            // reason, the append will fail because the file now doesn't exist,
            // resulting in a confusing stack trace. To avoid that, we set
            // the cause of the second exception to be the first exception.
            // It's still a tiny bit confusing, but it's enough
            // information that someone should be able to figure it out.
            ex2.initCause(ex);
            throw ex2;
        }
    }
    currentFilePath = targetFile;
}
From source file: com.archivas.clienttools.arcutils.impl.adapter.Hcp3AuthNamespaceAdapter.java

public ArcMoverFile createArcMoverFileObject(XMLStreamReader xmlr, ArcMoverDirectory caller)
        throws StorageAdapterException {
    ArcMoverFile retVal = null;
    try {
        // The urlName is the byte stream representation of the file name, the utf8Name is the
        // HCP's utf8 version of that byte stream. Depending on the libraries installed on the
        // client that name may not translate so it is best to use the urlName.
        String fileName = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_FILE_NAME);
        if (fileName == null) {
            fileName = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_FILE_UTF8_NAME);
        } else {
            try {
                // Not all chars are encoded. Fix it.
                fileName = RFC2396Encoder.fixEncoding(fileName);
            } catch (UnsupportedEncodingException e) {
                // This should never happen but just in case.
                throw new StorageAdapterException(e.getMessage(), e);
            }
        }

        String fileType = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_TYPE);
        if (fileType != null) {
            String state = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_STATE);
            if (DIRECTORY.equals(fileType)) {
                // Metadata is not available for a directory in the authenticated namespace.
                retVal = ArcMoverDirectory.getDirInstance(caller, fileName, null, this);
            } else if (SYMLINK.equals(fileType)) {
                String symlinkTarget = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_SYMLINK_TARGET);
                retVal = ArcMoverSymlink.getSymlinkInstance(caller, fileName, symlinkTarget, this);
            } else if (OBJECT.equals(fileType)) {
                long size = 0;
                long version = 0;
                long retentionValue = 0;
                int dpl = 0;
                boolean shred = false;
                boolean retentionHold = false;
                boolean searchIndex = false;
                boolean replicated = false;
                boolean hasCustomMetadata = false;

                /*
                 * We have access to all of the metadata already so we just do that to construct
                 * the FileMetadata Object rather than going through the getMetadata call below.
                 * We collect all the same data here. See @getMetadata
                 */
                try {
                    size = Long.parseLong(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_PARAM_SIZE));
                } catch (NumberFormatException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    version = Long.parseLong(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_VERSION));
                } catch (NumberFormatException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                String ingestTimeString = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_INJEST_TIME);
                Date ingestTime = (ingestTimeString == null ? null
                        : new Date(Long.parseLong(ingestTimeString) * 1000));
                try {
                    size = Long.parseLong(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_SIZE));
                } catch (NumberFormatException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    version = Long.parseLong(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_VERSION));
                } catch (NumberFormatException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    retentionValue = Long.parseLong(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_RETENTION));
                } catch (NumberFormatException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    dpl = Integer.parseInt(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_DPL));
                } catch (NumberFormatException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    shred = Boolean.parseBoolean(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_SHRED));
                } catch (NullPointerException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    retentionHold = Boolean.parseBoolean(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_HOLD));
                } catch (NullPointerException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    searchIndex = Boolean.parseBoolean(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_INDEX));
                } catch (NullPointerException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                try {
                    replicated = Boolean.parseBoolean(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_PARAM_REPLICATED));
                } catch (NullPointerException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                String retentionClass = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_RENTENTION_CLASS);
                String hashScheme = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_HASH_SCHEME);
                String hashValue = xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_HASH);

                // Construct the retention object
                Retention retention = null;
                if (retentionClass != null && !retentionClass.equals("")) {
                    retention = Retention.fromRetentionClass(retentionClass, retentionValue);
                } else {
                    retention = Retention.fromRetentionValue(retentionValue);
                }

                FileMetadata metadata = new FileMetadata(FileType.FILE, ingestTime, null, null, null, size,
                        null, null, null, null, null, version, dpl, hashScheme, hashValue, shred, retention,
                        retentionHold, searchIndex, replicated, null, null, null);
                metadata.setIsVersion(caller.isVersionList());
                retVal = ArcMoverFile.getFileInstance(getProfile(), caller, fileName, metadata);
                getAdditionalMetadata(xmlr, retVal.getMetadata(), retVal.getPath());

                try {
                    hasCustomMetadata = Boolean.parseBoolean(xmlr.getAttributeValue(null, HttpGatewayConstants.MD_AUTH_PARAM_CUSTOM_METADATA));
                } catch (NullPointerException e) {
                    LOG.log(Level.WARNING, "Exception parsing metadata for: " + fileName, e);
                }
                CustomMetadata customMetadata = null;
                if (hasCustomMetadata) {
                    customMetadata = new CustomMetadata(CustomMetadata.Form.PROFILED, retVal.getPath());
                }
                retVal.setCustomMetadata(customMetadata);
            }
            if (retVal != null) {
                retVal.getMetadata().setRestState(state);
            }
        }
    } catch (Throwable e) {
        String msg = "Error parsing directory for: " + caller.getPath();
        IOException e2 = new IOException(msg);
        e2.initCause(e);
        throw new StorageAdapterException(e2.getMessage(), e2);
    }
    return retVal;
}
From source file: org.codehaus.plexus.archiver.zip.AbstractZipArchiver.java

protected void close() throws IOException {
    // Close the output stream.
    try {
        if (zipArchiveOutputStream != null) {
            zOut.writeTo(zipArchiveOutputStream);
            zipArchiveOutputStream.close();
        }
    } catch (IOException ex) {
        // If we're in this finally clause because of an
        // exception, we don't really care if there's an
        // exception when closing the stream. E.g. if it
        // throws "ZIP file must have at least one entry",
        // because an exception happened before we added
        // any files, then we must swallow this
        // exception. Otherwise, the error that's reported
        // will be the close() error, which is not the
        // real cause of the problem.
        if (success) {
            throw ex;
        }
    } catch (InterruptedException e) {
        IOException ex = new IOException("InterruptedException exception");
        ex.initCause(e.getCause());
        throw ex;
    } catch (ExecutionException e) {
        IOException ex = new IOException("Execution exception");
        ex.initCause(e.getCause());
        throw ex;
    }
}
From source file: ch.cyberduck.core.s3.S3Path.java

@Override
public InputStream read(final TransferStatus status) throws IOException {
    try {
        if (this.attributes().isDuplicate()) {
            return this.getSession().getClient()
                    .getVersionedObject(attributes().getVersionId(), this.getContainerName(), this.getKey(),
                            null, // ifModifiedSince
                            null, // ifUnmodifiedSince
                            null, // ifMatch
                            null, // ifNoneMatch
                            status.isResume() ? status.getCurrent() : null, null)
                    .getDataInputStream();
        }
        return this.getSession().getClient().getObject(this.getContainerName(), this.getKey(),
                null, // ifModifiedSince
                null, // ifUnmodifiedSince
                null, // ifMatch
                null, // ifNoneMatch
                status.isResume() ? status.getCurrent() : null, null).getDataInputStream();
    } catch (ServiceException e) {
        IOException failure = new IOException(e.getMessage());
        failure.initCause(e);
        throw failure;
    }
}