Usage examples for java.net.URI getAuthority

public String getAuthority()

Returns the decoded authority component of this URI, or null if the authority is undefined. For a server-based authority this is the [user-info@]host[:port] section between the "//" and the path.
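Before the project examples, a minimal self-contained sketch (not taken from any of the sources below; the URIs are made up for illustration) of what the method returns in the common cases:

import java.net.URI;

public class AuthorityDemo {
    public static void main(String[] args) {
        // Hierarchical URI: authority is [user-info@]host[:port]
        URI full = URI.create("http://alice:secret@example.com:8080/index.html");
        System.out.println(full.getAuthority()); // alice:secret@example.com:8080
        System.out.println(full.getUserInfo());  // alice:secret
        System.out.println(full.getHost());      // example.com
        System.out.println(full.getPort());      // 8080

        // Opaque URI with no authority component: getAuthority() returns null
        URI mail = URI.create("mailto:alice@example.com");
        System.out.println(mail.getAuthority()); // null

        // Relative reference: scheme and authority are both undefined
        URI rel = URI.create("docs/readme.txt");
        System.out.println(rel.getAuthority());  // null
    }
}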
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java

/**
 * Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name"))
        .makeQualified(this.uri, this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(ACCESS_KEY, null);
    String secretKey = conf.get(SECRET_KEY, null);

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
        new BasicAWSCredentialsProvider(accessKey, secretKey),
        new InstanceProfileCredentialsProvider(),
        new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
    awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
    awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));

    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
    int proxyPort = conf.getInt(PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        awsConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            awsConf.setProxyPort(proxyPort);
        } else {
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                awsConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                awsConf.setProxyPort(80);
            }
        }
        String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
        String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD
                + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        awsConf.setProxyUsername(proxyUsername);
        awsConf.setProxyPassword(proxyPassword);
        awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
        awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug("Using proxy server {}:{} as user {} with password {} on "
                    + "domain {} as workstation {}",
                awsConf.getProxyHost(), awsConf.getProxyPort(),
                String.valueOf(awsConf.getProxyUsername()), awsConf.getProxyPassword(),
                awsConf.getProxyDomain(), awsConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }

    s3 = new AmazonS3Client(credentials, awsConf);

    String endPoint = conf.getTrimmed(ENDPOINT, "");
    if (!endPoint.isEmpty()) {
        try {
            s3.setEndpoint(endPoint);
        } catch (IllegalArgumentException e) {
            String msg = "Incorrect endpoint: " + e.getMessage();
            LOG.error(msg);
            throw new IllegalArgumentException(msg, e);
        }
    }

    maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }
    if (multiPartThreshold < 5 * 1024 * 1024) {
        LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        multiPartThreshold = 5 * 1024 * 1024;
    }

    int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
    int coreThreads = conf.getInt(CORE_THREADS, DEFAULT_CORE_THREADS);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong(KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(
        maxThreads * conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
    threadPoolExecutor = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime,
        TimeUnit.SECONDS, workQueue, newDaemonThreadFactory("s3a-transfer-shared-"));
    threadPoolExecutor.allowCoreThreadTimeOut(true);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);

    transfers = new TransferManager(s3, threadPoolExecutor);
    transfers.setConfiguration(transferConfiguration);

    String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
        DEFAULT_PURGE_EXISTING_MULTIPART);
    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
        DEFAULT_PURGE_EXISTING_MULTIPART_AGE);
    if (purgeExistingMultipart) {
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);
        transfers.abortMultipartUploads(bucket, purgeBefore);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM);

    setConf(conf);
}
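The method above rebuilds the filesystem URI from scheme plus authority, then separately pulls credentials out of the user-info. A standalone sketch of that same split, assuming a made-up s3r:// URI and placeholder credentials (note that real secret keys containing '/' or '+' can break URI parsing, which is one reason this style of embedding credentials is discouraged):

import java.net.URI;

public class UserInfoSplit {
    public static void main(String[] args) {
        URI name = URI.create("s3r://ACCESSKEY:SECRETKEY@my-bucket/data");
        // getAuthority() still includes the credentials:
        System.out.println(name.getAuthority()); // ACCESSKEY:SECRETKEY@my-bucket

        String userInfo = name.getUserInfo();    // ACCESSKEY:SECRETKEY
        int index = userInfo.indexOf(':');
        String accessKey = index != -1 ? userInfo.substring(0, index) : userInfo;
        String secretKey = index != -1 ? userInfo.substring(index + 1) : null;
        System.out.println(accessKey + " / " + secretKey);

        // getHost() drops the user-info, leaving just the bucket name
        System.out.println(name.getHost());      // my-bucket
    }
}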
From source file:org.kie.commons.java.nio.fs.jgit.JGitFileSystemProvider.java

private void checkURI(final String paramName, final URI uri) throws IllegalArgumentException {
    checkNotNull("uri", uri);

    if (uri.getAuthority() == null || uri.getAuthority().isEmpty()) {
        throw new IllegalArgumentException(
            "Parameter named '" + paramName + "' is invalid, missing host repository!");
    }

    int atIndex = uri.getPath().indexOf("@");
    if (atIndex != -1 && !uri.getAuthority().contains("@")) {
        if (uri.getPath().indexOf("/", atIndex) == -1) {
            throw new IllegalArgumentException(
                "Parameter named '" + paramName + "' is invalid, missing host repository!");
        }
    }
}
From source file:org.kie.commons.java.nio.fs.jgit.JGitFileSystemProvider.java

private String extractHost(final URI uri) {
    checkNotNull("uri", uri);

    int atIndex = uri.getPath().indexOf("@");
    if (atIndex != -1 && !uri.getAuthority().contains("@")) {
        return uri.getAuthority() + uri.getPath().substring(0, uri.getPath().indexOf("/", atIndex));
    }

    return uri.getAuthority();
}
From source file:org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet.java

@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    try {
        String userApprovedParamS = req.getParameter(ProxyUriUtils.PROXY_APPROVAL_PARAM);
        boolean userWasWarned = false;
        boolean userApproved = (userApprovedParamS != null && Boolean.valueOf(userApprovedParamS));
        boolean securityEnabled = isSecurityEnabled();
        final String remoteUser = req.getRemoteUser();
        final String pathInfo = req.getPathInfo();

        String parts[] = pathInfo.split("/", 3);
        if (parts.length < 2) {
            LOG.warn(remoteUser + " Gave an invalid proxy path " + pathInfo);
            notFound(resp, "Your path appears to be formatted incorrectly.");
            return;
        }
        // parts[0] is empty because path info always starts with a /
        String appId = parts[1];
        String rest = parts.length > 2 ? parts[2] : "";
        ApplicationId id = Apps.toAppID(appId);
        if (id == null) {
            LOG.warn(req.getRemoteUser() + " Attempting to access " + appId + " that is invalid");
            notFound(resp, appId + " appears to be formatted incorrectly.");
            return;
        }

        if (securityEnabled) {
            String cookieName = getCheckCookieName(id);
            Cookie[] cookies = req.getCookies();
            if (cookies != null) {
                for (Cookie c : cookies) {
                    if (cookieName.equals(c.getName())) {
                        userWasWarned = true;
                        userApproved = userApproved || Boolean.valueOf(c.getValue());
                        break;
                    }
                }
            }
        }
        boolean checkUser = securityEnabled && (!userWasWarned || !userApproved);

        ApplicationReport applicationReport = null;
        try {
            applicationReport = getApplicationReport(id);
        } catch (ApplicationNotFoundException e) {
            applicationReport = null;
        }
        if (applicationReport == null) {
            LOG.warn(req.getRemoteUser() + " Attempting to access " + id + " that was not found");
            URI toFetch = ProxyUriUtils.getUriFromTrackingPlugins(id, this.trackingUriPlugins);
            if (toFetch != null) {
                resp.sendRedirect(resp.encodeRedirectURL(toFetch.toString()));
                return;
            }
            notFound(resp, "Application " + appId + " could not be found, "
                + "please try the history server");
            return;
        }

        String original = applicationReport.getOriginalTrackingUrl();
        URI trackingUri = null;
        // fallback to ResourceManager's app page if no tracking URI provided
        if (original == null || original.equals("N/A")) {
            resp.sendRedirect(resp.encodeRedirectURL(StringHelper.pjoin(rmAppPageUrlBase, id.toString())));
            return;
        } else {
            if (ProxyUriUtils.getSchemeFromUrl(original).isEmpty()) {
                trackingUri = ProxyUriUtils.getUriFromAMUrl(WebAppUtils.getHttpSchemePrefix(conf), original);
            } else {
                trackingUri = new URI(original);
            }
        }

        String runningUser = applicationReport.getUser();
        if (checkUser && !runningUser.equals(remoteUser)) {
            LOG.info("Asking " + remoteUser + " if they want to connect to the "
                + "app master GUI of " + appId + " owned by " + runningUser);
            warnUserPage(resp, ProxyUriUtils.getPathAndQuery(id, rest, req.getQueryString(), true),
                runningUser, id);
            return;
        }

        URI toFetch = new URI(trackingUri.getScheme(), trackingUri.getAuthority(),
            StringHelper.ujoin(trackingUri.getPath(), rest), req.getQueryString(), null);

        LOG.info(req.getRemoteUser() + " is accessing unchecked " + toFetch
            + " which is the app master GUI of " + appId + " owned by " + runningUser);

        switch (applicationReport.getYarnApplicationState()) {
        case KILLED:
        case FINISHED:
        case FAILED:
            resp.sendRedirect(resp.encodeRedirectURL(toFetch.toString()));
            return;
        }

        Cookie c = null;
        if (userWasWarned && userApproved) {
            c = makeCheckCookie(id, true);
        }
        proxyLink(req, resp, toFetch, c, getProxyHost());
    } catch (URISyntaxException e) {
        throw new IOException(e);
    } catch (YarnException e) {
        throw new IOException(e);
    }
}
From source file:org.jopendocument.dom.ODSingleXMLDocument.java

/**
 * Prefix a path.
 *
 * @param href a path inside the pkg, eg "./Object 1/content.xml".
 * @return the prefixed path, eg "./3_Object 1/content.xml", or <code>null</code> if href is external.
 */
private String prefixPath(final String href) {
    if (this.getVersion().equals(org.jopendocument.dom.XMLVersion.OOo)) {
        // in OOo 1.x inPKG is denoted by a #
        final boolean sharp = href.startsWith("#");
        if (sharp)
            // eg #Pictures/100000000000006C000000ABCC02339E.png
            return "#" + this.prefix(href.substring(1));
        else
            // eg ../../../../Program%20Files/OpenOffice.org1.1.5/share/gallery/apples.gif
            return null;
    } else {
        URI uri;
        try {
            uri = new URI(href);
        } catch (URISyntaxException e) {
            // OO doesn't escape characters for files
            uri = null;
        }
        // section 17.5
        final boolean inPKGFile = uri == null
            || uri.getScheme() == null && uri.getAuthority() == null && uri.getPath().charAt(0) != '/';
        if (inPKGFile) {
            final String dotSlash = "./";
            if (href.startsWith(dotSlash))
                return dotSlash + this.prefix(href.substring(dotSlash.length()));
            else
                return this.prefix(href);
        } else
            return null;
    }
}
From source file:com.buaa.cfs.fs.AbstractFileSystem.java

/**
 * Get the URI for the file system based on the given URI. The path and query
 * parts of the given URI are stripped out and the default file system port
 * is used to form the URI.
 *
 * @param uri FileSystem URI.
 * @param authorityNeeded if true authority cannot be null in the URI. If
 *          false authority must be null.
 * @param defaultPort default port to use if port is not specified in the URI.
 * @return URI of the file system
 * @throws URISyntaxException <code>uri</code> has syntax error
 */
private URI getUri(URI uri, String supportedScheme, boolean authorityNeeded, int defaultPort)
        throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify default port
    if (defaultPort < 0 && authorityNeeded) {
        throw new HadoopIllegalArgumentException(
            "FileSystem implementation error - default port " + defaultPort + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
        if (authorityNeeded) {
            throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
        } else {
            return new URI(supportedScheme + ":///");
        }
    }
    // authority is non null - AuthorityNeeded may be true or false.
    int port = uri.getPort();
    port = (port == -1 ? defaultPort : port);
    if (port == -1) { // no port supplied and default port is not specified
        return new URI(supportedScheme, authority, "/", null);
    }
    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
}
From source file:com.ibm.jaggr.core.impl.AbstractAggregatorImpl.java

protected void processResourceRequest(HttpServletRequest req, HttpServletResponse resp,
        IResource res, String path) {
    final String sourceMethod = "processRequest"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(AbstractAggregatorImpl.class.getName(), sourceMethod,
            new Object[] { req, resp, res, path });
    }
    try {
        URI uri = res.getURI();
        if (path != null && path.length() > 0 && !uri.getPath().endsWith("/")) { //$NON-NLS-1$
            // Make sure we resolve against a folder path
            uri = new URI(uri.getScheme(), uri.getAuthority(), uri.getPath() + "/", //$NON-NLS-1$
                uri.getQuery(), uri.getFragment());
            res = newResource(uri);
        }
        IResource resolved = res.resolve(path);
        if (!resolved.exists()) {
            throw new NotFoundException(resolved.getURI().toString());
        }
        resp.setDateHeader("Last-Modified", resolved.lastModified()); //$NON-NLS-1$
        int expires = getConfig().getExpires();
        resp.addHeader("Cache-Control", //$NON-NLS-1$
            "public" + (expires > 0 ? (", max-age=" + expires) : "")); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
        InputStream is = res.resolve(path).getInputStream();
        OutputStream os = resp.getOutputStream();
        CopyUtil.copy(is, os);
    } catch (NotFoundException e) {
        if (log.isLoggable(Level.INFO)) {
            log.log(Level.INFO, e.getMessage() + " - " + req.getRequestURI(), e); //$NON-NLS-1$
        }
        resp.setStatus(HttpServletResponse.SC_NOT_FOUND);
    } catch (Exception e) {
        if (log.isLoggable(Level.WARNING)) {
            log.log(Level.WARNING, e.getMessage() + " - " + req.getRequestURI(), e); //$NON-NLS-1$
        }
        resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
    }
    if (isTraceLogging) {
        log.exiting(AbstractAggregatorImpl.class.getName(), sourceMethod);
    }
}
From source file:org.apache.hadoop.fs.HarFileSystem.java

/**
 * Initialize a Har filesystem per har archive. The
 * archive home directory is the top level directory
 * in the filesystem that contains the HAR archive.
 * Be careful with this method, you do not want to go
 * on creating new Filesystem instances per call to
 * path.getFileSystem().
 * The URI of a Har is
 * har://underlyingfsscheme-host:port/archivepath
 * or
 * har:///archivepath, which assumes the default underlying
 * filesystem when none is specified.
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
    // initialize the metadata cache, if needed
    initializeMetadataCache(conf);

    // decode the name
    URI underLyingURI = decodeHarURI(name, conf);
    // we got the right har Path - now check if this is
    // truly a har filesystem
    Path harPath = archivePath(new Path(name.getScheme(), name.getAuthority(), name.getPath()));
    if (harPath == null) {
        throw new IOException("Invalid path for the Har Filesystem. " + name.toString());
    }
    if (fs == null) {
        fs = FileSystem.get(underLyingURI, conf);
    }
    uri = harPath.toUri();
    archivePath = new Path(uri.getPath());
    harAuth = getHarAuth(underLyingURI);

    // check for the underlying fs containing the index file
    Path masterIndexPath = new Path(archivePath, "_masterindex");
    Path archiveIndexPath = new Path(archivePath, "_index");
    if (!fs.exists(masterIndexPath) || !fs.exists(archiveIndexPath)) {
        throw new IOException("Invalid path for the Har Filesystem. "
            + "No index file in " + harPath);
    }

    metadata = harMetaCache.get(uri);
    if (metadata != null) {
        FileStatus mStat = fs.getFileStatus(masterIndexPath);
        FileStatus aStat = fs.getFileStatus(archiveIndexPath);
        if (mStat.getModificationTime() != metadata.getMasterIndexTimestamp()
                || aStat.getModificationTime() != metadata.getArchiveIndexTimestamp()) {
            // the archive has been overwritten since we last read it;
            // remove the entry from the meta data cache
            metadata = null;
            harMetaCache.remove(uri);
        }
    }
    if (metadata == null) {
        metadata = new HarMetaData(fs, masterIndexPath, archiveIndexPath);
        metadata.parseMetaData();
        harMetaCache.put(uri, metadata);
    }
}
From source file:org.gridgain.grid.ggfs.hadoop.v2.GridGgfsHadoopFileSystem.java

/**
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!GGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + GGFS_SCHEME
                + "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_GGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In GG replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_GGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_GGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_GGFS_LOG_DIR, uriAuthority, DFLT_GGFS_LOG_DIR);

        File logDirFile = U.resolveGridGainPath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new GridGgfsHadoopWrapper(uriAuthority, logDir, cfg, LOG);

        // Handshake.
        GridGgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        GridGgfsPaths paths = handshake.secondaryPaths();

        Boolean logEnabled = parameter(cfg, PARAM_GGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_GGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_GGFS_LOG_BATCH_SIZE);

            clientLog = GridGgfsLogger.logger(uriAuthority, handshake.ggfsName(), logDir, batchSize);
        }
        else
            clientLog = GridGgfsLogger.disabledLogger();

        modeRslvr = new GridGgfsModeResolver(paths.defaultMode(), paths.pathModes());

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (paths.pathModes() != null) {
            for (T2<GridGgfsPath, GridGgfsMode> pathMode : paths.pathModes()) {
                GridGgfsMode mode = pathMode.getValue();

                initSecondary |= mode == PROXY;
            }
        }

        if (initSecondary) {
            Map<String, String> props = paths.properties();

            String secUri = props.get(GridGgfsHadoopFileSystemWrapper.SECONDARY_FS_URI);
            String secConfPath = props.get(GridGgfsHadoopFileSystemWrapper.SECONDARY_FS_CONFIG_PATH);

            if (secConfPath == null)
                throw new IOException("Failed to connect to the secondary file system because configuration "
                    + "path is not provided.");

            if (secUri == null)
                throw new IOException("Failed to connect to the secondary file system because URI is not "
                    + "provided.");

            try {
                secondaryUri = new URI(secUri);

                URL secondaryCfgUrl = U.resolveGridGainUrl(secConfPath);

                if (secondaryCfgUrl == null)
                    throw new IOException("Failed to resolve secondary file system config URL: " + secConfPath);

                Configuration conf = new Configuration();

                conf.addResource(secondaryCfgUrl);

                String prop = String.format("fs.%s.impl.disable.cache", secondaryUri.getScheme());

                conf.setBoolean(prop, true);

                secondaryFs = AbstractFileSystem.get(secondaryUri, conf);
            }
            catch (URISyntaxException ignore) {
                throw new IOException("Failed to resolve secondary file system URI: " + secUri);
            }
            catch (IOException e) {
                throw new IOException("Failed to connect to the secondary file system: " + secUri, e);
            }
        }
    }
    finally {
        leaveBusy();
    }
}
From source file:de.unirostock.sems.cbarchive.web.rest.ShareApi.java

private void addAdditionalFiles(UserManager user, ImportRequest request, Archive archive,
        List<FormDataBodyPart> uploadedFiles) throws ImporterException {

    for (ImportRequest.AdditionalFile addFile : request.getAdditionalFiles()) {
        java.nio.file.Path temp = null;
        try {
            URI remoteUri = new URI(addFile.getRemoteUrl());

            // copy the stream to a temp file
            temp = Files.createTempFile(Fields.TEMP_FILE_PREFIX,
                FilenameUtils.getBaseName(remoteUri.toString()));

            // write file to disk
            OutputStream output = new FileOutputStream(temp.toFile());
            InputStream input = null;
            String protocol = remoteUri.getScheme().toLowerCase();
            if (protocol.equals("http") || protocol.equals("https")) {
                input = remoteUri.toURL().openStream();
            } else if (protocol.equals("post") && uploadedFiles != null && uploadedFiles.size() > 0) {
                // use a file from the post
                String fileName = remoteUri.getAuthority();
                for (FormDataBodyPart file : uploadedFiles) {
                    if (file.getFormDataContentDisposition().getFileName().equals(fileName)) {
                        input = file.getEntityAs(InputStream.class);
                        break;
                    }
                }
            } else {
                output.close();
                throw new ImporterException(
                    "Unknown protocol " + protocol + " while adding " + remoteUri.toString());
            }

            if (input == null) {
                output.close();
                throw new ImporterException("Cannot open stream to import file: " + remoteUri.toString());
            }

            long downloadedFileSize = IOUtils.copy(input, output);
            output.flush();
            output.close();
            input.close();

            // quota stuff
            // max size for upload
            if (Fields.QUOTA_UPLOAD_SIZE != Fields.QUOTA_UNLIMITED
                    && Tools.checkQuota(downloadedFileSize, Fields.QUOTA_UPLOAD_SIZE) == false) {
                LOGGER.warn("QUOTA_UPLOAD_SIZE reached in workspace ", user.getWorkspaceId());
                throw new ImporterException("The additional file is too big: " + addFile.getRemoteUrl());
            }
            // max archive size
            if (user != null && Fields.QUOTA_ARCHIVE_SIZE != Fields.QUOTA_UNLIMITED
                    && Tools.checkQuota(
                        user.getWorkspace().getArchiveSize(archive.getId()) + downloadedFileSize,
                        Fields.QUOTA_ARCHIVE_SIZE) == false) {
                LOGGER.warn("QUOTA_ARCHIVE_SIZE reached in workspace ", user.getWorkspaceId(),
                    " while trying to adv import archive");
                throw new ImporterException(
                    "The maximum size of the archive is reached, while adding " + addFile.getRemoteUrl());
            }
            // max workspace size
            if (user != null && Fields.QUOTA_WORKSPACE_SIZE != Fields.QUOTA_UNLIMITED
                    && Tools.checkQuota(
                        QuotaManager.getInstance().getWorkspaceSize(user.getWorkspace()) + downloadedFileSize,
                        Fields.QUOTA_WORKSPACE_SIZE) == false) {
                LOGGER.warn("QUOTA_WORKSPACE_SIZE reached in workspace ", user.getWorkspaceId());
                throw new ImporterException(
                    "The maximum size of the workspace is reached, while adding " + addFile.getRemoteUrl());
            }
            // max total size
            if (user != null && Fields.QUOTA_TOTAL_SIZE != Fields.QUOTA_UNLIMITED
                    && Tools.checkQuota(QuotaManager.getInstance().getTotalSize() + downloadedFileSize,
                        Fields.QUOTA_TOTAL_SIZE) == false) {
                LOGGER.warn("QUOTA_TOTAL_SIZE reached in workspace ", user.getWorkspaceId());
                throw new ImporterException(
                    "The maximum size is reached, while adding " + addFile.getRemoteUrl());
            }

            String path = addFile.getArchivePath();
            if (path == null || path.isEmpty())
                path = FilenameUtils.getBaseName(remoteUri.toString());
            // remove leading slash
            if (path.startsWith("/"))
                path = path.substring(1);

            // add it
            ArchiveEntry entry = archive.addArchiveEntry(path, temp, ReplaceStrategy.RENAME);

            // set file format uri
            if (addFile.getFileFormat() != null)
                entry.setFormat(addFile.getFileFormat());

            // add all meta data objects
            if (addFile.getMetaData() != null)
                for (MetaObjectDataholder meta : addFile.getMetaData()) {
                    entry.addDescription(meta.getCombineArchiveMetaObject());
                }
        } catch (URISyntaxException e) {
            LOGGER.error(e, "Wrongly defined remoteUrl");
            throw new ImporterException("Cannot parse remote URL: " + addFile.getRemoteUrl(), e);
        } catch (IOException | CombineArchiveWebException e) {
            LOGGER.error(e, "Cannot download an additional file. ", addFile.getRemoteUrl());
            throw new ImporterException("Cannot download and add an additional file: " + addFile.getRemoteUrl(), e);
        } finally {
            if (temp != null && temp.toFile().exists())
                temp.toFile().delete();
        }
    }
}