Example usage for java.net URI getAuthority

List of usage examples for java.net URI getAuthority

Introduction

On this page you can find example usage for java.net URI getAuthority.

Prototype

public String getAuthority() 

Document

Returns the decoded authority component of this URI.
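
Before the examples, a minimal standalone sketch of what getAuthority() returns, including the decoding that distinguishes it from getRawAuthority() and the null result for URIs without an authority component:

import java.net.URI;

public class GetAuthorityDemo {
    public static void main(String[] args) {
        // The authority is [user-info@]host[:port].
        URI uri = URI.create("http://user@example.com:8080/path?query#fragment");
        System.out.println(uri.getAuthority());        // user@example.com:8080

        // Escaped octets are decoded by getAuthority() but kept by getRawAuthority().
        URI encoded = URI.create("http://us%20er@example.com/");
        System.out.println(encoded.getAuthority());    // us er@example.com
        System.out.println(encoded.getRawAuthority()); // us%20er@example.com

        // A URI without an authority component returns null.
        System.out.println(URI.create("mailto:someone@example.com").getAuthority()); // null
    }
}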

Usage

From source file:org.apache.hadoop.hive.metastore.HiveAlterHandler.java

@Override
public void alterTable(RawStore msdb, Warehouse wh, String dbname, String name, Table newt,
        EnvironmentContext environmentContext, HMSHandler handler)
        throws InvalidOperationException, MetaException {
    name = name.toLowerCase();
    dbname = dbname.toLowerCase();

    final boolean cascade = environmentContext != null && environmentContext.isSetProperties()
            && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(StatsSetupConst.CASCADE));
    if (newt == null) {
        throw new InvalidOperationException("New table is invalid: " + newt);
    }

    String newTblName = newt.getTableName().toLowerCase();
    String newDbName = newt.getDbName().toLowerCase();

    if (!MetaStoreUtils.validateName(newTblName, hiveConf)) {
        throw new InvalidOperationException(newTblName + " is not a valid object name");
    }
    String validate = MetaStoreUtils.validateTblColumns(newt.getSd().getCols());
    if (validate != null) {
        throw new InvalidOperationException("Invalid column " + validate);
    }

    Path srcPath = null;
    FileSystem srcFs = null;
    Path destPath = null;
    FileSystem destFs = null;

    boolean success = false;
    boolean dataWasMoved = false;
    Table oldt = null;
    List<MetaStoreEventListener> transactionalListeners = null;
    if (handler != null) {
        transactionalListeners = handler.getTransactionalListeners();
    }

    try {
        boolean rename = false;
        boolean isPartitionedTable = false;
        List<Partition> parts = null;

        // check if table with the new name already exists
        if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
            if (msdb.getTable(newDbName, newTblName) != null) {
                throw new InvalidOperationException(
                        "new table " + newDbName + "." + newTblName + " already exists");
            }
            rename = true;
        }

        msdb.openTransaction();
        // get old table
        oldt = msdb.getTable(dbname, name);
        if (oldt == null) {
            throw new InvalidOperationException("table " + dbname + "." + name + " doesn't exist");
        }

        if (oldt.getPartitionKeysSize() != 0) {
            isPartitionedTable = true;
        }

        if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
                false)) {
            // Throws InvalidOperationException if the new column types are not
            // compatible with the current column types.
            MetaStoreUtils.throwExceptionIfIncompatibleColTypeChange(oldt.getSd().getCols(),
                    newt.getSd().getCols());
        }

        //check that partition keys have not changed, except for virtual views
        //however, allow the partition comments to change
        boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
                newt.getPartitionKeys());

        if (!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
            if (!partKeysPartiallyEqual) {
                throw new InvalidOperationException("partition keys can not be changed.");
            }
        }

        // a rename needs to change the data location and move the data to the new location corresponding
        // to the new name if:
        // 1) the table is not a virtual view, and
        // 2) the table is not an external table, and
        // 3) the user didn't change the default location (or new location is empty), and
        // 4) the table was not initially created with a specified location
        if (rename && !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())
                && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
                        || StringUtils.isEmpty(newt.getSd().getLocation()))
                && !MetaStoreUtils.isExternalTable(oldt)) {
            Database olddb = msdb.getDatabase(dbname);
            // if a table was created in a user specified location using the DDL like
            // create table tbl ... location ..., it should be treated like an external table
            // in the table rename, its data location should not be changed. We can check
            // if the table directory was created directly under its database directory to tell
            // if it is such a table
            srcPath = new Path(oldt.getSd().getLocation());
            String oldtRelativePath = (new Path(olddb.getLocationUri()).toUri()).relativize(srcPath.toUri())
                    .toString();
            boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name)
                    && !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);

            if (!tableInSpecifiedLoc) {
                srcFs = wh.getFs(srcPath);

                // get new location
                Database db = msdb.getDatabase(newDbName);
                Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath);
                destPath = new Path(databasePath, newTblName);
                destFs = wh.getFs(destPath);

                newt.getSd().setLocation(destPath.toString());

                // check that destination does not exist otherwise we will be
                // overwriting data
                // check that src and dest are on the same file system
                if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
                    throw new InvalidOperationException("table new location " + destPath
                            + " is on a different file system than the old location " + srcPath
                            + ". This operation is not supported");
                }

                try {
                    if (destFs.exists(destPath)) {
                        throw new InvalidOperationException("New location for this table " + newDbName + "."
                                + newTblName + " already exists : " + destPath);
                    }
                    // check that src exists and also checks permissions necessary, rename src to dest
                    if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, true)) {
                        dataWasMoved = true;
                    }
                } catch (IOException | MetaException e) {
                    LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
                    throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name
                            + " failed to move data due to: '" + getSimpleMessage(e)
                            + "' See hive log file for details.");
                }
            }

            if (isPartitionedTable) {
                String oldTblLocPath = srcPath.toUri().getPath();
                String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;

                // also the location field in partition
                parts = msdb.getPartitions(dbname, name, -1);
                Map<Partition, ColumnStatistics> columnStatsNeedUpdated = new HashMap<Partition, ColumnStatistics>();
                for (Partition part : parts) {
                    String oldPartLoc = part.getSd().getLocation();
                    if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
                        URI oldUri = new Path(oldPartLoc).toUri();
                        String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
                        Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
                        part.getSd().setLocation(newPartLocPath.toString());
                    }
                    part.setDbName(newDbName);
                    part.setTableName(newTblName);
                    ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
                            part.getValues(), part.getSd().getCols(), oldt, part);
                    if (colStats != null) {
                        columnStatsNeedUpdated.put(part, colStats);
                    }
                }
                msdb.alterTable(dbname, name, newt);
                // alterPartition is only for changing the partition location in the table rename
                if (dataWasMoved) {
                    for (Partition part : parts) {
                        msdb.alterPartition(newDbName, newTblName, part.getValues(), part);
                    }
                }

                for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entrySet()) {
                    ColumnStatistics newPartColStats = partColStats.getValue();
                    newPartColStats.getStatsDesc().setDbName(newDbName);
                    newPartColStats.getStatsDesc().setTableName(newTblName);
                    msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
                }
            } else {
                alterTableUpdateTableColumnStats(msdb, oldt, newt);
            }
        } else {
            // operations other than table rename
            if (MetaStoreUtils.requireCalStats(hiveConf, null, null, newt, environmentContext)
                    && !isPartitionedTable) {
                Database db = msdb.getDatabase(newDbName);
                // Update table stats. For partitioned table, we update stats in alterPartition()
                MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext);
            }

            if (cascade && isPartitionedTable) {
                //Currently only column related changes can be cascaded in alter table
                if (!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) {
                    parts = msdb.getPartitions(dbname, name, -1);
                    for (Partition part : parts) {
                        List<FieldSchema> oldCols = part.getSd().getCols();
                        part.getSd().setCols(newt.getSd().getCols());
                        ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
                                part.getValues(), oldCols, oldt, part);
                        assert (colStats == null);
                        msdb.alterPartition(dbname, name, part.getValues(), part);
                    }
                    msdb.alterTable(dbname, name, newt);
                } else {
                    LOG.warn("Alter table does not cascade changes to its partitions.");
                }
            } else {
                alterTableUpdateTableColumnStats(msdb, oldt, newt);
            }
        }

        if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
            MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_TABLE,
                    new AlterTableEvent(oldt, newt, false, true, handler), environmentContext);
        }
        // commit the changes
        success = msdb.commitTransaction();
    } catch (InvalidObjectException e) {
        LOG.debug("Failed to get object from Metastore ", e);
        throw new InvalidOperationException("Unable to change partition or table."
                + " Check metastore logs for detailed stack." + e.getMessage());
    } catch (InvalidInputException e) {
        LOG.debug("Accessing Metastore failed due to invalid input ", e);
        throw new InvalidOperationException("Unable to change partition or table."
                + " Check metastore logs for detailed stack." + e.getMessage());
    } catch (NoSuchObjectException e) {
        LOG.debug("Object not found in metastore ", e);
        throw new InvalidOperationException("Unable to change partition or table. Database " + dbname
                + " does not exist" + " Check metastore logs for detailed stack." + e.getMessage());
    } finally {
        if (!success) {
            LOG.error("Failed to alter table " + dbname + "." + name);
            msdb.rollbackTransaction();
            if (dataWasMoved) {
                try {
                    if (destFs.exists(destPath)) {
                        if (!destFs.rename(destPath, srcPath)) {
                            LOG.error("Failed to restore data from " + destPath + " to " + srcPath
                                    + " in alter table failure. Manual restore is needed.");
                        }
                    }
                } catch (IOException e) {
                    LOG.error("Failed to restore data from " + destPath + " to " + srcPath
                            + " in alter table failure. Manual restore is needed.");
                }
            }
        }
    }
}

From source file:com.buaa.cfs.fs.FileContext.java

/**
 * Are qualPath1 and qualPath2 of the same file system?
 *
 * @param qualPath1 - fully qualified path
 * @param qualPath2 - fully qualified path
 *
 * @return true if both paths are on the same file system
 */
private static boolean isSameFS(Path qualPath1, Path qualPath2) {
    URI srcUri = qualPath1.toUri();
    URI dstUri = qualPath2.toUri();
    // Same scheme, and the authorities (when both are present) must be equal.
    return (srcUri.getScheme().equals(dstUri.getScheme()) && !(srcUri.getAuthority() != null
            && dstUri.getAuthority() != null && !srcUri.getAuthority().equals(dstUri.getAuthority())));
}
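
The same scheme-and-authority comparison can be exercised with plain java.net.URI values; a minimal standalone sketch using hypothetical HDFS URIs (no Hadoop dependency):

import java.net.URI;

public class SameFsDemo {
    public static void main(String[] args) {
        URI src = URI.create("hdfs://namenode:8020/user/alice/data");
        URI dst = URI.create("hdfs://namenode:8020/tmp/staging");
        // Same scheme, and authorities (when both are present) must match.
        boolean sameFs = src.getScheme().equals(dst.getScheme())
                && !(src.getAuthority() != null && dst.getAuthority() != null
                        && !src.getAuthority().equals(dst.getAuthority()));
        System.out.println(sameFs); // true
    }
}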

From source file:com.facebook.presto.hive.s3.PrestoS3FileSystem.java

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    this.multiPartUploadMinFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE,
            defaults.getS3MultipartMinFileSize().toBytes());
    this.multiPartUploadMinPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE,
            defaults.getS3MultipartMinPartSize().toBytes());
    this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    verify((pinS3ClientToCurrentRegion && conf.get(S3_ENDPOINT) == null) || !pinS3ClientToCurrentRegion,
            "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region");
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.credentialsProvider = createAwsCredentialsProvider(uri, conf);
    this.s3 = createAmazonS3Client(conf, configuration);
}
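
A recurring pattern here is rebuilding the file system's root URI from just the scheme and authority, discarding any path or query; a minimal sketch with a hypothetical bucket name:

import java.net.URI;

public class RootUriDemo {
    public static void main(String[] args) {
        URI full = URI.create("s3://my-bucket/some/key/part-00000");
        // Keep only scheme + authority (the bucket); drop the object key.
        URI root = URI.create(full.getScheme() + "://" + full.getAuthority());
        System.out.println(root); // s3://my-bucket
    }
}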

From source file:org.eclipse.orion.server.git.servlets.GitTreeHandlerV1.java

private JSONObject listEntry(String name, long timeStamp, boolean isDir, long length, URI location,
        boolean appendName) {
    JSONObject jsonObject = new JSONObject();
    try {
        jsonObject.put(ProtocolConstants.KEY_NAME, name);
        jsonObject.put(ProtocolConstants.KEY_LOCAL_TIMESTAMP, timeStamp);
        jsonObject.put(ProtocolConstants.KEY_DIRECTORY, isDir);
        jsonObject.put(ProtocolConstants.KEY_LENGTH, length);
        if (location != null) {
            if (isDir && !location.getPath().endsWith("/")) {
                location = URIUtil.append(location, "");
            }
            if (appendName) {
                location = URIUtil.append(location, name);
                if (isDir) {
                    location = URIUtil.append(location, "");
                }
            }
            jsonObject.put(ProtocolConstants.KEY_LOCATION, location);
            if (isDir) {
                try {
                    jsonObject.put(ProtocolConstants.KEY_CHILDREN_LOCATION, new URI(location.getScheme(),
                            location.getAuthority(), location.getPath(), "depth=1", location.getFragment())); //$NON-NLS-1$
                } catch (URISyntaxException e) {
                    throw new RuntimeException(e);
                }
            }
        }
        JSONObject attributes = new JSONObject();
        attributes.put("ReadOnly", true);
        jsonObject.put(ProtocolConstants.KEY_ATTRIBUTES, attributes);

    } catch (JSONException e) {
        //cannot happen because the key is non-null and the values are strings
        throw new RuntimeException(e);
    }
    return jsonObject;
}
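
The five-argument URI constructor above preserves the scheme, authority, path, and fragment while swapping in a query component; a minimal sketch with a hypothetical location:

import java.net.URI;
import java.net.URISyntaxException;

public class ChildrenLocationDemo {
    public static void main(String[] args) throws URISyntaxException {
        URI location = URI.create("https://orion.example.org/gitapi/tree/master/project/");
        URI children = new URI(location.getScheme(), location.getAuthority(),
                location.getPath(), "depth=1", location.getFragment());
        System.out.println(children); // https://orion.example.org/gitapi/tree/master/project/?depth=1
    }
}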

From source file:org.apache.hadoop.hdfs.server.namenode.bookkeeper.BookKeeperJournalManager.java

public BookKeeperJournalManager(Configuration conf, URI uri, NamespaceInfo nsInfo, NameNodeMetrics metrics)
        throws IOException {
    this.conf = conf;
    this.metrics = metrics;
    quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE, BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);
    ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE, BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
    digestPw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW, BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT);
    String zkConnect = uri.getAuthority().replace(";", ",");
    zkParentPath = uri.getPath();
    String ledgersAvailablePath = conf.get(BKJM_ZK_LEDGERS_AVAILABLE_PATH,
            BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
    formatInfoPath = joinPath(zkParentPath, "version");
    String currentInProgressPath = joinPath(zkParentPath, "CurrentInProgress");
    String maxTxIdPath = joinPath(zkParentPath, "maxtxid");
    int zkSessionTimeoutMs = conf.getInt(BKJM_ZK_SESSION_TIMEOUT, BKJM_ZK_SESSION_TIMEOUT_DEFAULT);
    int zkMaxRetries = conf.getInt(BKJM_ZK_MAX_RETRIES, BKJM_ZK_MAX_RETRIES_DEFAULT);
    int zkRetryIntervalMs = conf.getInt(BKJM_ZK_RETRY_INTERVAL, BKJM_ZK_RETRY_INTERVAL_DEFAULT);
    CountDownLatch connectLatch = new CountDownLatch(1);
    ConnectionWatcher connectionWatcher = new ConnectionWatcher(connectLatch);
    ZooKeeper zooKeeper = new ZooKeeper(zkConnect, zkSessionTimeoutMs, connectionWatcher);
    // Use twice session timeout as the connection timeout
    int zkConnectTimeoutMs = zkSessionTimeoutMs * 2;

    if (!connectionWatcher.await(zkConnectTimeoutMs)) {
        throw new IOException(
                "Timed out waiting to connect to " + zkConnect + " after " + zkConnectTimeoutMs + " ms.");
    }
    prepareBookKeeperEnv(ledgersAvailablePath, zooKeeper);

    try {
        ClientConfiguration clientConf = new ClientConfiguration();
        clientConf.setClientTcpNoDelay(conf.getBoolean(BKJM_BOOKKEEPER_CLIENT_TCP_NODELAY,
                BKJM_BOOKKEEPER_CLIENT_TCP_NO_DELAY_DEFAULT));
        clientConf.setThrottleValue(
                conf.getInt(BKJM_BOOKKEEPER_CLIENT_THROTTLE, BKJM_BOOKKEEPER_CLIENT_THROTTLE_DEFAULT));
        bookKeeperClient = new BookKeeper(clientConf, zooKeeper);
    } catch (KeeperException e) {
        keeperException("Unrecoverable ZooKeeper creating BookKeeper client", e);
        throw new IllegalStateException(e); // never reached
    } catch (InterruptedException e) {
        interruptedException("Interrupted creating a BookKeeper client", e);
        throw new IllegalStateException(e); // never reached
    }
    zk = new RecoveringZooKeeper(new BasicZooKeeper(zooKeeper), zkMaxRetries, zkRetryIntervalMs);
    metadataManager = new BookKeeperJournalMetadataManager(zk, zkParentPath);
    maxTxId = new MaxTxId(zk, maxTxIdPath);
    currentInProgressMetadata = new CurrentInProgressMetadata(zk, currentInProgressPath);
    createZkMetadataIfNotExists(nsInfo);
    metadataManager.init();
}
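
The connect string above works because a semicolon-separated server list is still a legal registry-based authority: getHost() is null for such a URI, but getAuthority() returns the full list. A minimal sketch with hypothetical ZooKeeper hosts:

import java.net.URI;

public class ZkConnectDemo {
    public static void main(String[] args) {
        URI uri = URI.create("bookkeeper://zk1:2181;zk2:2181;zk3:2181/hdfsjournal");
        System.out.println(uri.getHost());      // null (not a server-based authority)
        String zkConnect = uri.getAuthority().replace(";", ",");
        System.out.println(zkConnect);          // zk1:2181,zk2:2181,zk3:2181
        System.out.println(uri.getPath());      // /hdfsjournal
    }
}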

From source file:org.apache.hadoop.hive.metastore.tools.SchemaToolTaskValidate.java

/**
 * Check if the location is valid for the given entity.
 * @param entity          the entity to represent a database, partition or table
 * @param entityLocation  the location
 * @param defaultServers  a list of the servers that the location needs to match.
 *                        The location host needs to match one of the given servers.
 *                        If empty, no check is performed against the list.
 * @return true if the location is valid
 */
private boolean checkLocation(String entity, String entityLocation, URI[] defaultServers) {
    boolean isValid = true;

    if (entityLocation == null) {
        System.err.println(entity + ", Error: empty location");
        isValid = false;
    } else {
        try {
            URI currentUri = new Path(entityLocation).toUri();
            String scheme = currentUri.getScheme();
            String path = currentUri.getPath();
            if (StringUtils.isEmpty(scheme)) {
                System.err.println(
                        entity + ", Location: " + entityLocation + ", Error: missing location scheme.");
                isValid = false;
            } else if (StringUtils.isEmpty(path)) {
                System.err
                        .println(entity + ", Location: " + entityLocation + ", Error: missing location path.");
                isValid = false;
            } else if (ArrayUtils.isNotEmpty(defaultServers) && currentUri.getAuthority() != null) {
                String authority = currentUri.getAuthority();
                boolean matchServer = false;
                for (URI server : defaultServers) {
                    if (StringUtils.equalsIgnoreCase(server.getScheme(), scheme)
                            && StringUtils.equalsIgnoreCase(server.getAuthority(), authority)) {
                        matchServer = true;
                        break;
                    }
                }
                if (!matchServer) {
                    System.err
                            .println(entity + ", Location: " + entityLocation + ", Error: mismatched server.");
                    isValid = false;
                }
            }

            // if there is no path element other than "/", report it but do not fail
            if (isValid && StringUtils.containsOnly(path, "/")) {
                System.err.println(entity + ", Location: " + entityLocation + ", Warn: location set to root, "
                        + "not a recommended config.");
            }
        } catch (Exception pe) {
            System.err.println(entity + ", Error: invalid location - " + pe.getMessage());
            isValid = false;
        }
    }

    return isValid;
}

From source file:org.apache.hadoop.fs.HarFileSystem.java

/**
 * Decode the raw URI to get the underlying URI.
 * @param rawURI raw Har URI
 * @return filtered URI of the underlying fileSystem
 */
private URI decodeHarURI(URI rawURI, Configuration conf) throws IOException {
    String authority = rawURI.getAuthority();
    // No authority: we are using the default file system from the config,
    // so return its URI as the underlying URI.
    if (authority == null) {
        return FileSystem.getDefaultUri(conf);
    }

    int i = authority.indexOf('-');
    if (i < 0) {
        throw new IOException("URI: " + rawURI + " is an invalid Har URI since '-' not found."
                + "  Expecting har://<scheme>-<host>/<path>.");
    }

    if (rawURI.getQuery() != null) {
        // query component not allowed
        throw new IOException("query component in Path not supported  " + rawURI);
    }

    URI tmp;
    try {
        // convert <scheme>-<host> to <scheme>://<host>
        URI baseUri = new URI(authority.replaceFirst("-", "://"));

        tmp = new URI(baseUri.getScheme(), baseUri.getAuthority(), rawURI.getPath(), rawURI.getQuery(),
                rawURI.getFragment());
    } catch (URISyntaxException e) {
        throw new IOException(
                "URI: " + rawURI + " is an invalid Har URI. Expecting har://<scheme>-<host>/<path>.");
    }
    return tmp;
}
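
The authority trick in isolation: a Har URI packs the underlying scheme and host into the authority as <scheme>-<host>, and replacing the first '-' with "://" recovers the underlying file system URI. A minimal sketch with a hypothetical archive path:

import java.net.URI;

public class HarAuthorityDemo {
    public static void main(String[] args) throws Exception {
        URI rawURI = URI.create("har://hdfs-namenode:8020/user/archive.har");
        String authority = rawURI.getAuthority();            // hdfs-namenode:8020
        URI baseUri = new URI(authority.replaceFirst("-", "://"));
        System.out.println(baseUri);                         // hdfs://namenode:8020
    }
}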

From source file:org.apache.hadoop.fs.azure.NativeAzureFileSystem.java

/**
 * Puts in the authority of the default file system if it is a WASB file
 * system and the given URI's authority is null.
 *
 * @return The URI with reconstructed authority if necessary and possible.
 */
private static URI reconstructAuthorityIfNeeded(URI uri, Configuration conf) {
    if (null == uri.getAuthority()) {
        // If WASB is the default file system, get the authority from there
        URI defaultUri = FileSystem.getDefaultUri(conf);
        if (defaultUri != null && isWasbScheme(defaultUri.getScheme())) {
            try {
                // Reconstruct the URI with the authority from the default URI.
                return new URI(uri.getScheme(), defaultUri.getAuthority(), uri.getPath(), uri.getQuery(),
                        uri.getFragment());
            } catch (URISyntaxException e) {
                // This should never happen.
                throw new Error("Bad URI construction", e);
            }
        }
    }
    return uri;
}

From source file:org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java

/**
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
@SuppressWarnings("ConstantConditions")
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME
                    + "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In Ignite replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority,
                    DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        } else
            clientLog = IgfsLogger.disabledLogger();
    } finally {
        leaveBusy();
    }
}

From source file:org.openpaas.paasta.portal.api.common.CustomCloudControllerClientImpl.java

private void extractUriInfo(Map<String, UUID> domains, String uri, Map<String, String> uriInfo) {
    URI newUri = URI.create(uri);
    String authority = newUri.getScheme() != null ? newUri.getAuthority() : newUri.getPath();
    for (String domain : domains.keySet()) {
        if (authority != null && authority.endsWith(domain)) {
            String previousDomain = uriInfo.get("domainName");
            if (previousDomain == null || domain.length() > previousDomain.length()) {
                //Favor most specific subdomains
                uriInfo.put("domainName", domain);
                if (domain.length() < authority.length()) {
                    uriInfo.put("host", authority.substring(0, authority.indexOf(domain) - 1));
                } else if (domain.length() == authority.length()) {
                    uriInfo.put("host", "");
                }
            }
        }
    }
    if (uriInfo.get("domainName") == null) {
        throw new IllegalArgumentException("Domain not found for URI " + uri);
    }
    if (uriInfo.get("host") == null) {
        throw new IllegalArgumentException(
                "Invalid URI " + uri + " -- host not specified for domain " + uriInfo.get("domainName"));
    }
}
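
The getAuthority()-versus-getPath() fallback in the first line hinges on whether the URI carries a scheme: without one, java.net.URI treats the whole string as a path and the authority is null. A minimal sketch with a hypothetical route:

import java.net.URI;

public class AuthorityVsPathDemo {
    public static void main(String[] args) {
        URI withScheme = URI.create("http://myapp.apps.example.com");
        System.out.println(withScheme.getAuthority()); // myapp.apps.example.com
        System.out.println(withScheme.getPath());      // (empty)

        URI bare = URI.create("myapp.apps.example.com");
        System.out.println(bare.getAuthority());       // null
        System.out.println(bare.getPath());            // myapp.apps.example.com
    }
}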