Example usage for java.util Properties putAll

Introduction

On this page you can find example usages of java.util.Properties.putAll, collected from open-source projects.

Prototype

@Override
public synchronized void putAll(Map<?, ?> t)

Usage
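
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of two behaviors that recur throughout this page: putAll copies every entry of the argument map into the Properties table, and, because it bypasses setProperty, it can insert non-String keys or values, which getProperty silently ignores.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class PutAllDemo {
    public static void main(String[] args) {
        Properties props = new Properties();

        Map<String, String> overrides = new HashMap<>();
        overrides.put("host", "localhost");
        overrides.put("port", "8080");

        // putAll copies every entry of the map into the Properties table.
        props.putAll(overrides);
        System.out.println(props.getProperty("host")); // localhost

        // Caveat: putAll accepts any Map, so non-String values can slip in;
        // getProperty returns only String values and yields null otherwise.
        Map<String, Object> raw = new HashMap<>();
        raw.put("timeout", 30); // Integer, not String
        props.putAll(raw);
        System.out.println(props.getProperty("timeout")); // null
        System.out.println(props.get("timeout"));         // 30
    }
}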

From source file:org.apache.falcon.oozie.OozieBundleBuilder.java

@Override
public Properties build(Cluster cluster, Path buildPath) throws FalconException {
    String clusterName = cluster.getName();
    if (EntityUtil.getStartTime(entity, clusterName)
            .compareTo(EntityUtil.getEndTime(entity, clusterName)) >= 0) {
        LOG.info("process validity start <= end for cluster {}. Skipping schedule", clusterName);
        return null;
    }

    List<Properties> coords = buildCoords(cluster, buildPath);
    if (coords == null || coords.isEmpty()) {
        return null;
    }

    BUNDLEAPP bundle = new BUNDLEAPP();
    bundle.setName(EntityUtil.getWorkflowName(entity).toString());
    // all the properties are set prior to bundle and coordinators creation

    for (Properties coordProps : coords) {
        // add the coordinator to the bundle
        COORDINATOR coord = new COORDINATOR();
        String coordPath = coordProps.getProperty(OozieEntityBuilder.ENTITY_PATH);
        final String coordName = coordProps.getProperty(OozieEntityBuilder.ENTITY_NAME);
        coord.setName(coordName);
        coord.setAppPath(getStoragePath(coordPath));
        coordProps.put(OozieClient.USER_NAME, CurrentUser.getUser());
        coordProps.setProperty(AbstractWorkflowEngine.NAME_NODE, ClusterHelper.getStorageUrl(cluster));
        if (EntityUtil.isTableStorageType(cluster, entity)) {
            Tag tag = EntityUtil.getWorkflowNameTag(coordName, entity);
            if (tag == Tag.REPLICATION) {
                // todo: kludge send source hcat creds for coord dependency check to pass
                String srcClusterName = EntityUtil.getWorkflowNameSuffix(coordName, entity);
                coordProps.putAll(HiveUtil.getHiveCredentials(ClusterHelper.getCluster(srcClusterName)));
            } else {
                coordProps.putAll(HiveUtil.getHiveCredentials(cluster));
            }
        }

        coord.setConfiguration(getConfig(coordProps));
        bundle.getCoordinator().add(coord);
    }

    Path marshalPath = marshal(cluster, bundle, buildPath); // write the bundle
    Properties properties = getProperties(marshalPath, entity.getName());
    properties.setProperty(OozieClient.BUNDLE_APP_PATH, getStoragePath(buildPath));
    properties.setProperty(AbstractWorkflowEngine.NAME_NODE, ClusterHelper.getStorageUrl(cluster));

    //Add libpath
    String libPath = getOozieLibPath(buildPath);
    if (StringUtils.isNotBlank(libPath)) {
        properties.put(OozieClient.LIBPATH, libPath);
    }

    return properties;
}
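
Two insertion styles appear side by side in this example: put and setProperty for individual keys, and putAll to fold the entire Hive credential set returned by HiveUtil.getHiveCredentials into the coordinator properties in a single call.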

From source file:org.apache.archiva.metadata.repository.file.FileMetadataRepository.java

@Override
public void addMetadataFacet(String repositoryId, MetadataFacet metadataFacet) {
    Properties properties = new Properties();
    properties.putAll(metadataFacet.toProperties());

    try {
        File directory = new File(getMetadataDirectory(repositoryId, metadataFacet.getFacetId()),
                metadataFacet.getName());
        writeProperties(properties, directory, METADATA_KEY);
    } catch (IOException e) {
        // TODO!
        log.error(e.getMessage(), e);
    }
}
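
Here putAll bridges two types: the map view returned by metadataFacet.toProperties() is copied into a fresh Properties object so the facet can be persisted through the standard properties-file machinery.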

From source file:edu.umd.cs.submit.CommandLineSubmit.java

/**
 * Packs the given files into an in-memory zip archive and builds the
 * multipart POST request for submission.
 *
 * @param p the submission properties; must contain "submitURL"
 * @param find supplies the root path length used to relativize zip entries
 * @param files the files to include in the submission
 * @param userProps user properties, merged into p via putAll
 * @return the multipart POST request, ready to be executed
 * @throws IOException
 * @throws FileNotFoundException
 */
public static MultipartPostMethod createFilePost(Properties p, FindAllFiles find, Collection<File> files,
        Properties userProps) throws IOException, FileNotFoundException {
    // ========================== assemble zip file in byte array
    // ==============================
    String loginName = userProps.getProperty("loginName");
    String classAccount = userProps.getProperty("classAccount");
    String from = classAccount;
    if (loginName != null && !loginName.equals(classAccount))
        from += "/" + loginName;
    System.out.println(" submitted by " + from);
    System.out.println();
    System.out.println("Submitting the following files");
    ByteArrayOutputStream bytes = new ByteArrayOutputStream(4096);
    byte[] buf = new byte[4096];
    ZipOutputStream zipfile = new ZipOutputStream(bytes);
    zipfile.setComment("zipfile for CommandLineTurnin, version " + VERSION);
    for (File resource : files) {
        if (resource.isDirectory())
            continue;
        String relativePath = resource.getCanonicalPath().substring(find.rootPathLength + 1);
        System.out.println(relativePath);
        ZipEntry entry = new ZipEntry(relativePath);
        entry.setTime(resource.lastModified());

        zipfile.putNextEntry(entry);
        InputStream in = new FileInputStream(resource);
        try {
            while (true) {
                int n = in.read(buf);
                if (n < 0)
                    break;
                zipfile.write(buf, 0, n);
            }
        } finally {
            in.close();
        }
        zipfile.closeEntry();

    } // for each file
    zipfile.close();

    MultipartPostMethod filePost = new MultipartPostMethod(p.getProperty("submitURL"));

    p.putAll(userProps);
    // add properties
    for (Map.Entry<?, ?> e : p.entrySet()) {
        String key = (String) e.getKey();
        String value = (String) e.getValue();
        if (!key.equals("submitURL"))
            filePost.addParameter(key, value);
    }
    filePost.addParameter("submitClientTool", "CommandLineTool");
    filePost.addParameter("submitClientVersion", VERSION);
    byte[] allInput = bytes.toByteArray();
    filePost.addPart(new FilePart("submittedFiles", new ByteArrayPartSource("submit.zip", allInput)));
    return filePost;
}
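
Note the merge order: p.putAll(userProps) runs before the entries are converted into POST parameters, so for any key present in both property sets the value from userProps wins (submitURL itself is filtered out of the parameters).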

From source file:org.apache.hadoop.hive.ql.plan.PlanUtils.java

/**
  * Generate a table descriptor from a createTableDesc.
  */
public static TableDesc getTableDesc(CreateTableDesc crtTblDesc, String cols, String colTypes) {

    Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
    String separatorCode = Integer.toString(Utilities.ctrlaCode);
    String columns = cols;
    String columnTypes = colTypes;
    boolean lastColumnTakesRestOfTheLine = false;
    TableDesc ret;

    try {
        if (crtTblDesc.getSerName() != null) {
            Class c = JavaUtils.loadClass(crtTblDesc.getSerName());
            serdeClass = c;
        }

        if (crtTblDesc.getFieldDelim() != null) {
            separatorCode = crtTblDesc.getFieldDelim();
        }

        ret = getTableDesc(serdeClass, separatorCode, columns, columnTypes, lastColumnTakesRestOfTheLine,
                false);

        // set other table properties
        Properties properties = ret.getProperties();

        if (crtTblDesc.getCollItemDelim() != null) {
            properties.setProperty(serdeConstants.COLLECTION_DELIM, crtTblDesc.getCollItemDelim());
        }

        if (crtTblDesc.getMapKeyDelim() != null) {
            properties.setProperty(serdeConstants.MAPKEY_DELIM, crtTblDesc.getMapKeyDelim());
        }

        if (crtTblDesc.getFieldEscape() != null) {
            properties.setProperty(serdeConstants.ESCAPE_CHAR, crtTblDesc.getFieldEscape());
        }

        if (crtTblDesc.getLineDelim() != null) {
            properties.setProperty(serdeConstants.LINE_DELIM, crtTblDesc.getLineDelim());
        }

        if (crtTblDesc.getNullFormat() != null) {
            properties.setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, crtTblDesc.getNullFormat());
        }

        if (crtTblDesc.getTableName() != null && crtTblDesc.getDatabaseName() != null) {
            properties.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME,
                    crtTblDesc.getTableName());
        }

        if (crtTblDesc.getTblProps() != null) {
            properties.putAll(crtTblDesc.getTblProps());
        }
        if (crtTblDesc.getSerdeProps() != null) {
            properties.putAll(crtTblDesc.getSerdeProps());
        }

        // replace the default input & output file format with those found in
        // crtTblDesc
        Class c1 = JavaUtils.loadClass(crtTblDesc.getInputFormat());
        Class c2 = JavaUtils.loadClass(crtTblDesc.getOutputFormat());
        Class<? extends InputFormat> in_class = c1;
        Class<? extends HiveOutputFormat> out_class = c2;

        ret.setInputFileFormatClass(in_class);
        ret.setOutputFileFormatClass(out_class);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException("Unable to find class in getTableDesc: " + e.getMessage(), e);
    }
    return ret;
}
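
The two putAll calls near the end layer the user-supplied table and SerDe properties over the defaults that getTableDesc already placed in the descriptor; since putAll overwrites existing keys, the explicit properties from the CREATE TABLE statement take precedence.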

From source file:com.thinkbiganalytics.feedmgr.rest.controller.FeedsController.java

@PUT
@Path("{id}/props")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Sets the properties for the specified feed.")
@ApiResponses({
        @ApiResponse(code = 200, message = "Returns the updated properties.", response = Properties.class),
        @ApiResponse(code = 400, message = "The id is not a valid UUID.", response = RestResponseStatus.class),
        @ApiResponse(code = 404, message = "The feed could not be found.", response = RestResponseStatus.class),
        @ApiResponse(code = 500, message = "The properties could not be updated.", response = RestResponseStatus.class) })
public Properties replaceFeedProperties(@PathParam("id") final String feedId, final Properties props) {
    LOG.debug("Replace feed properties ID: {}, properties: {}", feedId, props);

    return this.metadata.commit(() -> {
        this.accessController.checkPermission(AccessController.SERVICES, FeedServicesAccessControl.EDIT_FEEDS);

        com.thinkbiganalytics.metadata.api.feed.Feed.ID domainId = feedProvider.resolveFeed(feedId);
        com.thinkbiganalytics.metadata.api.feed.Feed domain = feedProvider.getFeed(domainId);

        if (domain != null) {
            Map<String, Object> domainProps = updateProperties(props, domain, true);
            Properties newProps = new Properties();

            newProps.putAll(domainProps);
            return newProps;
        } else {
            throw new WebApplicationException("No feed exists with the ID: " + feedId, Status.NOT_FOUND);
        }
    });
}
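
Because updateProperties returns a Map<String, Object>, this putAll can place non-String values into the Properties object; as the sketch at the top of this page shows, such entries are stored but invisible to getProperty.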

From source file:org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils.java

/**
 * initialize MapWork
 *
 * @param alias_id
 *          current alias
 * @param topOp
 *          the top operator of the stack
 * @param plan
 *          map work to initialize
 * @param local
 *          whether you need to add to map-reduce or local work
 * @param partsList
 *          pruned partition list. If it is null it will be computed on-the-fly.
 * @param inputs
 *          read entities for the map work
 * @param conf
 *          current instance of hive conf
 */
public static void setMapWork(MapWork plan, ParseContext parseCtx, Set<ReadEntity> inputs,
        PrunedPartitionList partsList, Operator<? extends OperatorDesc> topOp, String alias_id, HiveConf conf,
        boolean local) throws SemanticException {
    ArrayList<Path> partDir = new ArrayList<Path>();
    ArrayList<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();

    Path tblDir = null;
    plan.setNameToSplitSample(parseCtx.getNameToSplitSample());

    if (partsList == null) {
        try {
            TableScanOperator tsOp = (TableScanOperator) topOp;
            partsList = PartitionPruner.prune(tsOp, parseCtx, alias_id);
        } catch (SemanticException e) {
            throw e;
        } catch (HiveException e) {
            LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
            throw new SemanticException(e.getMessage(), e);
        }
    }

    // Generate the map work for this alias_id
    // pass both confirmed and unknown partitions through the map-reduce
    // framework
    Set<Partition> parts = partsList.getPartitions();
    PartitionDesc aliasPartnDesc = null;
    try {
        if (!parts.isEmpty()) {
            aliasPartnDesc = Utilities.getPartitionDesc(parts.iterator().next());
        }
    } catch (HiveException e) {
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
    }

    // The table does not have any partitions
    if (aliasPartnDesc == null) {
        aliasPartnDesc = new PartitionDesc(
                Utilities.getTableDesc(((TableScanOperator) topOp).getConf().getTableMetadata()), null);
    }

    Map<String, String> props = topOp.getConf().getOpProps();
    if (props != null) {
        Properties target = aliasPartnDesc.getProperties();
        if (target == null) {
            aliasPartnDesc.setProperties(target = new Properties());
        }
        target.putAll(props);
    }

    plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);

    long sizeNeeded = Integer.MAX_VALUE;
    int fileLimit = -1;
    if (parseCtx.getGlobalLimitCtx().isEnable()) {
        long sizePerRow = HiveConf.getLongVar(parseCtx.getConf(), HiveConf.ConfVars.HIVELIMITMAXROWSIZE);
        sizeNeeded = parseCtx.getGlobalLimitCtx().getGlobalLimit() * sizePerRow;
        // For the optimization that reduces the number of input files, we limit
        // the number of files allowed. If more than a certain number of files
        // would have to be selected, we skip the optimization, since having too
        // many files as inputs can cause unpredictable latency and is not
        // necessarily cheaper.
        fileLimit = HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVELIMITOPTLIMITFILE);

        if (sizePerRow <= 0 || fileLimit <= 0) {
            LOG.info("Skip optimization to reduce input size of 'limit'");
            parseCtx.getGlobalLimitCtx().disableOpt();
        } else if (parts.isEmpty()) {
            LOG.info("Empty input: skip limit optimiztion");
        } else {
            LOG.info("Try to reduce input size for 'limit' " + "sizeNeeded: " + sizeNeeded + "  file limit : "
                    + fileLimit);
        }
    }
    boolean isFirstPart = true;
    boolean emptyInput = true;
    boolean singlePartition = (parts.size() == 1);

    // Track the dependencies for the view. Consider a query like: select * from V;
    // where V is a view of the form: select * from T
    // The dependencies should include V at depth 0, and T at depth 1 (inferred).
    Map<String, ReadEntity> viewToInput = parseCtx.getViewAliasToInput();
    ReadEntity parentViewInfo = PlanUtils.getParentViewInfo(alias_id, viewToInput);

    // The table should also be considered a part of inputs, even if the table is a
    // partitioned table and whether any partition is selected or not

    //This read entity is a direct read entity and not an indirect read (that is when
    // this is being read because it is a dependency of a view).
    boolean isDirectRead = (parentViewInfo == null);
    TableDesc tblDesc = null;
    boolean initTableDesc = false;

    for (Partition part : parts) {
        if (part.getTable().isPartitioned()) {
            PlanUtils.addInput(inputs, new ReadEntity(part, parentViewInfo, isDirectRead));
        } else {
            PlanUtils.addInput(inputs, new ReadEntity(part.getTable(), parentViewInfo, isDirectRead));
        }

        // Later the properties have to come from the partition as opposed
        // to from the table in order to support versioning.
        Path[] paths = null;
        SampleDesc sampleDescr = parseCtx.getOpToSamplePruner().get(topOp);

        // Lookup list bucketing pruner
        Map<String, ExprNodeDesc> partToPruner = parseCtx.getOpToPartToSkewedPruner().get(topOp);
        ExprNodeDesc listBucketingPruner = (partToPruner != null) ? partToPruner.get(part.getName()) : null;

        if (sampleDescr != null) {
            assert (listBucketingPruner == null) : "Sampling and list bucketing can't coexist.";
            paths = SamplePruner.prune(part, sampleDescr);
            parseCtx.getGlobalLimitCtx().disableOpt();
        } else if (listBucketingPruner != null) {
            assert (sampleDescr == null) : "Sampling and list bucketing can't coexist.";
            /* Use the list bucketing pruner's path. */
            paths = ListBucketingPruner.prune(parseCtx, part, listBucketingPruner);
        } else {
            // Now we only try the first partition, if the first partition doesn't
            // contain enough size, we change to normal mode.
            if (parseCtx.getGlobalLimitCtx().isEnable()) {
                if (isFirstPart) {
                    long sizeLeft = sizeNeeded;
                    ArrayList<Path> retPathList = new ArrayList<Path>();
                    SamplePruner.LimitPruneRetStatus status = SamplePruner.limitPrune(part, sizeLeft, fileLimit,
                            retPathList);
                    if (status.equals(SamplePruner.LimitPruneRetStatus.NoFile)) {
                        continue;
                    } else if (status.equals(SamplePruner.LimitPruneRetStatus.NotQualify)) {
                        LOG.info("Use full input -- first " + fileLimit + " files are more than " + sizeNeeded
                                + " bytes");

                        parseCtx.getGlobalLimitCtx().disableOpt();

                    } else {
                        emptyInput = false;
                        paths = new Path[retPathList.size()];
                        int index = 0;
                        for (Path path : retPathList) {
                            paths[index++] = path;
                        }
                        if (status.equals(SamplePruner.LimitPruneRetStatus.NeedAllFiles) && singlePartition) {
                            // if all files are needed to meet the size limit, we disable
                            // optimization. It usually happens for empty table/partition or
                            // table/partition with only one file. By disabling this
                            // optimization, we can avoid retrying the query if there is
                            // not sufficient rows.
                            parseCtx.getGlobalLimitCtx().disableOpt();
                        }
                    }
                    isFirstPart = false;
                } else {
                    paths = new Path[0];
                }
            }
            if (!parseCtx.getGlobalLimitCtx().isEnable()) {
                paths = part.getPath();
            }
        }

        // is it a partitioned table?
        if (!part.getTable().isPartitioned()) {
            assert (tblDir == null);

            tblDir = paths[0];
            if (!initTableDesc) {
                tblDesc = Utilities.getTableDesc(part.getTable());
                initTableDesc = true;
            }
        } else if (tblDesc == null) {
            if (!initTableDesc) {
                tblDesc = Utilities.getTableDesc(part.getTable());
                initTableDesc = true;
            }
        }

        if (props != null) {
            Properties target = tblDesc.getProperties();
            if (target == null) {
                tblDesc.setProperties(target = new Properties());
            }
            target.putAll(props);
        }

        for (Path p : paths) {
            if (p == null) {
                continue;
            }
            String path = p.toString();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Adding " + path + " of table" + alias_id);
            }

            partDir.add(p);
            try {
                if (part.getTable().isPartitioned()) {
                    partDesc.add(Utilities.getPartitionDesc(part));
                } else {
                    partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part, false));
                }
            } catch (HiveException e) {
                LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
                throw new SemanticException(e.getMessage(), e);
            }
        }
    }
    if (emptyInput) {
        parseCtx.getGlobalLimitCtx().disableOpt();
    }

    Iterator<Path> iterPath = partDir.iterator();
    Iterator<PartitionDesc> iterPartnDesc = partDesc.iterator();

    if (!local) {
        while (iterPath.hasNext()) {
            assert iterPartnDesc.hasNext();
            String path = iterPath.next().toString();

            PartitionDesc prtDesc = iterPartnDesc.next();

            // Add the path to alias mapping
            if (plan.getPathToAliases().get(path) == null) {
                plan.getPathToAliases().put(path, new ArrayList<String>());
            }
            plan.getPathToAliases().get(path).add(alias_id);
            plan.getPathToPartitionInfo().put(path, prtDesc);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Information added for path " + path);
            }
        }

        assert plan.getAliasToWork().get(alias_id) == null;
        plan.getAliasToWork().put(alias_id, topOp);
    } else {
        // populate local work if needed
        MapredLocalWork localPlan = plan.getMapRedLocalWork();
        if (localPlan == null) {
            localPlan = new MapredLocalWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>(),
                    new LinkedHashMap<String, FetchWork>());
        }

        assert localPlan.getAliasToWork().get(alias_id) == null;
        assert localPlan.getAliasToFetchWork().get(alias_id) == null;
        localPlan.getAliasToWork().put(alias_id, topOp);
        if (tblDir == null) {
            tblDesc = Utilities.getTableDesc(partsList.getSourceTable());
            localPlan.getAliasToFetchWork().put(alias_id, new FetchWork(partDir, partDesc, tblDesc));
        } else {
            localPlan.getAliasToFetchWork().put(alias_id, new FetchWork(tblDir, tblDesc));
        }
        plan.setMapRedLocalWork(localPlan);
    }
}
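
Both putAll calls in this method follow the same lazy-merge pattern: fetch the descriptor's existing Properties, attach a fresh empty one if it is null, then merge the operator-level properties on top.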

From source file:io.sledge.core.impl.installer.SledgePackageConfigurer.java

@Override
public Properties mergeProperties(String envFileContent, Properties propsForMerge) {
    Properties mergedProps = new Properties();
    try {

        // Support internal property references for application package provided properties
        Properties origProps = new Properties();
        origProps.load(new StringReader(envFileContent));
        String configuredEnvironmentFileContent = StrSubstitutor.replace(envFileContent, origProps);

        mergedProps.load(new StringReader(configuredEnvironmentFileContent));

        // Support internal property references for overwrite properties
        StringWriter propsForMergeWriter = new StringWriter();
        propsForMerge.store(propsForMergeWriter, "");
        String propsForMergeAsString = propsForMergeWriter.getBuffer().toString();
        String configuredPropsForMerge = StrSubstitutor.replace(propsForMergeAsString, propsForMerge);
        Properties reconfiguredPropsForMerge = new Properties();
        reconfiguredPropsForMerge.load(new StringReader(configuredPropsForMerge));

        mergedProps.putAll(reconfiguredPropsForMerge);

    } catch (IOException e) {
        throw new InstallationException("Could not load environment properties.", e);
    }

    return mergedProps;
}
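
The merge direction matters here: mergedProps is first loaded from the substituted environment-file content, and putAll then applies the overwrite properties, so a key present in both sources ends up with the value from propsForMerge.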

From source file:io.github.arven.flare.boot.TomcatContainer.java

public void start() throws Exception {
    if (base == null || !base.exists()) {
        setup(configuration);
    }

    final Properties props = configuration.getProperties();

    if (props != null) {
        StrSubstitutor substitutor = null;
        for (final String s : props.stringPropertyNames()) {
            final String v = props.getProperty(s);
            if (v != null && v.contains("${")) {
                if (substitutor == null) {
                    final Map<String, String> placeHolders = new HashMap<String, String>();
                    placeHolders.put("tomee.embedded.http", Integer.toString(configuration.getHttpPort()));
                    placeHolders.put("tomee.embedded.https", Integer.toString(configuration.getHttpsPort()));
                    placeHolders.put("tomee.embedded.stop", Integer.toString(configuration.getStopPort()));
                    substitutor = new StrSubstitutor(placeHolders);
                }
                props.put(s, substitutor.replace(v));
            }
        }

        // inherit from system props
        final Properties properties = new Properties(System.getProperties());
        properties.putAll(configuration.getProperties());
        Logger.configure(properties);
    } else {
        Logger.configure();
    }

    final File conf = new File(base, "conf");
    final File webapps = new File(base, "webapps");

    final String catalinaBase = base.getAbsolutePath();

    // set the env before calling anything on Tomcat or Catalina!!
    System.setProperty("catalina.base", catalinaBase);
    System.setProperty("openejb.deployments.classpath", "false");
    System.setProperty("catalina.home", catalinaBase);
    System.setProperty("catalina.base", catalinaBase);
    System.setProperty("openejb.home", catalinaBase);
    System.setProperty("openejb.base", catalinaBase);
    System.setProperty("openejb.servicemanager.enabled", "false");

    copyFileTo(conf, "catalina.policy");
    copyTemplateTo(conf, "catalina.properties");
    copyFileTo(conf, "context.xml");
    copyFileTo(conf, "openejb.xml");
    copyFileTo(conf, "tomcat-users.xml");
    copyFileTo(conf, "web.xml");

    final boolean initialized;
    if (configuration.hasServerXml()) {
        final File file = new File(conf, "server.xml");
        final FileOutputStream fos = new FileOutputStream(file);
        try {
            IO.copy(configuration.getServerXmlFile(), fos);
        } finally {
            IO.close(fos);
        }

        // respect config (host/port) of the Configuration
        final QuickServerXmlParser ports = QuickServerXmlParser.parse(file);
        if (configuration.isKeepServerXmlAsThis()) {
            // force ports to be able to stop the server and get @ArquillianResource
            configuration.setHttpPort(Integer.parseInt(ports.http()));
            configuration.setStopPort(Integer.parseInt(ports.stop()));
        } else {
            final Map<String, String> replacements = new HashMap<String, String>();
            replacements.put(ports.http(), String.valueOf(configuration.getHttpPort()));
            replacements.put(ports.https(), String.valueOf(configuration.getHttpsPort()));
            replacements.put(ports.stop(), String.valueOf(configuration.getStopPort()));
            IO.copy(IO.slurp(new ReplaceStringsInputStream(IO.read(file), replacements)).getBytes(), file);
        }

        tomcat.server(createServer(file.getAbsolutePath()));
        initialized = true;
    } else {
        copyFileTo(conf, "server.xml");
        initialized = false;
    }

    if (props != null && !props.isEmpty()) {
        final FileWriter systemProperties = new FileWriter(new File(conf, "system.properties"));
        try {
            props.store(systemProperties, "");
        } finally {
            IO.close(systemProperties);
        }
    }

    // Need to use JULI so log messages from the tests are visible
    // using openejb logging conf in embedded mode
    /* if we use our config (Logger.configure()) don't override it
    copyFileTo(conf, "logging.properties");
    System.setProperty("java.util.logging.manager", "org.apache.juli.ClassLoaderLogManager");
    final File logging = new File(conf, "logging.properties");
    if (logging.exists()) {
    System.setProperty("java.util.logging.config.file", logging.getAbsolutePath());
    }
    */

    // Trigger loading of catalina.properties
    CatalinaProperties.getProperty("foo");

    tomcat.setBaseDir(base.getAbsolutePath());
    tomcat.setHostname(configuration.getHost());
    if (!initialized) {
        tomcat.getHost().setAppBase(webapps.getAbsolutePath());
        tomcat.getEngine().setDefaultHost(configuration.getHost());
        tomcat.setHostname(configuration.getHost());
    }

    if (tomcat.getRawConnector() == null && !configuration.isSkipHttp()) {
        final Connector connector = new Connector(Http11Protocol.class.getName());
        connector.setPort(configuration.getHttpPort());
        connector.setAttribute("connectionTimeout", "3000");
        tomcat.getService().addConnector(connector);
        tomcat.setConnector(connector);
    }

    // create https connector
    if (configuration.isSsl()) {
        final Connector httpsConnector = new Connector(Http11Protocol.class.getName());
        httpsConnector.setPort(configuration.getHttpsPort());
        httpsConnector.setSecure(true);
        httpsConnector.setProperty("SSLEnabled", "true");
        httpsConnector.setProperty("sslProtocol", configuration.getSslProtocol());

        if (configuration.getKeystoreFile() != null) {
            httpsConnector.setAttribute("keystoreFile", configuration.getKeystoreFile());
        }
        if (configuration.getKeystorePass() != null) {
            httpsConnector.setAttribute("keystorePass", configuration.getKeystorePass());
        }
        httpsConnector.setAttribute("keystoreType", configuration.getKeystoreType());
        httpsConnector.setAttribute("clientAuth", configuration.getClientAuth());
        httpsConnector.setAttribute("keyAlias", configuration.getKeyAlias());

        tomcat.getService().addConnector(httpsConnector);

        if (configuration.isSkipHttp()) {
            tomcat.setConnector(httpsConnector);
        }
    }

    // Bootstrap Tomcat
    Logger.getInstance(LogCategory.OPENEJB_STARTUP, TomcatContainer.class)
            .info("Starting TomEE from: " + base.getAbsolutePath()); // create it after Logger is configured

    if (configuration.getUsers() != null) {
        for (final Map.Entry<String, String> user : configuration.getUsers().entrySet()) {
            tomcat.addUser(user.getKey(), user.getValue());
        }
    }
    if (configuration.getRoles() != null) {
        for (final Map.Entry<String, String> user : configuration.getRoles().entrySet()) {
            for (final String role : user.getValue().split(" *, *")) {
                tomcat.addRole(user.getKey(), role);
            }
        }
    }
    if (!initialized) {
        tomcat.init();
    }
    tomcat.start();

    // Bootstrap OpenEJB
    final Properties properties = new Properties();
    properties.setProperty("openejb.deployments.classpath", "false");
    properties.setProperty("openejb.loader", "tomcat-system");
    properties.setProperty("openejb.home", catalinaBase);
    properties.setProperty("openejb.base", catalinaBase);
    properties.setProperty("openejb.servicemanager.enabled", "false");
    if (configuration.getProperties() != null) {
        properties.putAll(configuration.getProperties());
    }
    if (properties.getProperty("openejb.system.apps") == null) { // will make startup faster and it is rarely useful for embedded case
        properties.setProperty("openejb.system.apps", "false");
    }
    if (configuration.isQuickSession()) {
        properties.put("openejb.session.manager", QuickSessionManager.class.getName());
    }

    try {
        final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
        final Properties tomcatServerInfo = IO.readProperties(
                classLoader.getResourceAsStream("org/apache/catalina/util/ServerInfo.properties"),
                new Properties());

        String serverNumber = tomcatServerInfo.getProperty("server.number");
        if (serverNumber == null) {
            // Tomcat5 only has server.info
            final String serverInfo = tomcatServerInfo.getProperty("server.info");
            if (serverInfo != null) {
                final int slash = serverInfo.indexOf('/');
                serverNumber = serverInfo.substring(slash + 1);
            }
        }
        if (serverNumber != null) {
            System.setProperty("tomcat.version", serverNumber);
        }

        final String serverBuilt = tomcatServerInfo.getProperty("server.built");
        if (serverBuilt != null) {
            System.setProperty("tomcat.built", serverBuilt);
        }
    } catch (final Throwable e) {
        // no-op
    }

    final TomcatLoader loader = new TomcatLoader();
    loader.initDefaults(properties);

    // need to add properties after having initialized defaults
    // to the properties passed to SystemInstance, otherwise we lose some of them
    final Properties initProps = new Properties();
    initProps.putAll(System.getProperties());
    initProps.putAll(properties);
    if (SystemInstance.isInitialized()) {
        SystemInstance.get().getProperties().putAll(initProps);
    } else {
        SystemInstance.init(initProps);
    }
    SystemInstance.get().setComponent(StandardServer.class, (StandardServer) tomcat.getServer());
    SystemInstance.get().setComponent(Server.class, tomcat.getServer()); // needed again cause of init()

    loader.initialize(properties);

    assembler = SystemInstance.get().getComponent(Assembler.class);
    configurationFactory = new ConfigurationFactory();
}
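
This example (like the near-identical org.apache.tomee.embedded.Container below) leans on a distinction that is easy to miss: new Properties(System.getProperties()) installs the system properties only as defaults, while putAll copies them into the table itself; the comment about losing properties appears to refer to exactly this. A minimal sketch of the difference, independent of the TomEE code:

import java.util.Properties;

public class DefaultsVsPutAll {
    public static void main(String[] args) {
        Properties defaults = new Properties();
        defaults.setProperty("color", "blue");

        // Defaults are only a fallback consulted by getProperty()...
        Properties viaDefaults = new Properties(defaults);
        System.out.println(viaDefaults.getProperty("color")); // blue
        // ...they are not entries of the table itself, so operations
        // that ignore defaults (keySet, entrySet, store) miss them.
        System.out.println(viaDefaults.containsKey("color"));  // false

        // putAll copies real entries into the table.
        Properties viaPutAll = new Properties();
        viaPutAll.putAll(defaults);
        System.out.println(viaPutAll.containsKey("color"));    // true
    }
}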

From source file:org.apache.hadoop.hive.ql.parse.mr2.GenMR2Utils.java

/**
 * initialize MapWork
 *
 * @param alias_id
 *          current alias
 * @param topOp
 *          the top operator of the stack
 * @param plan
 *          map work to initialize
 * @param local
 *          whether you need to add to map-reduce or local work
 * @param partsList
 *          pruned partition list. If it is null it will be computed on-the-fly.
 * @param inputs
 *          read entities for the map work
 * @param conf
 *          current instance of hive conf
 */
public static void setMapWork(MapWork plan, ParseContext parseCtx, Set<ReadEntity> inputs,
        PrunedPartitionList partsList, Operator<? extends OperatorDesc> topOp, String alias_id, HiveConf conf,
        boolean local) throws SemanticException {
    ArrayList<Path> partDir = new ArrayList<Path>();
    ArrayList<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();

    Path tblDir = null;
    TableDesc tblDesc = null;

    plan.setNameToSplitSample(parseCtx.getNameToSplitSample());

    if (partsList == null) {
        try {
            TableScanOperator tsOp = (TableScanOperator) topOp;
            partsList = PartitionPruner.prune(tsOp, parseCtx, alias_id);
        } catch (SemanticException e) {
            throw e;
        } catch (HiveException e) {
            LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
            throw new SemanticException(e.getMessage(), e);
        }
    }

    // Generate the map work for this alias_id
    // pass both confirmed and unknown partitions through the map-reduce
    // framework
    Set<Partition> parts = partsList.getPartitions();
    PartitionDesc aliasPartnDesc = null;
    try {
        if (!parts.isEmpty()) {
            aliasPartnDesc = Utilities.getPartitionDesc(parts.iterator().next());
        }
    } catch (HiveException e) {
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
    }

    // The table does not have any partitions
    if (aliasPartnDesc == null) {
        aliasPartnDesc = new PartitionDesc(
                Utilities.getTableDesc(((TableScanOperator) topOp).getConf().getTableMetadata()), null);
    }

    Map<String, String> props = topOp.getConf().getOpProps();
    if (props != null) {
        Properties target = aliasPartnDesc.getProperties();
        if (target == null) {
            aliasPartnDesc.setProperties(target = new Properties());
        }
        target.putAll(props);
    }

    plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);

    long sizeNeeded = Integer.MAX_VALUE;
    int fileLimit = -1;
    if (parseCtx.getGlobalLimitCtx().isEnable()) {
        long sizePerRow = HiveConf.getLongVar(parseCtx.getConf(), HiveConf.ConfVars.HIVELIMITMAXROWSIZE);
        sizeNeeded = parseCtx.getGlobalLimitCtx().getGlobalLimit() * sizePerRow;
        // For the optimization that reduces the number of input files, we limit
        // the number of files allowed. If more than a certain number of files
        // would have to be selected, we skip the optimization, since having too
        // many files as inputs can cause unpredictable latency and is not
        // necessarily cheaper.
        fileLimit = HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVELIMITOPTLIMITFILE);

        if (sizePerRow <= 0 || fileLimit <= 0) {
            LOG.info("Skip optimization to reduce input size of 'limit'");
            parseCtx.getGlobalLimitCtx().disableOpt();
        } else if (parts.isEmpty()) {
            LOG.info("Empty input: skip limit optimiztion");
        } else {
            LOG.info("Try to reduce input size for 'limit' " + "sizeNeeded: " + sizeNeeded + "  file limit : "
                    + fileLimit);
        }
    }
    boolean isFirstPart = true;
    boolean emptyInput = true;
    boolean singlePartition = (parts.size() == 1);

    // Track the dependencies for the view. Consider a query like: select * from V;
    // where V is a view of the form: select * from T
    // The dependencies should include V at depth 0, and T at depth 1 (inferred).
    Map<String, ReadEntity> viewToInput = parseCtx.getViewAliasToInput();
    ReadEntity parentViewInfo = PlanUtils.getParentViewInfo(alias_id, viewToInput);

    // The table should also be considered a part of inputs, even if the table is a
    // partitioned table and whether any partition is selected or not

    // This read entity is a direct read entity and not an indirect read (that is when
    // this is being read because it is a dependency of a view).
    boolean isDirectRead = (parentViewInfo == null);

    for (Partition part : parts) {
        if (part.getTable().isPartitioned()) {
            PlanUtils.addInput(inputs, new ReadEntity(part, parentViewInfo, isDirectRead));
        } else {
            PlanUtils.addInput(inputs, new ReadEntity(part.getTable(), parentViewInfo, isDirectRead));
        }

        // Later the properties have to come from the partition as opposed
        // to from the table in order to support versioning.
        Path[] paths = null;
        sampleDesc sampleDescr = parseCtx.getOpToSamplePruner().get(topOp);

        // Lookup list bucketing pruner
        Map<String, ExprNodeDesc> partToPruner = parseCtx.getOpToPartToSkewedPruner().get(topOp);
        ExprNodeDesc listBucketingPruner = (partToPruner != null) ? partToPruner.get(part.getName()) : null;

        if (sampleDescr != null) {
            assert (listBucketingPruner == null) : "Sampling and list bucketing can't coexist.";
            paths = SamplePruner.prune(part, sampleDescr);
            parseCtx.getGlobalLimitCtx().disableOpt();
        } else if (listBucketingPruner != null) {
            assert (sampleDescr == null) : "Sampling and list bucketing can't coexist.";
            /* Use the list bucketing pruner's path. */
            paths = ListBucketingPruner.prune(parseCtx, part, listBucketingPruner);
        } else {
            // Now we only try the first partition, if the first partition doesn't
            // contain enough size, we change to normal mode.
            if (parseCtx.getGlobalLimitCtx().isEnable()) {
                if (isFirstPart) {
                    long sizeLeft = sizeNeeded;
                    ArrayList<Path> retPathList = new ArrayList<Path>();
                    SamplePruner.LimitPruneRetStatus status = SamplePruner.limitPrune(part, sizeLeft, fileLimit,
                            retPathList);
                    if (status.equals(SamplePruner.LimitPruneRetStatus.NoFile)) {
                        continue;
                    } else if (status.equals(SamplePruner.LimitPruneRetStatus.NotQualify)) {
                        LOG.info("Use full input -- first " + fileLimit + " files are more than " + sizeNeeded
                                + " bytes");

                        parseCtx.getGlobalLimitCtx().disableOpt();

                    } else {
                        emptyInput = false;
                        paths = new Path[retPathList.size()];
                        int index = 0;
                        for (Path path : retPathList) {
                            paths[index++] = path;
                        }
                        if (status.equals(SamplePruner.LimitPruneRetStatus.NeedAllFiles) && singlePartition) {
                            // if all files are needed to meet the size limit, we disable
                            // optimization. It usually happens for empty table/partition or
                            // table/partition with only one file. By disabling this
                            // optimization, we can avoid retrying the query if there is
                            // not sufficient rows.
                            parseCtx.getGlobalLimitCtx().disableOpt();
                        }
                    }
                    isFirstPart = false;
                } else {
                    paths = new Path[0];
                }
            }
            if (!parseCtx.getGlobalLimitCtx().isEnable()) {
                paths = part.getPath();
            }
        }

        // is it a partitioned table?
        if (!part.getTable().isPartitioned()) {
            assert ((tblDir == null) && (tblDesc == null));

            tblDir = paths[0];
            tblDesc = Utilities.getTableDesc(part.getTable());
        } else if (tblDesc == null) {
            tblDesc = Utilities.getTableDesc(part.getTable());
        }

        if (props != null) {
            Properties target = tblDesc.getProperties();
            if (target == null) {
                tblDesc.setProperties(target = new Properties());
            }
            target.putAll(props);
        }

        for (Path p : paths) {
            if (p == null) {
                continue;
            }
            String path = p.toString();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Adding " + path + " of table" + alias_id);
            }

            partDir.add(p);
            try {
                if (part.getTable().isPartitioned()) {
                    partDesc.add(Utilities.getPartitionDesc(part));
                } else {
                    partDesc.add(Utilities.getPartitionDescFromTableDesc(tblDesc, part));
                }
            } catch (HiveException e) {
                LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
                throw new SemanticException(e.getMessage(), e);
            }
        }
    }
    if (emptyInput) {
        parseCtx.getGlobalLimitCtx().disableOpt();
    }

    Iterator<Path> iterPath = partDir.iterator();
    Iterator<PartitionDesc> iterPartnDesc = partDesc.iterator();

    if (!local) {
        while (iterPath.hasNext()) {
            assert iterPartnDesc.hasNext();
            String path = iterPath.next().toString();

            PartitionDesc prtDesc = iterPartnDesc.next();

            // Add the path to alias mapping
            if (plan.getPathToAliases().get(path) == null) {
                plan.getPathToAliases().put(path, new ArrayList<String>());
            }
            plan.getPathToAliases().get(path).add(alias_id);
            plan.getPathToPartitionInfo().put(path, prtDesc);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Information added for path " + path);
            }
        }

        assert plan.getAliasToWork().get(alias_id) == null;
        plan.getAliasToWork().put(alias_id, topOp);
    } else {
        // populate local work if needed
        MapredLocalWork localPlan = plan.getMapRedLocalWork();
        if (localPlan == null) {
            localPlan = new MapredLocalWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>(),
                    new LinkedHashMap<String, FetchWork>());
        }

        assert localPlan.getAliasToWork().get(alias_id) == null;
        assert localPlan.getAliasToFetchWork().get(alias_id) == null;
        localPlan.getAliasToWork().put(alias_id, topOp);
        if (tblDir == null) {
            tblDesc = Utilities.getTableDesc(partsList.getSourceTable());
            localPlan.getAliasToFetchWork().put(alias_id, new FetchWork(partDir, partDesc, tblDesc));
        } else {
            localPlan.getAliasToFetchWork().put(alias_id, new FetchWork(tblDir, tblDesc));
        }
        plan.setMapRedLocalWork(localPlan);
    }
}

From source file:org.apache.tomee.embedded.Container.java

public void start() throws Exception {
    if (base == null || !base.exists()) {
        setup(configuration);
    }

    final Properties props = configuration.getProperties();

    if (props != null) {
        StrSubstitutor substitutor = null;
        for (final String s : props.stringPropertyNames()) {
            final String v = props.getProperty(s);
            if (v != null && v.contains("${")) {
                if (substitutor == null) {
                    final Map<String, String> placeHolders = new HashMap<>();
                    placeHolders.put("tomee.embedded.http", Integer.toString(configuration.getHttpPort()));
                    placeHolders.put("tomee.embedded.https", Integer.toString(configuration.getHttpsPort()));
                    placeHolders.put("tomee.embedded.stop", Integer.toString(configuration.getStopPort()));
                    substitutor = new StrSubstitutor(placeHolders);
                }
                props.put(s, substitutor.replace(v));
            }
        }

        // inherit from system props
        final Properties properties = new Properties(System.getProperties());
        properties.putAll(configuration.getProperties());
        Logger.configure(properties);
    } else {
        Logger.configure();
    }

    final File conf = new File(base, "conf");
    final File webapps = new File(base, "webapps");

    final String catalinaBase = base.getAbsolutePath();

    // set the env before calling anything on Tomcat or Catalina!!
    System.setProperty("catalina.base", catalinaBase);
    System.setProperty("openejb.deployments.classpath", "false");
    System.setProperty("catalina.home", catalinaBase);
    System.setProperty("catalina.base", catalinaBase);
    System.setProperty("openejb.home", catalinaBase);
    System.setProperty("openejb.base", catalinaBase);
    System.setProperty("openejb.servicemanager.enabled", "false");

    copyFileTo(conf, "catalina.policy");
    copyTemplateTo(conf, "catalina.properties");
    copyFileTo(conf, "context.xml");
    copyFileTo(conf, "openejb.xml");
    copyFileTo(conf, "tomcat-users.xml");
    copyFileTo(conf, "web.xml");

    final boolean initialized;
    if (configuration.hasServerXml()) {
        final File file = new File(conf, "server.xml");
        final FileOutputStream fos = new FileOutputStream(file);
        try {
            IO.copy(configuration.getServerXmlFile(), fos);
        } finally {
            IO.close(fos);
        }

        // respect config (host/port) of the Configuration
        final QuickServerXmlParser ports = QuickServerXmlParser.parse(file);
        if (configuration.isKeepServerXmlAsThis()) {
            // force ports to be able to stop the server and get @ArquillianResource
            configuration.setHttpPort(Integer.parseInt(ports.http()));
            configuration.setStopPort(Integer.parseInt(ports.stop()));
        } else {
            final Map<String, String> replacements = new HashMap<String, String>();
            replacements.put(ports.http(), String.valueOf(configuration.getHttpPort()));
            replacements.put(ports.https(), String.valueOf(configuration.getHttpsPort()));
            replacements.put(ports.stop(), String.valueOf(configuration.getStopPort()));
            IO.copy(IO.slurp(new ReplaceStringsInputStream(IO.read(file), replacements)).getBytes(), file);
        }

        tomcat.server(createServer(file.getAbsolutePath()));
        initialized = true;
    } else {
        copyFileTo(conf, "server.xml");
        initialized = false;
    }

    if (props != null && !props.isEmpty()) {
        final FileWriter systemProperties = new FileWriter(new File(conf, "system.properties"));
        try {
            props.store(systemProperties, "");
        } finally {
            IO.close(systemProperties);
        }
    }

    // Need to use JULI so log messages from the tests are visible
    // using openejb logging conf in embedded mode
    /* if we use our config (Logger.configure()) don't override it
    copyFileTo(conf, "logging.properties");
    System.setProperty("java.util.logging.manager", "org.apache.juli.ClassLoaderLogManager");
    final File logging = new File(conf, "logging.properties");
    if (logging.exists()) {
    System.setProperty("java.util.logging.config.file", logging.getAbsolutePath());
    }
    */

    // Trigger loading of catalina.properties
    CatalinaProperties.getProperty("foo");

    tomcat.setBaseDir(base.getAbsolutePath());
    tomcat.setHostname(configuration.getHost());
    if (!initialized) {
        tomcat.getHost().setAppBase(webapps.getAbsolutePath());
        tomcat.getEngine().setDefaultHost(configuration.getHost());
        tomcat.setHostname(configuration.getHost());
    }

    if (configuration.getRealm() != null) {
        tomcat.getEngine().setRealm(configuration.getRealm());
    }

    if (tomcat.getRawConnector() == null && !configuration.isSkipHttp()) {
        final Connector connector = new Connector(Http11Protocol.class.getName());
        connector.setPort(configuration.getHttpPort());
        connector.setAttribute("connectionTimeout", "3000");
        tomcat.getService().addConnector(connector);
        tomcat.setConnector(connector);
    }

    // create https connector
    if (configuration.isSsl()) {
        final Connector httpsConnector = new Connector(Http11Protocol.class.getName());
        httpsConnector.setPort(configuration.getHttpsPort());
        httpsConnector.setSecure(true);
        httpsConnector.setProperty("SSLEnabled", "true");
        httpsConnector.setProperty("sslProtocol", configuration.getSslProtocol());

        if (configuration.getKeystoreFile() != null) {
            httpsConnector.setAttribute("keystoreFile", configuration.getKeystoreFile());
        }
        if (configuration.getKeystorePass() != null) {
            httpsConnector.setAttribute("keystorePass", configuration.getKeystorePass());
        }
        httpsConnector.setAttribute("keystoreType", configuration.getKeystoreType());
        httpsConnector.setAttribute("clientAuth", configuration.getClientAuth());
        httpsConnector.setAttribute("keyAlias", configuration.getKeyAlias());

        tomcat.getService().addConnector(httpsConnector);

        if (configuration.isSkipHttp()) {
            tomcat.setConnector(httpsConnector);
        }
    }

    // Bootstrap Tomcat
    Logger.getInstance(LogCategory.OPENEJB_STARTUP, Container.class)
            .info("Starting TomEE from: " + base.getAbsolutePath()); // create it after Logger is configured

    if (configuration.getUsers() != null) {
        for (final Map.Entry<String, String> user : configuration.getUsers().entrySet()) {
            tomcat.addUser(user.getKey(), user.getValue());
        }
    }
    if (configuration.getRoles() != null) {
        for (final Map.Entry<String, String> user : configuration.getRoles().entrySet()) {
            for (final String role : user.getValue().split(" *, *")) {
                tomcat.addRole(user.getKey(), role);
            }
        }
    }
    if (!initialized) {
        tomcat.init();
    }
    tomcat.start();

    // Bootstrap OpenEJB
    final Properties properties = new Properties();
    properties.setProperty("openejb.deployments.classpath", "false");
    properties.setProperty("openejb.loader", "tomcat-system");
    properties.setProperty("openejb.home", catalinaBase);
    properties.setProperty("openejb.base", catalinaBase);
    properties.setProperty("openejb.servicemanager.enabled", "false");
    if (configuration.getProperties() != null) {
        properties.putAll(configuration.getProperties());
    }
    if (properties.getProperty("openejb.system.apps") == null) { // will make startup faster and it is rarely useful for embedded case
        properties.setProperty("openejb.system.apps", "false");
    }
    if (configuration.isQuickSession()) {
        properties.put("openejb.session.manager", QuickSessionManager.class.getName());
    }

    try {
        final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
        final Properties tomcatServerInfo = IO.readProperties(
                classLoader.getResourceAsStream("org/apache/catalina/util/ServerInfo.properties"),
                new Properties());

        String serverNumber = tomcatServerInfo.getProperty("server.number");
        if (serverNumber == null) {
            // Tomcat5 only has server.info
            final String serverInfo = tomcatServerInfo.getProperty("server.info");
            if (serverInfo != null) {
                final int slash = serverInfo.indexOf('/');
                serverNumber = serverInfo.substring(slash + 1);
            }
        }
        if (serverNumber != null) {
            System.setProperty("tomcat.version", serverNumber);
        }

        final String serverBuilt = tomcatServerInfo.getProperty("server.built");
        if (serverBuilt != null) {
            System.setProperty("tomcat.built", serverBuilt);
        }
    } catch (final Throwable e) {
        // no-op
    }

    final TomcatLoader loader = new TomcatLoader();
    loader.initDefaults(properties);

    // need to add properties after having initialized defaults
    // to the properties passed to SystemInstance, otherwise we lose some of them
    final Properties initProps = new Properties();
    initProps.putAll(System.getProperties());
    initProps.putAll(properties);
    if (SystemInstance.isInitialized()) {
        SystemInstance.get().getProperties().putAll(initProps);
    } else {
        SystemInstance.init(initProps);
    }
    SystemInstance.get().setComponent(StandardServer.class, (StandardServer) tomcat.getServer());
    SystemInstance.get().setComponent(Server.class, tomcat.getServer()); // needed again cause of init()

    loader.initialize(properties);

    assembler = SystemInstance.get().getComponent(Assembler.class);
    configurationFactory = new ConfigurationFactory();

    if (configuration.isWithEjbRemote()) {
        tomcat.getHost().addChild(new TomEERemoteWebapp());
    }
}